code stringlengths 2k 1.04M | repo_path stringlengths 5 517 | parsed_code stringlengths 0 1.04M | quality_prob float64 0.02 0.95 | learning_prob float64 0.02 0.93 |
|---|---|---|---|---|
from datetime import date, timedelta
import pytest
from isodate.duration import Duration
from ..interval import IntervalManager
def test_get_intervalled_forms(manager, create_forms, cleanup_db):
forms = manager.get_intervalled_forms()
assert len(forms) == 2
@pytest.mark.parametrize(
"value,exp_startdate,exp_duration",
[
(
"2018-03-01/P1Y2M10D",
Duration(10, 0, 0, years=1, months=2),
date(2018, 3, 1),
),
("P2W", timedelta(14), None),
("P1Y2M1p0D", False, False),
("P1Y2M10DT2H30M", False, False),
("not a date/P2W", False, False),
("2018-03-01/P2W/", False, False),
],
)
def test_parse_interval(manager, value, exp_startdate, exp_duration):
startdate, duration = manager.parse_interval(value)
assert startdate == exp_startdate
assert duration == exp_duration
def test_run_new_form(client, manager, create_form_to_workflow, cleanup_db):
query = """\
query allCases {
allCases (workflow: "my-test-workflow") {
pageInfo {
startCursor
endCursor
}
edges {
node {
id
}
}
}
}\
"""
resp = client.execute(query)
assert len(resp["data"]["allCases"]["edges"]) == 0
manager.run()
resp = client.execute(query)
assert len(resp["data"]["allCases"]["edges"]) == 2
def test_run_existing_case(manager):
data = {
"meta": {
"interval": {
"weekday": "tue",
"interval": "2018-03-01/P2W",
"workflow_slug": "my-test-workflow",
}
},
"id": "Rm9ybTpteS1pbnRlcnZhbC1mb3Jt",
"slug": "my-interval-form",
"documents": {
"edges": [
{
"node": {
"case": {
"closedAt": None,
"closedByUser": None,
"status": "RUNNING",
}
}
}
]
},
}
assert not manager.get_last_run(data)
data = {
"meta": {
"interval": {
"weekday": "tue",
"interval": "2018-03-01/P2W",
"workflow_slug": "my-test-workflow",
}
},
"id": "Rm9ybTpteS1pbnRlcnZhbC1mb3Jt",
"slug": "my-interval-form",
"documents": {
"edges": [
{
"node": {
"case": {
"closedAt": "2019-03-01",
"closedByUser": 5,
"status": "COMPLETED",
}
}
}
]
},
}
assert manager.get_last_run(data) == date(2019, 3, 1)
def test_needs_action(manager):
kwargs = {
"last_run": date(2018, 1, 1),
"interval": timedelta(14),
"start": date(2017, 1, 1),
}
assert manager.needs_action(**kwargs)
kwargs["last_run"] = date.today() - timedelta(days=13)
assert not manager.needs_action(**kwargs)
kwargs["weekday"] = date.today().weekday() - 2
assert manager.needs_action(**kwargs)
kwargs["start"] = date.today() + timedelta(days=5)
assert not manager.needs_action(**kwargs)
kwargs["start"] = None
assert manager.needs_action(**kwargs)
def test_fail_processing(mocker, manager, create_form_to_workflow, cleanup_db):
mocker.patch.object(IntervalManager, "handle_form")
IntervalManager.handle_form.side_effect = Exception
# No exception should be raised
manager.run() | caluma_interval/tests/test_interval.py | from datetime import date, timedelta
import pytest
from isodate.duration import Duration
from ..interval import IntervalManager
def test_get_intervalled_forms(manager, create_forms, cleanup_db):
forms = manager.get_intervalled_forms()
assert len(forms) == 2
@pytest.mark.parametrize(
"value,exp_startdate,exp_duration",
[
(
"2018-03-01/P1Y2M10D",
Duration(10, 0, 0, years=1, months=2),
date(2018, 3, 1),
),
("P2W", timedelta(14), None),
("P1Y2M1p0D", False, False),
("P1Y2M10DT2H30M", False, False),
("not a date/P2W", False, False),
("2018-03-01/P2W/", False, False),
],
)
def test_parse_interval(manager, value, exp_startdate, exp_duration):
startdate, duration = manager.parse_interval(value)
assert startdate == exp_startdate
assert duration == exp_duration
def test_run_new_form(client, manager, create_form_to_workflow, cleanup_db):
query = """\
query allCases {
allCases (workflow: "my-test-workflow") {
pageInfo {
startCursor
endCursor
}
edges {
node {
id
}
}
}
}\
"""
resp = client.execute(query)
assert len(resp["data"]["allCases"]["edges"]) == 0
manager.run()
resp = client.execute(query)
assert len(resp["data"]["allCases"]["edges"]) == 2
def test_run_existing_case(manager):
data = {
"meta": {
"interval": {
"weekday": "tue",
"interval": "2018-03-01/P2W",
"workflow_slug": "my-test-workflow",
}
},
"id": "Rm9ybTpteS1pbnRlcnZhbC1mb3Jt",
"slug": "my-interval-form",
"documents": {
"edges": [
{
"node": {
"case": {
"closedAt": None,
"closedByUser": None,
"status": "RUNNING",
}
}
}
]
},
}
assert not manager.get_last_run(data)
data = {
"meta": {
"interval": {
"weekday": "tue",
"interval": "2018-03-01/P2W",
"workflow_slug": "my-test-workflow",
}
},
"id": "Rm9ybTpteS1pbnRlcnZhbC1mb3Jt",
"slug": "my-interval-form",
"documents": {
"edges": [
{
"node": {
"case": {
"closedAt": "2019-03-01",
"closedByUser": 5,
"status": "COMPLETED",
}
}
}
]
},
}
assert manager.get_last_run(data) == date(2019, 3, 1)
def test_needs_action(manager):
kwargs = {
"last_run": date(2018, 1, 1),
"interval": timedelta(14),
"start": date(2017, 1, 1),
}
assert manager.needs_action(**kwargs)
kwargs["last_run"] = date.today() - timedelta(days=13)
assert not manager.needs_action(**kwargs)
kwargs["weekday"] = date.today().weekday() - 2
assert manager.needs_action(**kwargs)
kwargs["start"] = date.today() + timedelta(days=5)
assert not manager.needs_action(**kwargs)
kwargs["start"] = None
assert manager.needs_action(**kwargs)
def test_fail_processing(mocker, manager, create_form_to_workflow, cleanup_db):
mocker.patch.object(IntervalManager, "handle_form")
IntervalManager.handle_form.side_effect = Exception
# No exception should be raised
manager.run() | 0.710929 | 0.547162 |
"""Argument parser.
"""
import argparse
import typing
from .. import api
from . import constants, utils
DEFAULTS = dict(
loglevel=0, list=False, output=None, itype=None, otype=None, atype=None,
merge=api.MS_DICTS, ignore_missing=False, template=False, env=False,
schema=None, validate=False, gen_schema=False, extra_opts=None
)
def gen_type_help_txt(types: str, target: str = 'Input') -> str:
"""Generate a type help txt.
"""
return (f'Select type of {target} files from {types}'
'[Automatically detected by those extension]')
def make_parser(defaults: typing.Optional[typing.Dict] = None,
prog: typing.Optional[str] = None
) -> argparse.ArgumentParser:
"""Make an instance of argparse.ArgumentParser to parse arguments.
"""
if defaults is None:
defaults = DEFAULTS
ctypes: typing.List[str] = utils.list_parser_types()
ctypes_s: str = ', '.join(ctypes)
apsr = argparse.ArgumentParser(prog=prog, usage=constants.USAGE)
apsr.set_defaults(**defaults)
apsr.add_argument('inputs', type=str, nargs='*', help='Input files')
apsr.add_argument(
'--version', action='version',
version=f'%%(prog)s {".".join(api.version())}'
)
apsr.add_argument('-o', '--output', help='Output file path')
apsr.add_argument('-I', '--itype', choices=ctypes, metavar='ITYPE',
help=gen_type_help_txt(ctypes_s))
apsr.add_argument('-O', '--otype', choices=ctypes, metavar='OTYPE',
help=gen_type_help_txt(ctypes_s, 'Output'))
mss = api.MERGE_STRATEGIES
mss_s = ', '.join(mss)
mt_help = ('Select strategy to merge multiple configs from '
f'{mss_s} {defaults["merge"]}')
apsr.add_argument('-M', '--merge', choices=mss, metavar='MERGE',
help=mt_help)
apsr.add_argument('-A', '--args', help='Argument configs to override')
apsr.add_argument('--atype', choices=ctypes, metavar='ATYPE',
help=constants.ATYPE_HELP_FMT % ctypes_s)
lpog = apsr.add_argument_group('List specific options')
lpog.add_argument('-L', '--list', action='store_true',
help='List supported config types')
spog = apsr.add_argument_group('Schema specific options')
spog.add_argument('--validate', action='store_true',
help='Only validate input files and do not output. '
'You must specify schema file with -S/--schema '
'option.')
spog.add_argument('--gen-schema', action='store_true',
help='Generate JSON schema for givne config file[s] '
'and output it instead of (merged) configuration.')
gspog = apsr.add_argument_group('Query/Get/set options')
gspog.add_argument('-Q', '--query', help=constants.QUERY_HELP)
gspog.add_argument('--get', help=constants.GET_HELP)
gspog.add_argument('--set', help=constants.SET_HELP)
cpog = apsr.add_argument_group('Common options')
cpog.add_argument('-x', '--ignore-missing', action='store_true',
help='Ignore missing input files')
cpog.add_argument('-T', '--template', action='store_true',
help='Enable template config support')
cpog.add_argument('-E', '--env', action='store_true',
help='Load configuration defaults from '
'environment values')
cpog.add_argument('-S', '--schema', help='Specify Schema file[s] path')
cpog.add_argument('-e', '--extra-opts',
help='Extra options given to the API call, '
'--extra-options indent:2 (specify the '
'indent for pretty-printing of JSON outputs) '
'for example')
cpog.add_argument('-v', '--verbose', action='count', dest='loglevel',
help='Verbose mode; -v or -vv (more verbose)')
return apsr
def parse(argv: typing.List[str],
prog: typing.Optional[str] = None
) -> typing.Tuple[argparse.ArgumentParser, argparse.Namespace]:
"""
Parse given arguments ``argv`` and return it with the parser.
"""
psr = make_parser(prog=prog)
return (psr, psr.parse_args(argv))
# vim:sw=4:ts=4:et: | src/anyconfig/cli/parse_args.py | """Argument parser.
"""
import argparse
import typing
from .. import api
from . import constants, utils
DEFAULTS = dict(
loglevel=0, list=False, output=None, itype=None, otype=None, atype=None,
merge=api.MS_DICTS, ignore_missing=False, template=False, env=False,
schema=None, validate=False, gen_schema=False, extra_opts=None
)
def gen_type_help_txt(types: str, target: str = 'Input') -> str:
"""Generate a type help txt.
"""
return (f'Select type of {target} files from {types}'
'[Automatically detected by those extension]')
def make_parser(defaults: typing.Optional[typing.Dict] = None,
prog: typing.Optional[str] = None
) -> argparse.ArgumentParser:
"""Make an instance of argparse.ArgumentParser to parse arguments.
"""
if defaults is None:
defaults = DEFAULTS
ctypes: typing.List[str] = utils.list_parser_types()
ctypes_s: str = ', '.join(ctypes)
apsr = argparse.ArgumentParser(prog=prog, usage=constants.USAGE)
apsr.set_defaults(**defaults)
apsr.add_argument('inputs', type=str, nargs='*', help='Input files')
apsr.add_argument(
'--version', action='version',
version=f'%%(prog)s {".".join(api.version())}'
)
apsr.add_argument('-o', '--output', help='Output file path')
apsr.add_argument('-I', '--itype', choices=ctypes, metavar='ITYPE',
help=gen_type_help_txt(ctypes_s))
apsr.add_argument('-O', '--otype', choices=ctypes, metavar='OTYPE',
help=gen_type_help_txt(ctypes_s, 'Output'))
mss = api.MERGE_STRATEGIES
mss_s = ', '.join(mss)
mt_help = ('Select strategy to merge multiple configs from '
f'{mss_s} {defaults["merge"]}')
apsr.add_argument('-M', '--merge', choices=mss, metavar='MERGE',
help=mt_help)
apsr.add_argument('-A', '--args', help='Argument configs to override')
apsr.add_argument('--atype', choices=ctypes, metavar='ATYPE',
help=constants.ATYPE_HELP_FMT % ctypes_s)
lpog = apsr.add_argument_group('List specific options')
lpog.add_argument('-L', '--list', action='store_true',
help='List supported config types')
spog = apsr.add_argument_group('Schema specific options')
spog.add_argument('--validate', action='store_true',
help='Only validate input files and do not output. '
'You must specify schema file with -S/--schema '
'option.')
spog.add_argument('--gen-schema', action='store_true',
help='Generate JSON schema for givne config file[s] '
'and output it instead of (merged) configuration.')
gspog = apsr.add_argument_group('Query/Get/set options')
gspog.add_argument('-Q', '--query', help=constants.QUERY_HELP)
gspog.add_argument('--get', help=constants.GET_HELP)
gspog.add_argument('--set', help=constants.SET_HELP)
cpog = apsr.add_argument_group('Common options')
cpog.add_argument('-x', '--ignore-missing', action='store_true',
help='Ignore missing input files')
cpog.add_argument('-T', '--template', action='store_true',
help='Enable template config support')
cpog.add_argument('-E', '--env', action='store_true',
help='Load configuration defaults from '
'environment values')
cpog.add_argument('-S', '--schema', help='Specify Schema file[s] path')
cpog.add_argument('-e', '--extra-opts',
help='Extra options given to the API call, '
'--extra-options indent:2 (specify the '
'indent for pretty-printing of JSON outputs) '
'for example')
cpog.add_argument('-v', '--verbose', action='count', dest='loglevel',
help='Verbose mode; -v or -vv (more verbose)')
return apsr
def parse(argv: typing.List[str],
prog: typing.Optional[str] = None
) -> typing.Tuple[argparse.ArgumentParser, argparse.Namespace]:
"""
Parse given arguments ``argv`` and return it with the parser.
"""
psr = make_parser(prog=prog)
return (psr, psr.parse_args(argv))
# vim:sw=4:ts=4:et: | 0.629888 | 0.145996 |
import numpy as np
import sys
from sklearn.preprocessing import normalize
class Bee:
"""
A single bee, used in one run of the model.
A neat way of storing useful variables and methods for use by the main loop.
"""
def __init__(self, set_2013_probs):
"""
Matrix representing distances between sites.
0 1 2 3 4 5
nest, flower1, f2, f3, f4, f5
"""
self.distance_matrix = [[0, 50, 100, 120, 100, 50],
[50, 0, 50, 80, 80, 50],
[100, 50, 0, 50, 80, 80],
[120, 80, 50, 0, 50, 80],
[100, 80, 80, 50, 0, 50],
[50, 50, 80, 80, 50, 0 ]]
# enable this to manipulate the scale, as discussed in the 2013 paper:
# self.distance_matrix = np.divide(self.distance_matrix, 10)
"""
Matrix representing the probability of the bee transitioning between flower pairs.
Probabilities set as per Lihoreau 2012, with 0.1 for near and 0.6 for far.
Same indexes as distance_matrix.
"""
self.transition_probability_matrix = [[0, 0.8, 0.0, 0.0, 0.0, 0.2],
[0.6, 0, 0.6, 0.1, 0.1, 0.6],
[0.1, 0.6, 0, 0.6, 0.1, 0.1],
[0.1, 0.1, 0.6, 0, 0.6, 0.1],
[0.1, 0.1, 0.1, 0.6, 0, 0.6],
[0.6, 0.6, 0.1, 0.1, 0.6, 0 ]]
self.normalize_probability_matrix()
if set_2013_probs:
self.set_distance_style_probabilities()
# A 6*6 matrix to record each transition between locations
self.transition_recording_matrix = []
self.reset_transition_matrix()
# the bee's current location
self.location = 0
# set of unique visited locations
self.unique_visited_locations = set([])
# list of all visited locations in order
self.visited_locations = []
# the current minimum total bout distance found in this run
self.min_distance = sys.maxsize
def move(self):
"""Undertakes one location-to-location move of the bee
"""
# calculate the bee's next location
dest = self.get_destination()
# record the transition
self.transition_recording_matrix[self.location][dest] += 1
# update the bee's location
self.location = dest
# add the new location to location lists
self.unique_visited_locations.add(self.location)
self.visited_locations.append(self.location)
def get_destination(self):
"""Returns the next location for the bee, calculated using the probability matrix
"""
return np.random.choice([0,1,2,3,4,5], p=self.transition_probability_matrix[self.location])
# when enabled, the following disallows choosing the previous location:
# if len(self.visited_locations) > 1:
# while choice == self.visited_locations[-2]:
# choice = np.random.choice([0,1,2,3,4,5], p=self.transition_probability_matrix[self.location])
# return choice
def get_total_distance(self):
"""Returns the summed distance of all transitions currently stored
"""
return np.sum(np.multiply(self.distance_matrix, self.transition_recording_matrix))
def reset_transition_matrix(self):
"""Sets the transiton recording matrix to all zeros
"""
self.transition_recording_matrix = np.zeros((6,6), dtype=int)
def normalize_probability_matrix(self):
"""Goes through each row of the probability matrix and normalizes the values (0-1)
"""
self.transition_probability_matrix = normalize(self.transition_probability_matrix,
axis=1, norm='l1')
def update_probability_matrix(self, prob_enhancement_factor):
"""
Updates the probability matrix, heightening probabilities of the transitions
in the bout currently stored in the transition_recording_matrix by a given factor
"""
# create a matrix to multiply the probability matrix with
mult_mat = np.multiply(self.transition_recording_matrix, prob_enhancement_factor)
# make all 0s 1s, so probs remain the same when multiplied by this matrix
np.place(mult_mat, mult_mat == 0, 1)
self.transition_probability_matrix = np.multiply(self.transition_probability_matrix, mult_mat)
self.normalize_probability_matrix() # don't forget to normalize those probabilities!
def set_distance_style_probabilities(self):
"""
Sets the initial transition probability matrix as per the 2013 paper.
Probabilities are inversely proportional to the squared distance between flowers,
normalized with respect to all flowers.
"""
new_probs = np.copy(self.distance_matrix)
new_probs = np.power(new_probs, 2)
for i, row in enumerate(new_probs):
row_sum = sum(row)
for j in range(len(new_probs)):
if row[j] != 0:
row[j] = row_sum - row[j]
self.transition_probability_matrix = new_probs
self.normalize_probability_matrix() | trapline_formation/singleBee.py | import numpy as np
import sys
from sklearn.preprocessing import normalize
class Bee:
"""
A single bee, used in one run of the model.
A neat way of storing useful variables and methods for use by the main loop.
"""
def __init__(self, set_2013_probs):
"""
Matrix representing distances between sites.
0 1 2 3 4 5
nest, flower1, f2, f3, f4, f5
"""
self.distance_matrix = [[0, 50, 100, 120, 100, 50],
[50, 0, 50, 80, 80, 50],
[100, 50, 0, 50, 80, 80],
[120, 80, 50, 0, 50, 80],
[100, 80, 80, 50, 0, 50],
[50, 50, 80, 80, 50, 0 ]]
# enable this to manipulate the scale, as discussed in the 2013 paper:
# self.distance_matrix = np.divide(self.distance_matrix, 10)
"""
Matrix representing the probability of the bee transitioning between flower pairs.
Probabilities set as per Lihoreau 2012, with 0.1 for near and 0.6 for far.
Same indexes as distance_matrix.
"""
self.transition_probability_matrix = [[0, 0.8, 0.0, 0.0, 0.0, 0.2],
[0.6, 0, 0.6, 0.1, 0.1, 0.6],
[0.1, 0.6, 0, 0.6, 0.1, 0.1],
[0.1, 0.1, 0.6, 0, 0.6, 0.1],
[0.1, 0.1, 0.1, 0.6, 0, 0.6],
[0.6, 0.6, 0.1, 0.1, 0.6, 0 ]]
self.normalize_probability_matrix()
if set_2013_probs:
self.set_distance_style_probabilities()
# A 6*6 matrix to record each transition between locations
self.transition_recording_matrix = []
self.reset_transition_matrix()
# the bee's current location
self.location = 0
# set of unique visited locations
self.unique_visited_locations = set([])
# list of all visited locations in order
self.visited_locations = []
# the current minimum total bout distance found in this run
self.min_distance = sys.maxsize
def move(self):
"""Undertakes one location-to-location move of the bee
"""
# calculate the bee's next location
dest = self.get_destination()
# record the transition
self.transition_recording_matrix[self.location][dest] += 1
# update the bee's location
self.location = dest
# add the new location to location lists
self.unique_visited_locations.add(self.location)
self.visited_locations.append(self.location)
def get_destination(self):
"""Returns the next location for the bee, calculated using the probability matrix
"""
return np.random.choice([0,1,2,3,4,5], p=self.transition_probability_matrix[self.location])
# when enabled, the following disallows choosing the previous location:
# if len(self.visited_locations) > 1:
# while choice == self.visited_locations[-2]:
# choice = np.random.choice([0,1,2,3,4,5], p=self.transition_probability_matrix[self.location])
# return choice
def get_total_distance(self):
"""Returns the summed distance of all transitions currently stored
"""
return np.sum(np.multiply(self.distance_matrix, self.transition_recording_matrix))
def reset_transition_matrix(self):
"""Sets the transiton recording matrix to all zeros
"""
self.transition_recording_matrix = np.zeros((6,6), dtype=int)
def normalize_probability_matrix(self):
"""Goes through each row of the probability matrix and normalizes the values (0-1)
"""
self.transition_probability_matrix = normalize(self.transition_probability_matrix,
axis=1, norm='l1')
def update_probability_matrix(self, prob_enhancement_factor):
"""
Updates the probability matrix, heightening probabilities of the transitions
in the bout currently stored in the transition_recording_matrix by a given factor
"""
# create a matrix to multiply the probability matrix with
mult_mat = np.multiply(self.transition_recording_matrix, prob_enhancement_factor)
# make all 0s 1s, so probs remain the same when multiplied by this matrix
np.place(mult_mat, mult_mat == 0, 1)
self.transition_probability_matrix = np.multiply(self.transition_probability_matrix, mult_mat)
self.normalize_probability_matrix() # don't forget to normalize those probabilities!
def set_distance_style_probabilities(self):
"""
Sets the initial transition probability matrix as per the 2013 paper.
Probabilities are inversely proportional to the squared distance between flowers,
normalized with respect to all flowers.
"""
new_probs = np.copy(self.distance_matrix)
new_probs = np.power(new_probs, 2)
for i, row in enumerate(new_probs):
row_sum = sum(row)
for j in range(len(new_probs)):
if row[j] != 0:
row[j] = row_sum - row[j]
self.transition_probability_matrix = new_probs
self.normalize_probability_matrix() | 0.528047 | 0.603611 |
from __future__ import division, absolute_import, unicode_literals, print_function
import h5py
import numpy as np
from collections import Iterable
__all__ = ['flatten_complex_to_real', 'get_compound_sub_dtypes', 'flatten_compound_to_real', 'check_dtype',
'stack_real_to_complex', 'validate_dtype',
'stack_real_to_compound', 'stack_real_to_target_dtype', 'flatten_to_real', 'contains_integers']
def contains_integers(iter_int, min_val=None):
"""
Checks if the provided object is iterable (list, tuple etc.) and contains integers optionally greater than equal to
the provided min_val
Parameters
----------
iter_int : Iterable of integers
Iterable of integers
min_val : int, optional, default = None
The value above which each element of iterable must possess. By default, this is ignored.
Returns
-------
bool
Whether or not the provided object is an iterable of integers
"""
if not isinstance(iter_int, Iterable):
raise TypeError('iter_int should be an Iterable')
if len(iter_int) == 0:
return False
try:
if min_val is not None:
if min_val % 1 != 0:
raise ValueError('min_val should be an integer')
return np.all([x % 1 == 0 and x >= min_val for x in iter_int])
else:
return np.all([x % 1 == 0 for x in iter_int])
except TypeError:
return False
def flatten_complex_to_real(ds_main):
"""
Stacks the real values followed by the imaginary values in the last dimension of the given N dimensional matrix.
Thus a complex matrix of shape (2, 3, 5) will turn into a matrix of shape (2, 3, 10)
Parameters
----------
ds_main : complex array-like or h5py.Dataset
Dataset of interest
Returns
-------
retval : ND real numpy array
"""
if not isinstance(ds_main, (h5py.Dataset, np.ndarray)):
raise TypeError('ds_main should either be a h5py.Dataset or numpy array')
if not is_complex_dtype(ds_main.dtype):
raise TypeError("Expected a complex valued matrix")
axis = np.array(ds_main).ndim - 1
if axis == -1:
return np.hstack([np.real(ds_main), np.imag(ds_main)])
else: # along the last axis
return np.concatenate([np.real(ds_main), np.imag(ds_main)], axis=axis)
def flatten_compound_to_real(ds_main):
"""
Flattens the individual components in a structured array or compound valued hdf5 dataset along the last axis to form
a real valued array. Thus a compound h5py.Dataset or structured numpy matrix of shape (2, 3, 5) having 3 components
will turn into a real valued matrix of shape (2, 3, 15), assuming that all the sub-dtypes of the matrix are real
valued. ie - this function does not handle structured dtypes having complex values
Parameters
----------
ds_main : numpy array that is a structured array or h5py.Dataset of compound dtype
Dataset of interest
Returns
-------
retval : n-dimensional real numpy array
real valued dataset
"""
if isinstance(ds_main, h5py.Dataset):
if len(ds_main.dtype) == 0:
raise TypeError("Expected compound h5py dataset")
return np.concatenate([np.array(ds_main[name]) for name in ds_main.dtype.names], axis=len(ds_main.shape) - 1)
elif isinstance(ds_main, np.ndarray):
if len(ds_main.dtype) == 0:
raise TypeError("Expected structured numpy array")
if ds_main.ndim > 0:
return np.concatenate([ds_main[name] for name in ds_main.dtype.names], axis=ds_main.ndim - 1)
else:
return np.hstack([ds_main[name] for name in ds_main.dtype.names])
elif isinstance(ds_main, np.void):
return np.hstack([ds_main[name] for name in ds_main.dtype.names])
else:
raise TypeError('Datatype {} not supported in struct_to_scalar'.format(type(ds_main)))
def flatten_to_real(ds_main):
"""
Flattens complex / compound / real valued arrays to real valued arrays
Parameters
----------
ds_main : nD compound, complex or real numpy array or HDF5 dataset
Data that could be compound, complex or real
Returns
----------
ds_main : nD numpy array
Data raveled to a float data type
"""
if not isinstance(ds_main, (h5py.Dataset, np.ndarray)):
ds_main = np.array(ds_main)
if is_complex_dtype(ds_main.dtype):
return flatten_complex_to_real(ds_main)
elif len(ds_main.dtype) > 0:
return flatten_compound_to_real(ds_main)
else:
return ds_main
def get_compound_sub_dtypes(struct_dtype):
"""
Returns a dictionary of the dtypes of each of the fields in the given structured array dtype
Parameters
----------
struct_dtype : numpy.dtype object
dtype of a structured array
Returns
-------
dtypes : dict
Dictionary whose keys are the field names and values are the corresponding dtypes
"""
if not isinstance(struct_dtype, np.dtype):
raise TypeError('Provided object must be a structured array dtype')
dtypes = dict()
for field_name in struct_dtype.fields:
dtypes[field_name] = struct_dtype.fields[field_name][0]
return dtypes
def check_dtype(h5_dset):
"""
Checks the datatype of the input HDF5 dataset and provides the appropriate
function calls to convert it to a float
Parameters
----------
h5_dset : HDF5 Dataset
Dataset of interest
Returns
-------
func : function
function that will convert the dataset to a float
is_complex : Boolean
is the input dataset complex?
is_compound : Boolean
is the input dataset compound?
n_features : Unsigned integer, the length of the 2nd dimension of
the data after func is called on it
type_mult : Unsigned integer
multiplier that converts from the typesize of the input dtype to the
typesize of the data after func is run on it
"""
if not isinstance(h5_dset, h5py.Dataset):
raise TypeError('h5_dset should be a h5py.Dataset object')
is_complex = False
is_compound = False
in_dtype = h5_dset.dtype
# TODO: avoid assuming 2d shape - why does one even need n_samples!? We only care about the last dimension!
n_features = h5_dset.shape[-1]
if is_complex_dtype(h5_dset.dtype):
is_complex = True
new_dtype = np.real(h5_dset[0, 0]).dtype
type_mult = new_dtype.itemsize * 2
func = flatten_complex_to_real
n_features *= 2
elif len(h5_dset.dtype) > 0:
"""
Some form of structured numpy is in use
We only support real scalars for the component types at the current time
"""
is_compound = True
# TODO: Avoid hard-coding to float32
new_dtype = np.float32
type_mult = len(in_dtype) * new_dtype(0).itemsize
func = flatten_compound_to_real
n_features *= len(in_dtype)
else:
if h5_dset.dtype not in [np.float32, np.float64]:
new_dtype = np.float32
else:
new_dtype = h5_dset.dtype.type
type_mult = new_dtype(0).itemsize
func = new_dtype
return func, is_complex, is_compound, n_features, type_mult
def stack_real_to_complex(ds_real):
"""
Puts the real and imaginary sections of the provided matrix (in the last axis) together to make complex matrix
Parameters
------------
ds_real : n dimensional real-valued numpy array or h5py.Dataset
Data arranged as [instance, 2 x features]
where the first half of the features are the real component and the
second half contains the imaginary components
Returns
----------
ds_compound : 2D complex numpy array
Data arranged as [sample, features]
"""
if not isinstance(ds_real, (np.ndarray, h5py.Dataset)):
if not isinstance(ds_real, Iterable):
raise TypeError("Expected at least an iterable like a list or tuple")
ds_real = np.array(ds_real)
if len(ds_real.dtype) > 0:
raise TypeError("Array cannot have a compound dtype")
if is_complex_dtype(ds_real.dtype):
raise TypeError("Array cannot have complex dtype")
if ds_real.shape[-1] / 2 != ds_real.shape[-1] // 2:
raise ValueError("Last dimension must be even sized")
half_point = ds_real.shape[-1] // 2
return ds_real[..., :half_point] + 1j * ds_real[..., half_point:]
def stack_real_to_compound(ds_real, compound_type):
"""
Converts a real-valued dataset to a compound dataset (along the last axis) of the provided compound d-type
Parameters
------------
ds_real : n dimensional real-valued numpy array or h5py.Dataset
Data arranged as [instance, features]
compound_type : dtype
Target complex datatype
Returns
----------
ds_compound : ND complex numpy array
Data arranged as [sample, features]
"""
if not isinstance(ds_real, (np.ndarray, h5py.Dataset)):
if not isinstance(ds_real, Iterable):
raise TypeError("Expected at least an iterable like a list or tuple")
ds_real = np.array(ds_real)
if len(ds_real.dtype) > 0:
raise TypeError("Array cannot have a compound dtype")
elif is_complex_dtype(ds_real.dtype):
raise TypeError("Array cannot have complex dtype")
if not isinstance(compound_type, np.dtype):
raise TypeError('Provided object must be a structured array dtype')
new_spec_length = ds_real.shape[-1] / len(compound_type)
if new_spec_length % 1:
raise ValueError('Provided compound type was not compatible by number of elements')
new_spec_length = int(new_spec_length)
new_shape = list(ds_real.shape) # Make mutable
new_shape[-1] = new_spec_length
ds_compound = np.empty(new_shape, dtype=compound_type)
for name_ind, name in enumerate(compound_type.names):
i_start = name_ind * new_spec_length
i_end = (name_ind + 1) * new_spec_length
ds_compound[name] = ds_real[..., i_start:i_end]
return np.squeeze(ds_compound)
def stack_real_to_target_dtype(ds_real, new_dtype):
"""
Transforms real data into the target dtype
Parameters
----------
ds_real : nD real numpy array or HDF5 dataset
Source dataset
new_dtype : dtype
Target data type
Returns
----------
ret_val : nD numpy array
Data of the target data type
"""
if is_complex_dtype(new_dtype):
return stack_real_to_complex(ds_real)
elif len(new_dtype) > 0:
return stack_real_to_compound(ds_real, new_dtype)
else:
return new_dtype.type(ds_real)
def validate_dtype(dtype):
"""
Checks the provided object to ensure that it is a valid dtype that can be written to an HDF5 file.
Raises a type error if invalid. Returns True if the object passed the tests
Parameters
----------
dtype : object
Object that is hopefully a h5py.Datatype, np.dtype object.
Returns
-------
status : bool
True if the object was a valid dtype
"""
if isinstance(dtype, (h5py.Datatype, np.dtype)):
pass
elif isinstance(np.dtype(dtype), np.dtype):
# This should catch all those instances when dtype is something familiar like - np.float32
pass
else:
raise TypeError('dtype should either be a numpy or h5py dtype')
return True
def is_complex_dtype(dtype):
"""
Checks if the provided dtype is a complex dtype
Parameters
----------
dtype : object
Object that is a h5py.Datatype, np.dtype object.
Returns
-------
is_complex : bool
True if the dtype was a complex dtype. Else returns False
"""
validate_dtype(dtype)
if dtype in [np.complex, np.complex64, np.complex128]:
return True
return False | pycroscopy/core/io/dtype_utils.py | from __future__ import division, absolute_import, unicode_literals, print_function
import h5py
import numpy as np
from collections import Iterable
__all__ = ['flatten_complex_to_real', 'get_compound_sub_dtypes', 'flatten_compound_to_real', 'check_dtype',
'stack_real_to_complex', 'validate_dtype',
'stack_real_to_compound', 'stack_real_to_target_dtype', 'flatten_to_real', 'contains_integers']
def contains_integers(iter_int, min_val=None):
"""
Checks if the provided object is iterable (list, tuple etc.) and contains integers optionally greater than equal to
the provided min_val
Parameters
----------
iter_int : Iterable of integers
Iterable of integers
min_val : int, optional, default = None
The value above which each element of iterable must possess. By default, this is ignored.
Returns
-------
bool
Whether or not the provided object is an iterable of integers
"""
if not isinstance(iter_int, Iterable):
raise TypeError('iter_int should be an Iterable')
if len(iter_int) == 0:
return False
try:
if min_val is not None:
if min_val % 1 != 0:
raise ValueError('min_val should be an integer')
return np.all([x % 1 == 0 and x >= min_val for x in iter_int])
else:
return np.all([x % 1 == 0 for x in iter_int])
except TypeError:
return False
def flatten_complex_to_real(ds_main):
"""
Stacks the real values followed by the imaginary values in the last dimension of the given N dimensional matrix.
Thus a complex matrix of shape (2, 3, 5) will turn into a matrix of shape (2, 3, 10)
Parameters
----------
ds_main : complex array-like or h5py.Dataset
Dataset of interest
Returns
-------
retval : ND real numpy array
"""
if not isinstance(ds_main, (h5py.Dataset, np.ndarray)):
raise TypeError('ds_main should either be a h5py.Dataset or numpy array')
if not is_complex_dtype(ds_main.dtype):
raise TypeError("Expected a complex valued matrix")
axis = np.array(ds_main).ndim - 1
if axis == -1:
return np.hstack([np.real(ds_main), np.imag(ds_main)])
else: # along the last axis
return np.concatenate([np.real(ds_main), np.imag(ds_main)], axis=axis)
def flatten_compound_to_real(ds_main):
"""
Flattens the individual components in a structured array or compound valued hdf5 dataset along the last axis to form
a real valued array. Thus a compound h5py.Dataset or structured numpy matrix of shape (2, 3, 5) having 3 components
will turn into a real valued matrix of shape (2, 3, 15), assuming that all the sub-dtypes of the matrix are real
valued. ie - this function does not handle structured dtypes having complex values
Parameters
----------
ds_main : numpy array that is a structured array or h5py.Dataset of compound dtype
Dataset of interest
Returns
-------
retval : n-dimensional real numpy array
real valued dataset
"""
if isinstance(ds_main, h5py.Dataset):
if len(ds_main.dtype) == 0:
raise TypeError("Expected compound h5py dataset")
return np.concatenate([np.array(ds_main[name]) for name in ds_main.dtype.names], axis=len(ds_main.shape) - 1)
elif isinstance(ds_main, np.ndarray):
if len(ds_main.dtype) == 0:
raise TypeError("Expected structured numpy array")
if ds_main.ndim > 0:
return np.concatenate([ds_main[name] for name in ds_main.dtype.names], axis=ds_main.ndim - 1)
else:
return np.hstack([ds_main[name] for name in ds_main.dtype.names])
elif isinstance(ds_main, np.void):
return np.hstack([ds_main[name] for name in ds_main.dtype.names])
else:
raise TypeError('Datatype {} not supported in struct_to_scalar'.format(type(ds_main)))
def flatten_to_real(ds_main):
"""
Flattens complex / compound / real valued arrays to real valued arrays
Parameters
----------
ds_main : nD compound, complex or real numpy array or HDF5 dataset
Data that could be compound, complex or real
Returns
----------
ds_main : nD numpy array
Data raveled to a float data type
"""
if not isinstance(ds_main, (h5py.Dataset, np.ndarray)):
ds_main = np.array(ds_main)
if is_complex_dtype(ds_main.dtype):
return flatten_complex_to_real(ds_main)
elif len(ds_main.dtype) > 0:
return flatten_compound_to_real(ds_main)
else:
return ds_main
def get_compound_sub_dtypes(struct_dtype):
"""
Returns a dictionary of the dtypes of each of the fields in the given structured array dtype
Parameters
----------
struct_dtype : numpy.dtype object
dtype of a structured array
Returns
-------
dtypes : dict
Dictionary whose keys are the field names and values are the corresponding dtypes
"""
if not isinstance(struct_dtype, np.dtype):
raise TypeError('Provided object must be a structured array dtype')
dtypes = dict()
for field_name in struct_dtype.fields:
dtypes[field_name] = struct_dtype.fields[field_name][0]
return dtypes
def check_dtype(h5_dset):
"""
Checks the datatype of the input HDF5 dataset and provides the appropriate
function calls to convert it to a float
Parameters
----------
h5_dset : HDF5 Dataset
Dataset of interest
Returns
-------
func : function
function that will convert the dataset to a float
is_complex : Boolean
is the input dataset complex?
is_compound : Boolean
is the input dataset compound?
n_features : Unsigned integer, the length of the 2nd dimension of
the data after func is called on it
type_mult : Unsigned integer
multiplier that converts from the typesize of the input dtype to the
typesize of the data after func is run on it
"""
if not isinstance(h5_dset, h5py.Dataset):
raise TypeError('h5_dset should be a h5py.Dataset object')
is_complex = False
is_compound = False
in_dtype = h5_dset.dtype
# TODO: avoid assuming 2d shape - why does one even need n_samples!? We only care about the last dimension!
n_features = h5_dset.shape[-1]
if is_complex_dtype(h5_dset.dtype):
is_complex = True
new_dtype = np.real(h5_dset[0, 0]).dtype
type_mult = new_dtype.itemsize * 2
func = flatten_complex_to_real
n_features *= 2
elif len(h5_dset.dtype) > 0:
"""
Some form of structured numpy is in use
We only support real scalars for the component types at the current time
"""
is_compound = True
# TODO: Avoid hard-coding to float32
new_dtype = np.float32
type_mult = len(in_dtype) * new_dtype(0).itemsize
func = flatten_compound_to_real
n_features *= len(in_dtype)
else:
if h5_dset.dtype not in [np.float32, np.float64]:
new_dtype = np.float32
else:
new_dtype = h5_dset.dtype.type
type_mult = new_dtype(0).itemsize
func = new_dtype
return func, is_complex, is_compound, n_features, type_mult
def stack_real_to_complex(ds_real):
"""
Puts the real and imaginary sections of the provided matrix (in the last axis) together to make complex matrix
Parameters
------------
ds_real : n dimensional real-valued numpy array or h5py.Dataset
Data arranged as [instance, 2 x features]
where the first half of the features are the real component and the
second half contains the imaginary components
Returns
----------
ds_compound : 2D complex numpy array
Data arranged as [sample, features]
"""
if not isinstance(ds_real, (np.ndarray, h5py.Dataset)):
if not isinstance(ds_real, Iterable):
raise TypeError("Expected at least an iterable like a list or tuple")
ds_real = np.array(ds_real)
if len(ds_real.dtype) > 0:
raise TypeError("Array cannot have a compound dtype")
if is_complex_dtype(ds_real.dtype):
raise TypeError("Array cannot have complex dtype")
if ds_real.shape[-1] / 2 != ds_real.shape[-1] // 2:
raise ValueError("Last dimension must be even sized")
half_point = ds_real.shape[-1] // 2
return ds_real[..., :half_point] + 1j * ds_real[..., half_point:]
def stack_real_to_compound(ds_real, compound_type):
"""
Converts a real-valued dataset to a compound dataset (along the last axis) of the provided compound d-type
Parameters
------------
ds_real : n dimensional real-valued numpy array or h5py.Dataset
Data arranged as [instance, features]
compound_type : dtype
Target complex datatype
Returns
----------
ds_compound : ND complex numpy array
Data arranged as [sample, features]
"""
if not isinstance(ds_real, (np.ndarray, h5py.Dataset)):
if not isinstance(ds_real, Iterable):
raise TypeError("Expected at least an iterable like a list or tuple")
ds_real = np.array(ds_real)
if len(ds_real.dtype) > 0:
raise TypeError("Array cannot have a compound dtype")
elif is_complex_dtype(ds_real.dtype):
raise TypeError("Array cannot have complex dtype")
if not isinstance(compound_type, np.dtype):
raise TypeError('Provided object must be a structured array dtype')
new_spec_length = ds_real.shape[-1] / len(compound_type)
if new_spec_length % 1:
raise ValueError('Provided compound type was not compatible by number of elements')
new_spec_length = int(new_spec_length)
new_shape = list(ds_real.shape) # Make mutable
new_shape[-1] = new_spec_length
ds_compound = np.empty(new_shape, dtype=compound_type)
for name_ind, name in enumerate(compound_type.names):
i_start = name_ind * new_spec_length
i_end = (name_ind + 1) * new_spec_length
ds_compound[name] = ds_real[..., i_start:i_end]
return np.squeeze(ds_compound)
def stack_real_to_target_dtype(ds_real, new_dtype):
"""
Transforms real data into the target dtype
Parameters
----------
ds_real : nD real numpy array or HDF5 dataset
Source dataset
new_dtype : dtype
Target data type
Returns
----------
ret_val : nD numpy array
Data of the target data type
"""
if is_complex_dtype(new_dtype):
return stack_real_to_complex(ds_real)
elif len(new_dtype) > 0:
return stack_real_to_compound(ds_real, new_dtype)
else:
return new_dtype.type(ds_real)
def validate_dtype(dtype):
"""
Checks the provided object to ensure that it is a valid dtype that can be written to an HDF5 file.
Raises a type error if invalid. Returns True if the object passed the tests
Parameters
----------
dtype : object
Object that is hopefully a h5py.Datatype, np.dtype object.
Returns
-------
status : bool
True if the object was a valid dtype
"""
if isinstance(dtype, (h5py.Datatype, np.dtype)):
pass
elif isinstance(np.dtype(dtype), np.dtype):
# This should catch all those instances when dtype is something familiar like - np.float32
pass
else:
raise TypeError('dtype should either be a numpy or h5py dtype')
return True
def is_complex_dtype(dtype):
"""
Checks if the provided dtype is a complex dtype
Parameters
----------
dtype : object
Object that is a h5py.Datatype, np.dtype object.
Returns
-------
is_complex : bool
True if the dtype was a complex dtype. Else returns False
"""
validate_dtype(dtype)
if dtype in [np.complex, np.complex64, np.complex128]:
return True
return False | 0.782621 | 0.575767 |
from __future__ import annotations
from typing import Type, Any
class _InvalidCase:
"""Sentinel object for defining when a case is not met."""
_INVALID_CASE = _InvalidCase()
class _UndefinedEval:
"""Sentinel object for defining when a switch statement has not been evaluated yet."""
_UNDEFINED_EVAL = _UndefinedEval()
_CASE_FLAG_NAME = "_is_case_method"
# A case is 'default' when its predicate is always true.
# Declaring a variable named 'default' is simply to increase
# readability for the user.
default = True
def resolve(s: Type[switch]) -> Any:
"""Decorator for auto-resolving switch statement after its declaration."""
return s.eval()
class switch:
"""
Is a switch-case implementation.
Use by inheriting from this class, decorating case methods with
`case(predicate)`, and optionally decorating subclass with `resolve`
to evaluate the switch-case statement immediately after declaration.
"""
__slots__ = []
_cached_eval = _UNDEFINED_EVAL
@staticmethod
def case(predicate):
"""A decorator that defines default behavior for case function definitions in a class."""
is_correct_case = bool(predicate)
def decorator(function):
def wrapper(*args, **kwargs):
if not is_correct_case:
return _INVALID_CASE
result = function(*args, **kwargs)
return result
wrapper.__setattr__(_CASE_FLAG_NAME, True)
return wrapper
return decorator
@classmethod
def eval(cls):
"""Resolves the switch statement, and returns the accepted case's returning value."""
if cls._cached_eval is not _UNDEFINED_EVAL:
return cls._cached_eval
case_methods = [
x
for x in cls.__dict__.values()
if callable(x) and x.__dict__.get(_CASE_FLAG_NAME, False)
]
for func in case_methods:
result = func(cls)
if result is not _INVALID_CASE:
cls._cached_eval = result
return result
raise ValueError("There is no case with a True predicate.")
def __new__(cls, *args, **kwargs):
raise TypeError(f"{cls} cannot be instantiated.") | switch/__init__.py | from __future__ import annotations
from typing import Type, Any
class _InvalidCase:
"""Sentinel object for defining when a case is not met."""
_INVALID_CASE = _InvalidCase()
class _UndefinedEval:
"""Sentinel object for defining when a switch statement has not been evaluated yet."""
_UNDEFINED_EVAL = _UndefinedEval()
_CASE_FLAG_NAME = "_is_case_method"
# A case is 'default' when its predicate is always true.
# Declaring a variable named 'default' is simply to increase
# readability for the user.
default = True
def resolve(s: Type[switch]) -> Any:
"""Decorator for auto-resolving switch statement after its declaration."""
return s.eval()
class switch:
"""
Is a switch-case implementation.
Use by inheriting from this class, decorating case methods with
`case(predicate)`, and optionally decorating subclass with `resolve`
to evaluate the switch-case statement immediately after declaration.
"""
__slots__ = []
_cached_eval = _UNDEFINED_EVAL
@staticmethod
def case(predicate):
"""A decorator that defines default behavior for case function definitions in a class."""
is_correct_case = bool(predicate)
def decorator(function):
def wrapper(*args, **kwargs):
if not is_correct_case:
return _INVALID_CASE
result = function(*args, **kwargs)
return result
wrapper.__setattr__(_CASE_FLAG_NAME, True)
return wrapper
return decorator
@classmethod
def eval(cls):
"""Resolves the switch statement, and returns the accepted case's returning value."""
if cls._cached_eval is not _UNDEFINED_EVAL:
return cls._cached_eval
case_methods = [
x
for x in cls.__dict__.values()
if callable(x) and x.__dict__.get(_CASE_FLAG_NAME, False)
]
for func in case_methods:
result = func(cls)
if result is not _INVALID_CASE:
cls._cached_eval = result
return result
raise ValueError("There is no case with a True predicate.")
def __new__(cls, *args, **kwargs):
raise TypeError(f"{cls} cannot be instantiated.") | 0.932622 | 0.270003 |
from __future__ import print_function
import sys
import json
import regex
from tqdm import tqdm
from .filters import style
from .utils import clone_pull, execute_cmd, is_git_dir, utf8_decode
from .parser import parse_arguments
from .clients import render
def main(args=sys.argv[1:], render_results=True, skip_clone_pull=False):
investigator = Poirot(args, render_results, skip_clone_pull)
for pattern in tqdm(investigator.info["patterns"]):
investigator.search(utf8_decode(pattern))
return investigator.get_results()
class Poirot(object):
def __init__(self, args, render_results=True, skip_clone_pull=True):
self.render_results = render_results
self.info = parse_arguments(args)
self.results = {utf8_decode(p): {} for p in self.info["patterns"]}
if self.info["staged"]:
is_git_dir(self.info["git_dir"])
elif self.info["git_url"] and not skip_clone_pull:
clone_pull(self.info["git_url"], self.info["repo_dir"])
def search(self, pattern):
"""
Delegates to add_staged_results or add_committed_results
"""
if self.info["staged"]:
self.add_staged_results(pattern)
else:
self.add_committed_results(pattern)
def add_staged_results(self, pattern):
"""
Adds staged matches to results
"""
result = self.search_staged(pattern)
self.results[pattern] = {"staged": {"files": result}} if result else {}
def search_staged(self, pattern):
"""
Takes a text pattern and local repo directory and returns the
file name, line number, and text matching the given pattern in
a staged revision.
"""
cmd = ["git", "diff", "--staged", "--unified=0", "--",
self.info["repo_dir"]]
(out, err) = execute_cmd(cmd)
return self.parse_diff(diff=out, pattern=pattern)
def add_committed_results(self, pattern):
"""
Adds committed matches (logs, messages) to results
"""
def merge_committed(target, commit_range):
"""
Reads in yielded commit sha and pattern match information from
search_committed. Adds to pattern matches for for the particular
commit.
"""
for commit, metadata in self.search_committed(target, pattern, commit_range):
self.results[pattern].setdefault(commit, {}).update(metadata)
for commit_range in self.info["revlist"]:
merge_committed("diff", commit_range)
merge_committed("message", commit_range)
def search_committed(self, target, pattern, commit_range):
"""
Searches within a range of commits for commit messages or diffs
containing the text pattern. Yields a matching revision's SHA
and the message or file name, line number, text matching the
given pattern, and authorship.
"""
for log in self.get_logs(target, pattern, commit_range):
sha, metadata = self.parse_log(log)
if target == "message":
yield sha, metadata
else:
# show the diffs for a given commit
cmd = ["git", "--git-dir", self.info["git_dir"],
"show", sha, "--no-color", "--unified=0"]
(out, err) = execute_cmd(cmd)
file_diffs = self.parse_diff(diff=out, pattern=pattern)
if file_diffs:
metadata["files"] = file_diffs
yield sha, metadata
def get_logs(self, target, pattern, commit_range):
"""
Searches and returns git logs in a range of revisions that match a
specified pattern, either in the message or modified lines.
"""
cmd = ["git", "--git-dir", self.info["git_dir"], "log",
commit_range, "-i", "-E", "--oneline"]
if target == "message":
cmd.extend(["--format=COMMIT: %h AUTHORDATE: %aD AUTHORNAME: %an AUTHOREMAIL: %ae LOG: %s %b"])
cmd.extend(["--grep", pattern]) # limits matching to the log message
elif target == "diff":
cmd.extend(["--format=COMMIT: %h AUTHORDATE: %aD AUTHORNAME: %an AUTHOREMAIL: %ae"])
cmd.extend(["-G" + pattern]) # matches on added/removed lines
if self.info["author"]:
cmd.extend(["--author", self.info["author"]])
if self.info["before"]:
cmd.extend(["--before", self.info["before"]])
if self.info["after"]:
cmd.extend(["--after", self.info["after"]])
(out, err) = execute_cmd(cmd)
return out.strip().split("COMMIT: ")[1:]
@staticmethod
def parse_log(log):
"""
Parses the information contained in a pretty-formatted
--oneline commit log (i.e. the commit SHA, author's
name and email, the date the commit was authored, and,
optionally, the commit's message).
"""
metadata = {}
sha, log = log.split(" AUTHORDATE: ", 1)
metadata["author_date"], log = log.split(" AUTHORNAME: ", 1)
metadata["author_name"], log = log.split(" AUTHOREMAIL: ", 1)
try:
metadata["author_email"], metadata["message"] = log.split(" LOG: ", 1)
metadata["message"] = utf8_decode(metadata["message"])
except ValueError:
metadata["author_email"] = log
metadata = {key: metadata[key].strip() for key in metadata.keys()}
return sha, metadata
@staticmethod
def parse_diff(diff, pattern):
"""
Takes a single commit's diff and pattern. Returns the files
and lines in the revision that match the pattern.
"""
def split_diff(diff):
"""
Divides a diff into the file name and the rest of its
(split by newline).
"""
deleted_re = regex.compile(r"^deleted file")
try:
diff = diff.split("\n", 2)
if not deleted_re.match(diff[1]):
filename = diff[0].split(" b/", 1)[1]
diff = "@@" + diff[2].split("@@", 1)[1]
diff_text = diff.split("\n")
return filename, diff_text
except IndexError:
pass
def find_matches_in_diff(diff_text):
"""
Takes the lines from a file's diff and yields them
if they were added and match the given pattern.
"""
line_re = regex.compile(r"@@ \-[0-9,]+ \+([0-9]+)[, ].*")
pattern_re = regex.compile(pattern, regex.I)
line_num = 0
for line in diff_text:
line = utf8_decode(line)
if not line:
pass
elif line[0] == "@":
line_num = int(regex.sub(line_re, r"\1", line))
elif line[0] == "+":
if pattern_re.search(line):
yield {"line": line_num, "text": line[1:].strip()}
line_num += 1
try:
if isinstance(diff, bytes):
diff = diff.decode() # coerce bytes type to str
except:
pass
files = []
file_diffs = diff.split("diff --git ")[1:] # split the diff by file modified
for file_diff in file_diffs:
try:
(filename, diff_text) = split_diff(file_diff)
matches = [m for m in find_matches_in_diff(diff_text)]
if matches:
files.append({"file": filename, "matches": matches})
except TypeError: # ignore empty lines
pass
return files
def get_results(self):
if self.info["output"]:
with open(self.info["output"], "w") as outfile:
json.dump(self.results, outfile, ensure_ascii=False, indent=4)
if not self.render_results:
return self.results
if any(self.results.values()):
render(self.results, self.info)
sys.exit(1)
else:
print(style("Poirot didn't find anything!", "darkblue"))
sys.exit(0) | poirot/poirot.py |
from __future__ import print_function
import sys
import json
import regex
from tqdm import tqdm
from .filters import style
from .utils import clone_pull, execute_cmd, is_git_dir, utf8_decode
from .parser import parse_arguments
from .clients import render
def main(args=sys.argv[1:], render_results=True, skip_clone_pull=False):
investigator = Poirot(args, render_results, skip_clone_pull)
for pattern in tqdm(investigator.info["patterns"]):
investigator.search(utf8_decode(pattern))
return investigator.get_results()
class Poirot(object):
def __init__(self, args, render_results=True, skip_clone_pull=True):
self.render_results = render_results
self.info = parse_arguments(args)
self.results = {utf8_decode(p): {} for p in self.info["patterns"]}
if self.info["staged"]:
is_git_dir(self.info["git_dir"])
elif self.info["git_url"] and not skip_clone_pull:
clone_pull(self.info["git_url"], self.info["repo_dir"])
def search(self, pattern):
"""
Delegates to add_staged_results or add_committed_results
"""
if self.info["staged"]:
self.add_staged_results(pattern)
else:
self.add_committed_results(pattern)
def add_staged_results(self, pattern):
"""
Adds staged matches to results
"""
result = self.search_staged(pattern)
self.results[pattern] = {"staged": {"files": result}} if result else {}
def search_staged(self, pattern):
"""
Takes a text pattern and local repo directory and returns the
file name, line number, and text matching the given pattern in
a staged revision.
"""
cmd = ["git", "diff", "--staged", "--unified=0", "--",
self.info["repo_dir"]]
(out, err) = execute_cmd(cmd)
return self.parse_diff(diff=out, pattern=pattern)
def add_committed_results(self, pattern):
"""
Adds committed matches (logs, messages) to results
"""
def merge_committed(target, commit_range):
"""
Reads in yielded commit sha and pattern match information from
search_committed. Adds to pattern matches for for the particular
commit.
"""
for commit, metadata in self.search_committed(target, pattern, commit_range):
self.results[pattern].setdefault(commit, {}).update(metadata)
for commit_range in self.info["revlist"]:
merge_committed("diff", commit_range)
merge_committed("message", commit_range)
def search_committed(self, target, pattern, commit_range):
"""
Searches within a range of commits for commit messages or diffs
containing the text pattern. Yields a matching revision's SHA
and the message or file name, line number, text matching the
given pattern, and authorship.
"""
for log in self.get_logs(target, pattern, commit_range):
sha, metadata = self.parse_log(log)
if target == "message":
yield sha, metadata
else:
# show the diffs for a given commit
cmd = ["git", "--git-dir", self.info["git_dir"],
"show", sha, "--no-color", "--unified=0"]
(out, err) = execute_cmd(cmd)
file_diffs = self.parse_diff(diff=out, pattern=pattern)
if file_diffs:
metadata["files"] = file_diffs
yield sha, metadata
def get_logs(self, target, pattern, commit_range):
"""
Searches and returns git logs in a range of revisions that match a
specified pattern, either in the message or modified lines.
"""
cmd = ["git", "--git-dir", self.info["git_dir"], "log",
commit_range, "-i", "-E", "--oneline"]
if target == "message":
cmd.extend(["--format=COMMIT: %h AUTHORDATE: %aD AUTHORNAME: %an AUTHOREMAIL: %ae LOG: %s %b"])
cmd.extend(["--grep", pattern]) # limits matching to the log message
elif target == "diff":
cmd.extend(["--format=COMMIT: %h AUTHORDATE: %aD AUTHORNAME: %an AUTHOREMAIL: %ae"])
cmd.extend(["-G" + pattern]) # matches on added/removed lines
if self.info["author"]:
cmd.extend(["--author", self.info["author"]])
if self.info["before"]:
cmd.extend(["--before", self.info["before"]])
if self.info["after"]:
cmd.extend(["--after", self.info["after"]])
(out, err) = execute_cmd(cmd)
return out.strip().split("COMMIT: ")[1:]
@staticmethod
def parse_log(log):
"""
Parses the information contained in a pretty-formatted
--oneline commit log (i.e. the commit SHA, author's
name and email, the date the commit was authored, and,
optionally, the commit's message).
"""
metadata = {}
sha, log = log.split(" AUTHORDATE: ", 1)
metadata["author_date"], log = log.split(" AUTHORNAME: ", 1)
metadata["author_name"], log = log.split(" AUTHOREMAIL: ", 1)
try:
metadata["author_email"], metadata["message"] = log.split(" LOG: ", 1)
metadata["message"] = utf8_decode(metadata["message"])
except ValueError:
metadata["author_email"] = log
metadata = {key: metadata[key].strip() for key in metadata.keys()}
return sha, metadata
@staticmethod
def parse_diff(diff, pattern):
"""
Takes a single commit's diff and pattern. Returns the files
and lines in the revision that match the pattern.
"""
def split_diff(diff):
"""
Divides a diff into the file name and the rest of its
(split by newline).
"""
deleted_re = regex.compile(r"^deleted file")
try:
diff = diff.split("\n", 2)
if not deleted_re.match(diff[1]):
filename = diff[0].split(" b/", 1)[1]
diff = "@@" + diff[2].split("@@", 1)[1]
diff_text = diff.split("\n")
return filename, diff_text
except IndexError:
pass
def find_matches_in_diff(diff_text):
"""
Takes the lines from a file's diff and yields them
if they were added and match the given pattern.
"""
line_re = regex.compile(r"@@ \-[0-9,]+ \+([0-9]+)[, ].*")
pattern_re = regex.compile(pattern, regex.I)
line_num = 0
for line in diff_text:
line = utf8_decode(line)
if not line:
pass
elif line[0] == "@":
line_num = int(regex.sub(line_re, r"\1", line))
elif line[0] == "+":
if pattern_re.search(line):
yield {"line": line_num, "text": line[1:].strip()}
line_num += 1
try:
if isinstance(diff, bytes):
diff = diff.decode() # coerce bytes type to str
except:
pass
files = []
file_diffs = diff.split("diff --git ")[1:] # split the diff by file modified
for file_diff in file_diffs:
try:
(filename, diff_text) = split_diff(file_diff)
matches = [m for m in find_matches_in_diff(diff_text)]
if matches:
files.append({"file": filename, "matches": matches})
except TypeError: # ignore empty lines
pass
return files
def get_results(self):
if self.info["output"]:
with open(self.info["output"], "w") as outfile:
json.dump(self.results, outfile, ensure_ascii=False, indent=4)
if not self.render_results:
return self.results
if any(self.results.values()):
render(self.results, self.info)
sys.exit(1)
else:
print(style("Poirot didn't find anything!", "darkblue"))
sys.exit(0) | 0.397938 | 0.182462 |
# GUI layout constants consumed by the modules named in the section comments
# below.  Scalars appear to be DirectGUI text/frame scales or wordwrap widths,
# and tuples appear to be (x, z) / (x, y, z) positions -- the exact meaning of
# each value is defined by the consuming module (TODO confirm per consumer).
PBPTonscreenText = 0.2
# battle/RewardPanel.py
RPdirectFrame = (1.75, 1, 0.75)
RPtrackLabels = 0.05
RPmeritBarLabels = 0.165
# battle/RewardPanel.py
RPmeritLabelXPosition = 0.55
RPmeritBarsXPosition = 0.825
# battle/BattleBase.py
BBbattleInputTimeout = 20.0
# battle/FireCogPanel.py
FCPtextFrameScale = 0.08
# building/DistributedHQInterior.py
DHtoonName = 0.9
DHtoonNamePos = (-4, 0, 0)
DHscorePos = (-4.6, 0, 0)
DHtrophyPos = (-6.6, 0, 0.3)
# building/Elevator.py
EelevatorHopOff = 0.8
# catalog/CatalogChatItemPicker.py
CCIPmessagePickerCancel = 0.06
# catalog/CatalogItemPanel.py
CIPnameLabel = 1
CIPwordwrapOffset = 0
# catalog/CatalogScreen.py
# Normalized from the original "00.855" literal: the doubled leading zero is
# legal Python but obscures the value; 0.855 is the identical float.
CSgiftTogglePos = (0.855, -0.13)
CSgiftToggle = 0.08
CSbackCatalogButton = 0.065
# chat/TTChatInputSpeedChat.py
CISCspeedChat = .055
CISCtopLevelOverlap = 0.
# chat/ToontownChatManager.py
CMnormalButton = 0.06
CMscButtonPos = (-1.129, 0, 0.928)
CMscButton = 0.06
CMwhisperFrame = 0.06
CMwhisperButton = 0.05
CMunpaidChatWarningwordwrap = 18
CMunpaidChatWarning = 0.06
CMunpaidChatWarning_text_z = 0.30
CMpayButton = 0.06
CMpayButton_pos_z = -0.13
CMopenChatWarning = 0.06
CMactivateChat = 0.05
CMchatActivated = 0.06
CMNoPasswordContinue_z = -0.28
# coghq/LawbotCogHQLoader.py
LCLdgSign = 0.1 # the scale of the gate name
# coghq/SellbotCogHQLoader.py
SCLfdSign = .075
SCLdgSign = 0.1 # the scale of the gate name
# coghq/DistributedFactory.py
DFfactoryRoomTitle = 0.8
# coghq/DistributedMintElevatorExt.py
DMEEsignText = 2
# coghq/BossbotCogHQLoader.py
BCHQLmakeSign = 1.12
# coghq/DistributedGolfGreenGame.py
DGGGquitButton = 0.045
DGGGhowToButton = 0.045
DGGGscoreLabel = 0.075
# estate/houseDesign.py
HDhelpText = 0.8
HDatticButton = 1.0
HDroomButton = 1.0
HDtrashButton = 1.0
HDscrolledList = 0.1
# estate/PlantingGUI.py
GardeningInstructionScale = 0.07
# estate/FlowerPanel.py
FPBlankLabelPos = -0.35
FPBlankLabelTextScale = 0.05
# estate/FlowerPicker.py
FPFlowerValueTotal = 0.055
# estate/FlowerSellGUI.py
FSGDFTextScale = .06
FSGCancelBtnTextScale = 0.06
FSGOkBtnTextScale = 0.06
# estate/GardenTutorial.py
GardenTutorialPage2Wordwrap = 13.5
GardenTutorialPage4Wordwrap = 16.5
# fishing/BingoCardGui.py
BCGjpText = (0.04)  # NOTE: parentheses around a lone scalar -- this is the float 0.04, not a 1-tuple
BCGjpTextWordwrap = 10.5
BCGnextGame = 1.71
# fishing/FishSellGUI.py
FSGokButton = 0.06
FSGcancelButton = 0.06
# fishing/FishPanel.py
FPnewEntry = 0.08
FPnewRecord = 0.08
# fishing/GenusPanel.py
GPgenus = 0.045
# friends/FriendsListPanel.py
FLPnewFriend = 0.045
FLPsecrets = 0.045
FLPsecretsPos = (0.152, 0.0, 0.14)
FLPtitleScale = 0.04
# friends/FriendInviter.py
FIstopButton = 0.05
FIdialog = 0.06
FIcancelButtonPosition = (0.20, 0.0, -0.1)
FIcancelButtonPositionX = 0.20
FIstopButtonPosition = (-0.2, 0.0, 0.05)
FIstopButtonPositionX = -0.2
FIstopTextPosition = (0.075, -0.015)
FIstopTextPositionY = -0.015
FIyesButtonPositionX = -0.15
FIdirectFrameTextWorkWrap = 14
FIdirectFrameTextPosZ = 0.2
# golf/DistributedGolfHole.py
DGHpowerReminder = 0.09
DGHaimInstructions = 0.065
DGHteeInstructions = 0.065
# golf/DistributedGolfHole.py
DGHAimInstructScale = 0.1
DGHTeeInstructScale = 0.1
# golf/GolfScoreBoard.py
GSBExitCourseBTextPose = (0.15, -.01)
GSBtitleLabelScale = 0.07
# golf/GolfScoreBoard.py
GSBexitCourseBPos = (0.19, -.01)
GSBtitleLabel = 0.06
# hood/EstateHood.py
EHpopupInfo = .08
# hood/Hood.py
HDenterTitleTextScale = 0.16
# login/AvatarChoice.py
ACplayThisToon = 0.12
ACmakeAToon = 0.12
ACsubscribersOnly = 0.115
# NOTE(review): the original numeric literal on the next line had been
# replaced by a literal "<PASSWORD>" placeholder (an automated
# secret-scrubbing artifact), which made this module a SyntaxError.
# The constant is a GUI scale like its neighbors, not a credential.
# 0.1 matches the upstream property file -- TODO confirm against the
# canonical source before release.
ACdeleteWithPassword = 0.1
ACstatusText = 1.0
# login/AvatarChooser.py
ACtitle = 0.15
ACquitButton = 0.1
AClogoutButton = 0.1
ACquitButton_pos = -0.035
# minigame/MinigameAvatarScorePanel.py
MASPscoreText = 0.1
MASPnameText = 0.055
# minigame/MinigameRulesPanel.py
MRPplayButton = 0.055
MRPinstructionsText = 0.07
# minigame/MinigamePowerMeter.py
MPMpowerText = 0.07
MPMtooSlow = 0.07
MPMtooFast = 0.07
MPMgaugeA = .35
MPMgaugeTargetTop = .35
MPMgaugeTargetBot = .35
# minigame/Purchase.py
PstatusLabel = 0.08
# minigame/PurchaseBase.py
PBstatusLabel = 0.08
# makeatoon/NameShop.py
NSmaxNameWidth = 8.0
NSdirectScrolleList = 0.1
NSmakeLabel = 0.1
NSmakeCheckBox = 0.8
NSnameEntry = 0.08
NStypeANameButton = 0.06
NStypeANameButton_pos = -0.02
NSnameResult = 0.065
NStypeName = 0.1
NSnewName = 0.08
NScolorPrecede = True
# makeatoon/MakeAToon.py
MATenterGenderShop = 0.18
MATenterBodyShop = 0.18
MATenterColorShop = 0.18
MATenterClothesShop = 0.16
MATenterNameShop = 0.15
MATclothesGUIshirt_scale = 0.06
MATclothesGUIshirt_posL = 0.010
MATclothesGUIshirt_posR = -0.014
MATnextButtonScale = 0.08
# makeatoon/ShuffleButton.py
SBshuffleBtn = 0.08
# minigame/DistributedPairingGame.py
DPGPointsFrameTextScale = 0.7
DPGFlipsFrameTextScale = 0.7
# minigame/DistributedTravelGame.py
DTGVoteBtnTextScale = 0.07
DTGUseLabelTextScale = 0.1
DTGVotesPeriodLabelTextScale = 0.1
DTGVotesToGoLabelTextScale = 0.1
DTGUpLabelTextScale = 0.125
DTGDownLabelTextScale = 0.125
DTGRemainingVotesFrameTextScale = 0.7
# minigame/MinigameRulesPanel.py
MRPGameTitleTextScale = 0.11
MRPGameTitleTextPos = (-0.046, 0.2, 0.092)
MRPInstructionsTextWordwrap = 26.5
MRPInstructionsTextPos = (-0.115, 0.05, 0)
# Trolley metagame (travel game) constants
TravelGameBonusBeansSize = 0.65
# parties/InviteVisual.py
IVwhenTextLabel = 0.06
IVactivityTextLabel = 0.06
# parties/PartyPlanner.py
PPDescriptionScale = 0.06
PPelementTitleLabelScale = 0.07
PPelementBuyButtonTextScale = 0.055
PPtitleScale = 0.1
# NOTE(review): "pbulic" is a typo, but the misspelled name is what
# consumers look up -- renaming it here would break callers.
PPpbulicDescriptionLabel = 0.065
PPprivateDescriptionLabel = 0.065
PPpublicButton = 0.05
PPprivateButton = 0.05
PPcostLabel = 0.065
PPpartyGroundsLabel = 1.0
PPinstructionLabel = 0.07
PPelementPrice = 0.065
# parties/DistributedParty.py
DPpartyCountdownClockTextScale = 1.1
DPpartyCountdownClockMinutesScale = 1.1
DPpartyCountdownClockColonScale = 1.1
DPpartyCountdownClockSecondScale = 1.1
DPpartyCountdownClockMinutesPosY = 0.0
DPpartyCountdownClockColonPosY = 0.0
DPpartyCountdownClockSecondPosY = 0.0
# parties/PublicPartyGui.py
PPGpartyStartButton = 0.065
PPGinstructionsLabel = 0.065
PPGcreatePartyListAndLabel = 0.06
# parties/JukeboxGui.py
JGcurrentlyPlayingLabel = 0.07
JGsongNameLabel = 0.13
JGaddSongButton = 0.1
JGnumItemsVisible = 9
JGlistItem = 1.0
# pets/PetAvatarPanel.py & town/TownBattleSOSPetInfoPanel.py
PAPfeed = 0.5
PAPcall = 0.5
PAPowner = 0.35
PAPscratch = 0.5
PAPstateLabel = 0.4
PAPstateLabelPos = (0.7, 0, 3.5)
PAPstateLabelwordwrap = 7.5
# pets/PetDetailPanel.py
PDPtrickText = 0.17
PDPlaff = 0.17
PDPlaffPos = (0.0, -0.05)
# pets/PetshopGUI.py
PGUItextScale = 1
PGUIchooserTitle = .10
PGUIwordwrap = 14
PGUIdescLabel = 0.9
PGUIreturnConfirm = .07
PGUIpetsopAdopt = 0.6
PGUIadoptSubmit = 0.8
PGUIpetsopAdoptPos = (-0.21, 1.05)
PGUIpetshopCancelPos = (-3.3, 2.95)
PGUIcharLength = 1 # 1 for one-byte character codes, 3 for two-byte codes
# pets/PetTutorial.py
PTtitle = 0.13
PTpage1Pos = (0.15, 0.13)
PTpage2Pos = (-0.27, 0.16)
PTpage3Pos = (0.15, 0.13)
# quest/QuestPoster.py
QPauxText = 0.04
QPtextScale = 0.045
QPtextWordwrap = 15.6
QPinfoZ = -0.0625
# racing/DistributedLeaderBoard.py
DLBtitleRowScale = .4
# racing/DistributedRace.py
DRenterWaiting = .2
DRrollScale = .5
# raceing/DistributedRacePad.py
DRPnodeScale = 0.65
# racing/KartShopGui.py
KSGtextSizeBig = 0.088
KSGtextSizeSmall = 0.055
KSGaccDescriptionWordwrap = 11
# racing/RaceEndPanels.py
REPraceEnd = 0.08
REPraceExit = 0.04
REPticket_text_x = -0.6
# racing/RaceGUI.py
RGphotoFinish = 0.25
RGplaceLabelNumPos = (-1.2, 0, -0.97)
RGplaceLabelStrPos = (-1.05, 0.0, -0.8)
# racing/DistributedRaceAI.py
DRAwaitingForJoin = 60
# safezone/DistributedFishingSpot.py
DFSfailureDialog = 0.06
# safezone/Playground.py
PimgLabel = 1.0
# safezone/GZSafeZoneLoader.py
GSZLbossbotSignScale = 1.5
# shtiker/EventsPage.py
EPtitleLabel = 0.12
EPhostTab = 0.07
EPinvitedTab = 0.07
EPcalendarTab = 0.07
EPnewsTab = 0.07
EPhostingCancelButton = 0.04
EPhostingDateLabel = 0.05
EPpartyGoButton = 0.045
EPpublicPrivateLabel = 0.05
EPpublicButton = 0.5
EPprivateButton = 0.5
EPinvitePartyGoButton = 0.045
EPdecorationItemLabel = 0.055
EPactivityItemLabel = 0.055
EPcreateListAndLabel = 0.055
# shtiker/FishPage.py
FPtankTab = 0.07
FPcollectionTab = 0.07
FPtrophyTab = 0.07
# shtiker/DisplaySettingsDialog.py
DSDintroText = 0.06
DSDintroTextwordwrap = 25
DSDwindowedButtonPos = (0.0961, 0, -0.221)
DSDfullscreenButtonPos = (0.097, 0, -0.311)
DSDembeddedButtonPos = (0.097, 0, -0.411)
DSDcancel = 0.06
DSDcancelButtonPositionX = 0
# shtiker/DisguisePage.py
DPtab = 0.1
DPdeptLabel = 0.17
DPcogName = 0.093
# shtiker/TrackPage.py
TPstartFrame = 0.12
TPendFrame = 0.12
# shtiker/ShtikerBook.py
SBpageTab = 0.75
# shtiker/OptionsPage.py
OPoptionsTab = 0.07
OPCodesInstructionPanelTextPos = (0, -0.01)
OPCodesInstructionPanelTextWordWrap = 6
OPCodesResultPanelTextPos = (0, .35)
OPCodesResultPanelTextScale = 0.06
OPCodesResultPanelTextWordWrap = 9
OPCodesInputTextScale = 0.8
OPCodesSubmitTextScale = 0.07
OPCodesSubmitTextPos = (0, -0.02)
# shtiker/MapPage.py
MPbackToPlayground = 0.055
MPgoHome = 0.055
MPhoodLabel = 0.06
MPhoodWordwrap = 14
# shtiker/KartPage.py
KPkartTab = 0.07
KPdeleteButton = 0.06
KProtateButton = 0.035
# shtiker/GardenPage.py
GPBasketTabTextScale = 0.07
GPCollectionTabTextScale = 0.07
GPTrophyTabTextScale = 0.07
GPSpecialsTabTextScale = 0.07
# shtiker/GolfPage.py
GFPRecordsTabTextScale = 0.07
GFPRecordsTabPos = (0.92, 0, 0.1)
GFPTrophyTabTextScale = 0.07
GFPRecordsTabTextPos = (0.03, 0.0, 0.0)
GFPRTrophyTabPos = (0.92, 0, -0.3)
# toon/AvatarPanelBase.py
APBignorePanelAddIgnoreTextScale = 0.06
APBignorePanelTitlePosY = 0
# toon/ToonAvatarPanel.py
TAPfriendButton = 0.042
TAPwhisperButton = 0.042
TAPsecretsButton = 0.045
TAPgoToButton = 0.042
TAPignoreButton = 0.042
TAPpetButton = 0.26
TAPdetailButton = 0.04
TAPgroupFrameScale = 0.05
TAPgroupButtonScale = 0.055
# toon/ToonAvatarDetailPanel.py
TADPtrackLabel = 0.066
TADPcancelButton = 0.05
# toon/GroupPanel.py
GPdestFrameScale = 0.05
GPdestScrollListScale = 0.05
GPgoButtonScale = 0.06
# toon/InventoryNew.py
INtrackNameLabels = 0.05
INclickToAttack = 1.0
INpassButton = 0.05
INrunButton = 0.05
INdetailNameLabel = 1.0
# toon/NPCForceAcknowledge.py
NPCFimgLabel = 1.0
# toon/PlayerInfoPanel.py
PIPsecretsButtonScale = 0.045
PIPwisperButton = 0.06
PIPdetailButton = 0.05
# toon/ToonAvatarPanel.py
TAPsecretsButtonScale = 0.045
TAPwisperButtonScale = 0.06
# toon/ToonAvatarDetailPanel.py
TADPcancelPos = (-0.865, 0.0, -0.78)
TADtrackLabelPosZ = 0.08
# toontowngui/ToontownLoadingScreen.py
TLStip = 0.18
# toontowngui/TeaserPanel.py
TSRPdialogWordwrap = 22
TSRPtop = 0.05
TSRPpanelScale = 0.08
TSRPpanelPos = (0., -0.7)
TSRPbrowserPosZ = -0.45
TSRPbutton = 0.05
TSRPteaserBottomScale = 0.06
TSRPhaveFunText = 0.1
TSRPjoinUsText = 0.1
# toontowngui/TeaserPanel.py (OLD)
TPtop = 0.065
TPpanel = 0.055
TPbutton = 0.06
# town/TownBattleSOSPetSearchPanel.py
TBPSpanel = 0.1
# trolley/Trolley.py
TtrolleyHopOff = 0.8 | toontown/toonbase/TTLocalizerEnglishProperty.py | PBPTonscreenText = 0.2
# battle/RewardPanel.py
RPdirectFrame = (1.75, 1, 0.75)
RPtrackLabels = 0.05
RPmeritBarLabels = 0.165
# battle/RewardPanel.py
RPmeritLabelXPosition = 0.55
RPmeritBarsXPosition = 0.825
# battle/BattleBase.py
BBbattleInputTimeout = 20.0
# battle/FireCogPanel.py
FCPtextFrameScale = 0.08
# building/DistributedHQInterior.py
DHtoonName = 0.9
DHtoonNamePos = (-4, 0, 0)
DHscorePos = (-4.6, 0, 0)
DHtrophyPos = (-6.6, 0, 0.3)
# building/Elevator.py
EelevatorHopOff = 0.8
# catalog/CatalogChatItemPicker.py
CCIPmessagePickerCancel = 0.06
# catalog/CatalogItemPanel.py
CIPnameLabel = 1
CIPwordwrapOffset = 0
# catalog/CatalogScreen.py
CSgiftTogglePos = (00.855, -0.13)
CSgiftToggle = 0.08
CSbackCatalogButton = 0.065
# chat/TTChatInputSpeedChat.py
CISCspeedChat = .055
CISCtopLevelOverlap = 0.
# chat/ToontownChatManager.py
CMnormalButton = 0.06
CMscButtonPos = (-1.129, 0, 0.928)
CMscButton = 0.06
CMwhisperFrame = 0.06
CMwhisperButton = 0.05
CMunpaidChatWarningwordwrap = 18
CMunpaidChatWarning = 0.06
CMunpaidChatWarning_text_z = 0.30
CMpayButton = 0.06
CMpayButton_pos_z = -0.13
CMopenChatWarning = 0.06
CMactivateChat = 0.05
CMchatActivated = 0.06
CMNoPasswordContinue_z = -0.28
# coghq/LawbotCogHQLoader.py
LCLdgSign = 0.1 # the scale of the gate name
# coghq/SellbotCogHQLoader.py
SCLfdSign = .075
SCLdgSign = 0.1 # the scale of the gate name
# coghq/DistributedFactory.py
DFfactoryRoomTitle = 0.8
# coghq/DistributedMintElevatorExt.py
DMEEsignText = 2
# coghq/BossbotCogHQLoader.py
BCHQLmakeSign = 1.12
# coghq/DistributedGolfGreenGame.py
DGGGquitButton = 0.045
DGGGhowToButton = 0.045
DGGGscoreLabel = 0.075
# estate/houseDesign.py
HDhelpText = 0.8
HDatticButton = 1.0
HDroomButton = 1.0
HDtrashButton = 1.0
HDscrolledList = 0.1
# estate/PlantingGUI.py
GardeningInstructionScale = 0.07
# estate/FlowerPanel.py
FPBlankLabelPos = -0.35
FPBlankLabelTextScale = 0.05
# estate/FlowerPicker.py
FPFlowerValueTotal = 0.055
# estate/FlowerSellGUI.py
FSGDFTextScale = .06
FSGCancelBtnTextScale = 0.06
FSGOkBtnTextScale = 0.06
# estate/GardenTutorial.py
GardenTutorialPage2Wordwrap = 13.5
GardenTutorialPage4Wordwrap = 16.5
# fishing/BingoCardGui.py
BCGjpText = (0.04)
BCGjpTextWordwrap = 10.5
BCGnextGame = 1.71
# fishing/FishSellGUI.py
FSGokButton = 0.06
FSGcancelButton = 0.06
# fishing/FishPanel.py
FPnewEntry = 0.08
FPnewRecord = 0.08
# fishing/GenusPanel.py
GPgenus = 0.045
# friends/FriendsListPanel.py
FLPnewFriend = 0.045
FLPsecrets = 0.045
FLPsecretsPos = (0.152, 0.0, 0.14)
FLPtitleScale = 0.04
# friends/FriendInviter.py
FIstopButton = 0.05
FIdialog = 0.06
FIcancelButtonPosition = (0.20, 0.0, -0.1)
FIcancelButtonPositionX = 0.20
FIstopButtonPosition = (-0.2, 0.0, 0.05)
FIstopButtonPositionX = -0.2
FIstopTextPosition = (0.075, -0.015)
FIstopTextPositionY = -0.015
FIyesButtonPositionX = -0.15
FIdirectFrameTextWorkWrap = 14
FIdirectFrameTextPosZ = 0.2
# golf/DistributedGolfHole.py
DGHpowerReminder = 0.09
DGHaimInstructions = 0.065
DGHteeInstructions = 0.065
# golf/DistributedGolfHole.py
DGHAimInstructScale = 0.1
DGHTeeInstructScale = 0.1
# golf/GolfScoreBoard.py
GSBExitCourseBTextPose = (0.15, -.01)
GSBtitleLabelScale = 0.07
# golf/GolfScoreBoard.py
GSBexitCourseBPos = (0.19, -.01)
GSBtitleLabel = 0.06
# hood/EstateHood.py
EHpopupInfo = .08
# hood/Hood.py
HDenterTitleTextScale = 0.16
# login/AvatarChoice.py
ACplayThisToon = 0.12
ACmakeAToon = 0.12
ACsubscribersOnly = 0.115
ACdeleteWithPassword = <PASSWORD>
ACstatusText = 1.0
# login/AvatarChooser.py
ACtitle = 0.15
ACquitButton = 0.1
AClogoutButton = 0.1
ACquitButton_pos = -0.035
# minigame/MinigameAvatarScorePanel.py
MASPscoreText = 0.1
MASPnameText = 0.055
# minigame/MinigameRulesPanel.py
MRPplayButton = 0.055
MRPinstructionsText = 0.07
# minigame/MinigamePowerMeter.py
MPMpowerText = 0.07
MPMtooSlow = 0.07
MPMtooFast = 0.07
MPMgaugeA = .35
MPMgaugeTargetTop = .35
MPMgaugeTargetBot = .35
# minigame/Purchase.py
PstatusLabel = 0.08
# minigame/PurchaseBase.py
PBstatusLabel = 0.08
# makeatoon/NameShop.py
NSmaxNameWidth = 8.0
NSdirectScrolleList = 0.1
NSmakeLabel = 0.1
NSmakeCheckBox = 0.8
NSnameEntry = 0.08
NStypeANameButton = 0.06
NStypeANameButton_pos = -0.02
NSnameResult = 0.065
NStypeName = 0.1
NSnewName = 0.08
NScolorPrecede = True
# makeatoon/MakeAToon.py
MATenterGenderShop = 0.18
MATenterBodyShop = 0.18
MATenterColorShop = 0.18
MATenterClothesShop = 0.16
MATenterNameShop = 0.15
MATclothesGUIshirt_scale = 0.06
MATclothesGUIshirt_posL = 0.010
MATclothesGUIshirt_posR = -0.014
MATnextButtonScale = 0.08
# makeatoon\ShuffleButton.py
SBshuffleBtn = 0.08
# minigame/DistributedPairingGame.py
DPGPointsFrameTextScale = 0.7
DPGFlipsFrameTextScale = 0.7
# minigame/DistributedTravelGame.py
DTGVoteBtnTextScale = 0.07
DTGUseLabelTextScale = 0.1
DTGVotesPeriodLabelTextScale = 0.1
DTGVotesToGoLabelTextScale = 0.1
DTGUpLabelTextScale = 0.125
DTGDownLabelTextScale = 0.125
DTGRemainingVotesFrameTextScale = 0.7
# minigame/MinigameRulesPanel.py
MRPGameTitleTextScale = 0.11
MRPGameTitleTextPos = (-0.046, 0.2, 0.092)
MRPInstructionsTextWordwrap = 26.5
MRPInstructionsTextPos = (-0.115, 0.05, 0)
# Stuff for trolley metagame
TravelGameBonusBeansSize = 0.65
# parties/InviteVisual.py
IVwhenTextLabel = 0.06
IVactivityTextLabel = 0.06
# parties/PartyPlanner.py
PPDescriptionScale = 0.06
PPelementTitleLabelScale = 0.07
PPelementBuyButtonTextScale = 0.055
PPtitleScale = 0.1
PPpbulicDescriptionLabel = 0.065
PPprivateDescriptionLabel = 0.065
PPpublicButton = 0.05
PPprivateButton = 0.05
PPcostLabel = 0.065
PPpartyGroundsLabel = 1.0
PPinstructionLabel = 0.07
PPelementPrice = 0.065
# parties/DistributedParty.py
DPpartyCountdownClockTextScale = 1.1
DPpartyCountdownClockMinutesScale = 1.1
DPpartyCountdownClockColonScale = 1.1
DPpartyCountdownClockSecondScale = 1.1
DPpartyCountdownClockMinutesPosY = 0.0
DPpartyCountdownClockColonPosY = 0.0
DPpartyCountdownClockSecondPosY = 0.0
# parties/PublicPartyGui.py
PPGpartyStartButton = 0.065
PPGinstructionsLabel = 0.065
PPGcreatePartyListAndLabel = 0.06
# parties/JukeboxGui.py
JGcurrentlyPlayingLabel = 0.07
JGsongNameLabel = 0.13
JGaddSongButton = 0.1
JGnumItemsVisible = 9
JGlistItem = 1.0
# pets/PetAvatarPanel.py & town/TownBattleSOSPetInfoPanel.py
PAPfeed = 0.5
PAPcall = 0.5
PAPowner = 0.35
PAPscratch = 0.5
PAPstateLabel = 0.4
PAPstateLabelPos = (0.7, 0, 3.5)
PAPstateLabelwordwrap = 7.5
# pets/PetDetailPanel.py
PDPtrickText = 0.17
PDPlaff = 0.17
PDPlaffPos = (0.0, -0.05)
# pets/PetshopGUI.py
PGUItextScale = 1
PGUIchooserTitle = .10
PGUIwordwrap = 14
PGUIdescLabel = 0.9
PGUIreturnConfirm = .07
PGUIpetsopAdopt = 0.6
PGUIadoptSubmit = 0.8
PGUIpetsopAdoptPos = (-0.21, 1.05)
PGUIpetshopCancelPos = (-3.3, 2.95)
PGUIcharLength = 1 # 1 for one byte code 3 for two byte code
# pets/PetTutorial.py
PTtitle = 0.13
PTpage1Pos = (0.15, 0.13)
PTpage2Pos = (-0.27, 0.16)
PTpage3Pos = (0.15, 0.13)
# quest/QuestPoster.py
QPauxText = 0.04
QPtextScale = 0.045
QPtextWordwrap = 15.6
QPinfoZ = -0.0625
# racing/DistributedLeaderBoard.py
DLBtitleRowScale = .4
# racing/DistributedRace.py
DRenterWaiting = .2
DRrollScale = .5
# raceing/DistributedRacePad.py
DRPnodeScale = 0.65
# racing/KartShopGui.py
KSGtextSizeBig = 0.088
KSGtextSizeSmall = 0.055
KSGaccDescriptionWordwrap = 11
# racing/RaceEndPanels.py
REPraceEnd = 0.08
REPraceExit = 0.04
REPticket_text_x = -0.6
# racing/RaceGUI.py
RGphotoFinish = 0.25
RGplaceLabelNumPos = (-1.2, 0, -0.97)
RGplaceLabelStrPos = (-1.05, 0.0, -0.8)
# racing/DistributedRaceAI.py
DRAwaitingForJoin = 60
# safezone/DistributedFishingSpot.py
DFSfailureDialog = 0.06
# safezone/Playground.py
PimgLabel = 1.0
# safezone/GZSafeZoneLoader.py
GSZLbossbotSignScale = 1.5
# shtiker/EventsPage.py
EPtitleLabel = 0.12
EPhostTab = 0.07
EPinvitedTab = 0.07
EPcalendarTab = 0.07
EPnewsTab = 0.07
EPhostingCancelButton = 0.04
EPhostingDateLabel = 0.05
EPpartyGoButton = 0.045
EPpublicPrivateLabel = 0.05
EPpublicButton = 0.5
EPprivateButton = 0.5
EPinvitePartyGoButton = 0.045
EPdecorationItemLabel = 0.055
EPactivityItemLabel = 0.055
EPcreateListAndLabel = 0.055
# shtiker/FishPage.py
FPtankTab = 0.07
FPcollectionTab = 0.07
FPtrophyTab = 0.07
# shtiker/DisplaySettingsDialog.py
DSDintroText = 0.06
DSDintroTextwordwrap = 25
DSDwindowedButtonPos = (0.0961, 0, -0.221)
DSDfullscreenButtonPos = (0.097, 0, -0.311)
DSDembeddedButtonPos = (0.097, 0, -0.411)
DSDcancel = 0.06
DSDcancelButtonPositionX = 0
# shtiker/DisguisePage.py
DPtab = 0.1
DPdeptLabel = 0.17
DPcogName = 0.093
# shtiker/TrackPage.py
TPstartFrame = 0.12
TPendFrame = 0.12
# shtiker/ShtikerBook.py
SBpageTab = 0.75
# shtiker/OptionsPage.py
OPoptionsTab = 0.07
OPCodesInstructionPanelTextPos = (0, -0.01)
OPCodesInstructionPanelTextWordWrap = 6
OPCodesResultPanelTextPos = (0, .35)
OPCodesResultPanelTextScale = 0.06
OPCodesResultPanelTextWordWrap = 9
OPCodesInputTextScale = 0.8
OPCodesSubmitTextScale = 0.07
OPCodesSubmitTextPos = (0, -0.02)
# shtiker/MapPage.py
MPbackToPlayground = 0.055
MPgoHome = 0.055
MPhoodLabel = 0.06
MPhoodWordwrap = 14
# shtiker/KartPage.py
KPkartTab = 0.07
KPdeleteButton = 0.06
KProtateButton = 0.035
# shtiker/GardenPage.py
GPBasketTabTextScale = 0.07
GPCollectionTabTextScale = 0.07
GPTrophyTabTextScale = 0.07
GPSpecialsTabTextScale = 0.07
# shtiker/GolfPage.py
GFPRecordsTabTextScale = 0.07
GFPRecordsTabPos = (0.92, 0, 0.1)
GFPTrophyTabTextScale = 0.07
GFPRecordsTabTextPos = (0.03, 0.0, 0.0)
GFPRTrophyTabPos = (0.92, 0, -0.3)
# toon/AvatarPanelBase.py
APBignorePanelAddIgnoreTextScale = 0.06
APBignorePanelTitlePosY = 0
# toon/ToonAvatarPanel.py
TAPfriendButton = 0.042
TAPwhisperButton = 0.042
TAPsecretsButton = 0.045
TAPgoToButton = 0.042
TAPignoreButton = 0.042
TAPpetButton = 0.26
TAPdetailButton = 0.04
TAPgroupFrameScale = 0.05
TAPgroupButtonScale = 0.055
# toon/ToonAvatarDetailPanel.py
TADPtrackLabel = 0.066
TADPcancelButton = 0.05
# toon/GroupPanel.py
GPdestFrameScale = 0.05
GPdestScrollListScale = 0.05
GPgoButtonScale = 0.06
# toon/InventoryNew.py
INtrackNameLabels = 0.05
INclickToAttack = 1.0
INpassButton = 0.05
INrunButton = 0.05
INdetailNameLabel = 1.0
# toon/NPCForceAcknowledge.py
NPCFimgLabel = 1.0
# toon/PlayerInfoPanel.py
PIPsecretsButtonScale = 0.045
PIPwisperButton = 0.06
PIPdetailButton = 0.05
# toon/ToonAvatarPanel.py
TAPsecretsButtonScale = 0.045
TAPwisperButtonScale = 0.06
# toon/ToonAvatarDetailPanel.py
TADPcancelPos = (-0.865, 0.0, -0.78)
TADtrackLabelPosZ = 0.08
# toontowngui/ToontownLoadingScreen.py
TLStip = 0.18
# toontowngui/TeaserPanel.py
TSRPdialogWordwrap = 22
TSRPtop = 0.05
TSRPpanelScale = 0.08
TSRPpanelPos = (0., -0.7)
TSRPbrowserPosZ = -0.45
TSRPbutton = 0.05
TSRPteaserBottomScale = 0.06
TSRPhaveFunText = 0.1
TSRPjoinUsText = 0.1
# toontowngui/TeaserPanel.py (OLD)
TPtop = 0.065
TPpanel = 0.055
TPbutton = 0.06
# town/TownBattleSOSPetSearchPanel.py
TBPSpanel = 0.1
# trolley/Trolley.py
TtrolleyHopOff = 0.8 | 0.158044 | 0.164987 |
"""Client and server classes corresponding to protobuf-defined services."""
import grpc
import DataPlatForm.service_pb2 as service__pb2
class DACStub(object):
    """Client-side stub for the jgproto.DAC service.

    One callable attribute is installed per RPC declared in the .proto
    file; each is a unary-unary multicallable bound to *channel*.
    """

    def __init__(self, channel):
        """Constructor.

        Args:
            channel: A grpc.Channel.
        """
        # (rpc name, request message class, response message class) for
        # every unary-unary method of the service.  Building the stubs
        # from this table keeps each method declared exactly once.
        rpc_table = (
            ('AddUser', service__pb2.UserRequest, service__pb2.CommonResult),
            ('ModifyUser', service__pb2.UserRequest, service__pb2.CommonResult),
            ('DeleteUser', service__pb2.CommonStringRequest, service__pb2.CommonResult),
            ('ListUser', service__pb2.LoginInfo, service__pb2.UserInfos),
            ('CreateAlphaFactor', service__pb2.FactorRequest, service__pb2.FcAclResult),
            ('ModifyAlphaFactor', service__pb2.FactorRequest, service__pb2.CommonResult),
            ('DeleteAlphaFactor', service__pb2.FactorRequest, service__pb2.FcAclResult),
            ('ListAlphaFactor', service__pb2.LoginInfo, service__pb2.FactorList),
            ('GetAlphaFactor', service__pb2.FactorRequest, service__pb2.FactorResult),
            ('CreateFcGroup', service__pb2.GroupRequest, service__pb2.CommonResult),
            ('ModifyFcGroup', service__pb2.GroupRequest, service__pb2.CommonResult),
            ('ModifyFcGroupContent', service__pb2.GroupFcsRequest, service__pb2.CommonResult),
            ('DeleteFcGroup', service__pb2.CommonStringRequest, service__pb2.CommonResult),
            ('ListFcGroup', service__pb2.LoginInfo, service__pb2.FcGroups),
            ('CheckFcAcl', service__pb2.FcAclRequest, service__pb2.FcAclResult),
            ('CreateNewData', service__pb2.DataRequest, service__pb2.AclResult),
            ('DeleteData', service__pb2.DataRequest, service__pb2.AclResult),
            ('CheckAcl', service__pb2.AclRequest, service__pb2.AclResult),
        )
        for rpc_name, request_cls, response_cls in rpc_table:
            setattr(self, rpc_name, channel.unary_unary(
                '/jgproto.DAC/' + rpc_name,
                request_serializer=request_cls.SerializeToString,
                response_deserializer=response_cls.FromString,
            ))
class DACServicer(object):
    """Server-side skeleton for the jgproto.DAC service.

    Every handler below aborts the call with UNIMPLEMENTED; subclass and
    override the methods your server actually provides.
    """

    def _abort_unimplemented(self, context):
        # Common failure path shared by every generated handler stub.
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def AddUser(self, request, context):
        """Handler stub for the AddUser RPC."""
        self._abort_unimplemented(context)

    def ModifyUser(self, request, context):
        """Handler stub for the ModifyUser RPC."""
        self._abort_unimplemented(context)

    def DeleteUser(self, request, context):
        """Handler stub for the DeleteUser RPC."""
        self._abort_unimplemented(context)

    def ListUser(self, request, context):
        """Handler stub for the ListUser RPC."""
        self._abort_unimplemented(context)

    def CreateAlphaFactor(self, request, context):
        """Create an advanced (alpha) factor.
        """
        self._abort_unimplemented(context)

    def ModifyAlphaFactor(self, request, context):
        """Modify an alpha factor's level, information, etc.
        """
        self._abort_unimplemented(context)

    def DeleteAlphaFactor(self, request, context):
        """Handler stub for the DeleteAlphaFactor RPC."""
        self._abort_unimplemented(context)

    def ListAlphaFactor(self, request, context):
        """Handler stub for the ListAlphaFactor RPC."""
        self._abort_unimplemented(context)

    def GetAlphaFactor(self, request, context):
        """Fetch information about an advanced (alpha) factor.
        """
        self._abort_unimplemented(context)

    def CreateFcGroup(self, request, context):
        """Handler stub for the CreateFcGroup RPC."""
        self._abort_unimplemented(context)

    def ModifyFcGroup(self, request, context):
        """Handler stub for the ModifyFcGroup RPC."""
        self._abort_unimplemented(context)

    def ModifyFcGroupContent(self, request, context):
        """Add/remove/modify entries of a factor group.
        """
        self._abort_unimplemented(context)

    def DeleteFcGroup(self, request, context):
        """Handler stub for the DeleteFcGroup RPC."""
        self._abort_unimplemented(context)

    def ListFcGroup(self, request, context):
        """Handler stub for the ListFcGroup RPC."""
        self._abort_unimplemented(context)

    def CheckFcAcl(self, request, context):
        """rpc SetFcAcl (FcAclRequest) returns (CommonResult); // grant/revoke a group or a single factor
        Check whether the caller has permission to operate on the factor.
        """
        self._abort_unimplemented(context)

    def CreateNewData(self, request, context):
        """Create a new entry of another data type.
        """
        self._abort_unimplemented(context)

    def DeleteData(self, request, context):
        """Handler stub for the DeleteData RPC."""
        self._abort_unimplemented(context)

    def CheckAcl(self, request, context):
        """Check whether the caller has permission to operate on the data.
        """
        self._abort_unimplemented(context)
def add_DACServicer_to_server(servicer, server):
    """Register *servicer*'s jgproto.DAC handlers on *server*.

    Builds one unary-unary handler per RPC from a (name, request class,
    response class) table and installs them as a generic handler.
    """
    method_table = (
        ('AddUser', service__pb2.UserRequest, service__pb2.CommonResult),
        ('ModifyUser', service__pb2.UserRequest, service__pb2.CommonResult),
        ('DeleteUser', service__pb2.CommonStringRequest, service__pb2.CommonResult),
        ('ListUser', service__pb2.LoginInfo, service__pb2.UserInfos),
        ('CreateAlphaFactor', service__pb2.FactorRequest, service__pb2.FcAclResult),
        ('ModifyAlphaFactor', service__pb2.FactorRequest, service__pb2.CommonResult),
        ('DeleteAlphaFactor', service__pb2.FactorRequest, service__pb2.FcAclResult),
        ('ListAlphaFactor', service__pb2.LoginInfo, service__pb2.FactorList),
        ('GetAlphaFactor', service__pb2.FactorRequest, service__pb2.FactorResult),
        ('CreateFcGroup', service__pb2.GroupRequest, service__pb2.CommonResult),
        ('ModifyFcGroup', service__pb2.GroupRequest, service__pb2.CommonResult),
        ('ModifyFcGroupContent', service__pb2.GroupFcsRequest, service__pb2.CommonResult),
        ('DeleteFcGroup', service__pb2.CommonStringRequest, service__pb2.CommonResult),
        ('ListFcGroup', service__pb2.LoginInfo, service__pb2.FcGroups),
        ('CheckFcAcl', service__pb2.FcAclRequest, service__pb2.FcAclResult),
        ('CreateNewData', service__pb2.DataRequest, service__pb2.AclResult),
        ('DeleteData', service__pb2.DataRequest, service__pb2.AclResult),
        ('CheckAcl', service__pb2.AclRequest, service__pb2.AclResult),
    )
    # Dict comprehensions preserve insertion order, so handler order matches
    # the original generated code.
    rpc_method_handlers = {
        rpc_name: grpc.unary_unary_rpc_method_handler(
            getattr(servicer, rpc_name),
            request_deserializer=request_cls.FromString,
            response_serializer=response_cls.SerializeToString,
        )
        for rpc_name, request_cls, response_cls in method_table
    }
    generic_handler = grpc.method_handlers_generic_handler(
        'jgproto.DAC', rpc_method_handlers)
    server.add_generic_rpc_handlers((generic_handler,))
# This class is part of an EXPERIMENTAL API.
class DAC(object):
"""Missing associated documentation comment in .proto file."""
@staticmethod
def AddUser(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/jgproto.DAC/AddUser',
service__pb2.UserRequest.SerializeToString,
service__pb2.CommonResult.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def ModifyUser(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/jgproto.DAC/ModifyUser',
service__pb2.UserRequest.SerializeToString,
service__pb2.CommonResult.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def DeleteUser(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/jgproto.DAC/DeleteUser',
service__pb2.CommonStringRequest.SerializeToString,
service__pb2.CommonResult.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def ListUser(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/jgproto.DAC/ListUser',
service__pb2.LoginInfo.SerializeToString,
service__pb2.UserInfos.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def CreateAlphaFactor(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/jgproto.DAC/CreateAlphaFactor',
service__pb2.FactorRequest.SerializeToString,
service__pb2.FcAclResult.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def ModifyAlphaFactor(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/jgproto.DAC/ModifyAlphaFactor',
service__pb2.FactorRequest.SerializeToString,
service__pb2.CommonResult.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def DeleteAlphaFactor(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/jgproto.DAC/DeleteAlphaFactor',
service__pb2.FactorRequest.SerializeToString,
service__pb2.FcAclResult.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def ListAlphaFactor(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/jgproto.DAC/ListAlphaFactor',
service__pb2.LoginInfo.SerializeToString,
service__pb2.FactorList.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def GetAlphaFactor(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/jgproto.DAC/GetAlphaFactor',
service__pb2.FactorRequest.SerializeToString,
service__pb2.FactorResult.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def CreateFcGroup(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/jgproto.DAC/CreateFcGroup',
service__pb2.GroupRequest.SerializeToString,
service__pb2.CommonResult.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def ModifyFcGroup(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/jgproto.DAC/ModifyFcGroup',
service__pb2.GroupRequest.SerializeToString,
service__pb2.CommonResult.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def ModifyFcGroupContent(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/jgproto.DAC/ModifyFcGroupContent',
service__pb2.GroupFcsRequest.SerializeToString,
service__pb2.CommonResult.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def DeleteFcGroup(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/jgproto.DAC/DeleteFcGroup',
service__pb2.CommonStringRequest.SerializeToString,
service__pb2.CommonResult.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def ListFcGroup(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/jgproto.DAC/ListFcGroup',
service__pb2.LoginInfo.SerializeToString,
service__pb2.FcGroups.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def CheckFcAcl(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/jgproto.DAC/CheckFcAcl',
service__pb2.FcAclRequest.SerializeToString,
service__pb2.FcAclResult.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def CreateNewData(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/jgproto.DAC/CreateNewData',
service__pb2.DataRequest.SerializeToString,
service__pb2.AclResult.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def DeleteData(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/jgproto.DAC/DeleteData',
service__pb2.DataRequest.SerializeToString,
service__pb2.AclResult.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def CheckAcl(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/jgproto.DAC/CheckAcl',
service__pb2.AclRequest.SerializeToString,
service__pb2.AclResult.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata) | src/DataPlatForm/service_pb2_grpc.py | """Client and server classes corresponding to protobuf-defined services."""
import grpc
import DataPlatForm.service_pb2 as service__pb2
class DACStub(object):
"""Missing associated documentation comment in .proto file."""
def __init__(self, channel):
"""Constructor.
Args:
channel: A grpc.Channel.
"""
self.AddUser = channel.unary_unary(
'/jgproto.DAC/AddUser',
request_serializer=service__pb2.UserRequest.SerializeToString,
response_deserializer=service__pb2.CommonResult.FromString,
)
self.ModifyUser = channel.unary_unary(
'/jgproto.DAC/ModifyUser',
request_serializer=service__pb2.UserRequest.SerializeToString,
response_deserializer=service__pb2.CommonResult.FromString,
)
self.DeleteUser = channel.unary_unary(
'/jgproto.DAC/DeleteUser',
request_serializer=service__pb2.CommonStringRequest.SerializeToString,
response_deserializer=service__pb2.CommonResult.FromString,
)
self.ListUser = channel.unary_unary(
'/jgproto.DAC/ListUser',
request_serializer=service__pb2.LoginInfo.SerializeToString,
response_deserializer=service__pb2.UserInfos.FromString,
)
self.CreateAlphaFactor = channel.unary_unary(
'/jgproto.DAC/CreateAlphaFactor',
request_serializer=service__pb2.FactorRequest.SerializeToString,
response_deserializer=service__pb2.FcAclResult.FromString,
)
self.ModifyAlphaFactor = channel.unary_unary(
'/jgproto.DAC/ModifyAlphaFactor',
request_serializer=service__pb2.FactorRequest.SerializeToString,
response_deserializer=service__pb2.CommonResult.FromString,
)
self.DeleteAlphaFactor = channel.unary_unary(
'/jgproto.DAC/DeleteAlphaFactor',
request_serializer=service__pb2.FactorRequest.SerializeToString,
response_deserializer=service__pb2.FcAclResult.FromString,
)
self.ListAlphaFactor = channel.unary_unary(
'/jgproto.DAC/ListAlphaFactor',
request_serializer=service__pb2.LoginInfo.SerializeToString,
response_deserializer=service__pb2.FactorList.FromString,
)
self.GetAlphaFactor = channel.unary_unary(
'/jgproto.DAC/GetAlphaFactor',
request_serializer=service__pb2.FactorRequest.SerializeToString,
response_deserializer=service__pb2.FactorResult.FromString,
)
self.CreateFcGroup = channel.unary_unary(
'/jgproto.DAC/CreateFcGroup',
request_serializer=service__pb2.GroupRequest.SerializeToString,
response_deserializer=service__pb2.CommonResult.FromString,
)
self.ModifyFcGroup = channel.unary_unary(
'/jgproto.DAC/ModifyFcGroup',
request_serializer=service__pb2.GroupRequest.SerializeToString,
response_deserializer=service__pb2.CommonResult.FromString,
)
self.ModifyFcGroupContent = channel.unary_unary(
'/jgproto.DAC/ModifyFcGroupContent',
request_serializer=service__pb2.GroupFcsRequest.SerializeToString,
response_deserializer=service__pb2.CommonResult.FromString,
)
self.DeleteFcGroup = channel.unary_unary(
'/jgproto.DAC/DeleteFcGroup',
request_serializer=service__pb2.CommonStringRequest.SerializeToString,
response_deserializer=service__pb2.CommonResult.FromString,
)
self.ListFcGroup = channel.unary_unary(
'/jgproto.DAC/ListFcGroup',
request_serializer=service__pb2.LoginInfo.SerializeToString,
response_deserializer=service__pb2.FcGroups.FromString,
)
self.CheckFcAcl = channel.unary_unary(
'/jgproto.DAC/CheckFcAcl',
request_serializer=service__pb2.FcAclRequest.SerializeToString,
response_deserializer=service__pb2.FcAclResult.FromString,
)
self.CreateNewData = channel.unary_unary(
'/jgproto.DAC/CreateNewData',
request_serializer=service__pb2.DataRequest.SerializeToString,
response_deserializer=service__pb2.AclResult.FromString,
)
self.DeleteData = channel.unary_unary(
'/jgproto.DAC/DeleteData',
request_serializer=service__pb2.DataRequest.SerializeToString,
response_deserializer=service__pb2.AclResult.FromString,
)
self.CheckAcl = channel.unary_unary(
'/jgproto.DAC/CheckAcl',
request_serializer=service__pb2.AclRequest.SerializeToString,
response_deserializer=service__pb2.AclResult.FromString,
)
class DACServicer(object):
"""Missing associated documentation comment in .proto file."""
def AddUser(self, request, context):
"""Missing associated documentation comment in .proto file."""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def ModifyUser(self, request, context):
"""Missing associated documentation comment in .proto file."""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def DeleteUser(self, request, context):
"""Missing associated documentation comment in .proto file."""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def ListUser(self, request, context):
"""Missing associated documentation comment in .proto file."""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def CreateAlphaFactor(self, request, context):
"""创建高级因子
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def ModifyAlphaFactor(self, request, context):
"""改level、改信息等
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def DeleteAlphaFactor(self, request, context):
"""Missing associated documentation comment in .proto file."""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def ListAlphaFactor(self, request, context):
"""Missing associated documentation comment in .proto file."""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def GetAlphaFactor(self, request, context):
"""获取高级因子信息
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def CreateFcGroup(self, request, context):
"""Missing associated documentation comment in .proto file."""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def ModifyFcGroup(self, request, context):
"""Missing associated documentation comment in .proto file."""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def ModifyFcGroupContent(self, request, context):
"""增删改
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def DeleteFcGroup(self, request, context):
"""Missing associated documentation comment in .proto file."""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def ListFcGroup(self, request, context):
"""Missing associated documentation comment in .proto file."""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def CheckFcAcl(self, request, context):
"""rpc SetFcAcl (FcAclRequest) returns (CommonResult); // 授权/回收 组、单个因子
检查是否具有操作因子的权限
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def CreateNewData(self, request, context):
"""创建新的其他数据类型
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def DeleteData(self, request, context):
"""Missing associated documentation comment in .proto file."""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def CheckAcl(self, request, context):
"""检查是否具有操作数据的权限
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def add_DACServicer_to_server(servicer, server):
rpc_method_handlers = {
'AddUser': grpc.unary_unary_rpc_method_handler(
servicer.AddUser,
request_deserializer=service__pb2.UserRequest.FromString,
response_serializer=service__pb2.CommonResult.SerializeToString,
),
'ModifyUser': grpc.unary_unary_rpc_method_handler(
servicer.ModifyUser,
request_deserializer=service__pb2.UserRequest.FromString,
response_serializer=service__pb2.CommonResult.SerializeToString,
),
'DeleteUser': grpc.unary_unary_rpc_method_handler(
servicer.DeleteUser,
request_deserializer=service__pb2.CommonStringRequest.FromString,
response_serializer=service__pb2.CommonResult.SerializeToString,
),
'ListUser': grpc.unary_unary_rpc_method_handler(
servicer.ListUser,
request_deserializer=service__pb2.LoginInfo.FromString,
response_serializer=service__pb2.UserInfos.SerializeToString,
),
'CreateAlphaFactor': grpc.unary_unary_rpc_method_handler(
servicer.CreateAlphaFactor,
request_deserializer=service__pb2.FactorRequest.FromString,
response_serializer=service__pb2.FcAclResult.SerializeToString,
),
'ModifyAlphaFactor': grpc.unary_unary_rpc_method_handler(
servicer.ModifyAlphaFactor,
request_deserializer=service__pb2.FactorRequest.FromString,
response_serializer=service__pb2.CommonResult.SerializeToString,
),
'DeleteAlphaFactor': grpc.unary_unary_rpc_method_handler(
servicer.DeleteAlphaFactor,
request_deserializer=service__pb2.FactorRequest.FromString,
response_serializer=service__pb2.FcAclResult.SerializeToString,
),
'ListAlphaFactor': grpc.unary_unary_rpc_method_handler(
servicer.ListAlphaFactor,
request_deserializer=service__pb2.LoginInfo.FromString,
response_serializer=service__pb2.FactorList.SerializeToString,
),
'GetAlphaFactor': grpc.unary_unary_rpc_method_handler(
servicer.GetAlphaFactor,
request_deserializer=service__pb2.FactorRequest.FromString,
response_serializer=service__pb2.FactorResult.SerializeToString,
),
'CreateFcGroup': grpc.unary_unary_rpc_method_handler(
servicer.CreateFcGroup,
request_deserializer=service__pb2.GroupRequest.FromString,
response_serializer=service__pb2.CommonResult.SerializeToString,
),
'ModifyFcGroup': grpc.unary_unary_rpc_method_handler(
servicer.ModifyFcGroup,
request_deserializer=service__pb2.GroupRequest.FromString,
response_serializer=service__pb2.CommonResult.SerializeToString,
),
'ModifyFcGroupContent': grpc.unary_unary_rpc_method_handler(
servicer.ModifyFcGroupContent,
request_deserializer=service__pb2.GroupFcsRequest.FromString,
response_serializer=service__pb2.CommonResult.SerializeToString,
),
'DeleteFcGroup': grpc.unary_unary_rpc_method_handler(
servicer.DeleteFcGroup,
request_deserializer=service__pb2.CommonStringRequest.FromString,
response_serializer=service__pb2.CommonResult.SerializeToString,
),
'ListFcGroup': grpc.unary_unary_rpc_method_handler(
servicer.ListFcGroup,
request_deserializer=service__pb2.LoginInfo.FromString,
response_serializer=service__pb2.FcGroups.SerializeToString,
),
'CheckFcAcl': grpc.unary_unary_rpc_method_handler(
servicer.CheckFcAcl,
request_deserializer=service__pb2.FcAclRequest.FromString,
response_serializer=service__pb2.FcAclResult.SerializeToString,
),
'CreateNewData': grpc.unary_unary_rpc_method_handler(
servicer.CreateNewData,
request_deserializer=service__pb2.DataRequest.FromString,
response_serializer=service__pb2.AclResult.SerializeToString,
),
'DeleteData': grpc.unary_unary_rpc_method_handler(
servicer.DeleteData,
request_deserializer=service__pb2.DataRequest.FromString,
response_serializer=service__pb2.AclResult.SerializeToString,
),
'CheckAcl': grpc.unary_unary_rpc_method_handler(
servicer.CheckAcl,
request_deserializer=service__pb2.AclRequest.FromString,
response_serializer=service__pb2.AclResult.SerializeToString,
),
}
generic_handler = grpc.method_handlers_generic_handler(
'jgproto.DAC', rpc_method_handlers)
server.add_generic_rpc_handlers((generic_handler,))
# This class is part of an EXPERIMENTAL API.
class DAC(object):
"""Missing associated documentation comment in .proto file."""
@staticmethod
def AddUser(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/jgproto.DAC/AddUser',
service__pb2.UserRequest.SerializeToString,
service__pb2.CommonResult.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def ModifyUser(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/jgproto.DAC/ModifyUser',
service__pb2.UserRequest.SerializeToString,
service__pb2.CommonResult.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def DeleteUser(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/jgproto.DAC/DeleteUser',
service__pb2.CommonStringRequest.SerializeToString,
service__pb2.CommonResult.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def ListUser(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/jgproto.DAC/ListUser',
service__pb2.LoginInfo.SerializeToString,
service__pb2.UserInfos.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def CreateAlphaFactor(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/jgproto.DAC/CreateAlphaFactor',
service__pb2.FactorRequest.SerializeToString,
service__pb2.FcAclResult.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def ModifyAlphaFactor(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/jgproto.DAC/ModifyAlphaFactor',
service__pb2.FactorRequest.SerializeToString,
service__pb2.CommonResult.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def DeleteAlphaFactor(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/jgproto.DAC/DeleteAlphaFactor',
service__pb2.FactorRequest.SerializeToString,
service__pb2.FcAclResult.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def ListAlphaFactor(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/jgproto.DAC/ListAlphaFactor',
service__pb2.LoginInfo.SerializeToString,
service__pb2.FactorList.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def GetAlphaFactor(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/jgproto.DAC/GetAlphaFactor',
service__pb2.FactorRequest.SerializeToString,
service__pb2.FactorResult.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def CreateFcGroup(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/jgproto.DAC/CreateFcGroup',
service__pb2.GroupRequest.SerializeToString,
service__pb2.CommonResult.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def ModifyFcGroup(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/jgproto.DAC/ModifyFcGroup',
service__pb2.GroupRequest.SerializeToString,
service__pb2.CommonResult.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def ModifyFcGroupContent(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/jgproto.DAC/ModifyFcGroupContent',
service__pb2.GroupFcsRequest.SerializeToString,
service__pb2.CommonResult.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def DeleteFcGroup(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/jgproto.DAC/DeleteFcGroup',
service__pb2.CommonStringRequest.SerializeToString,
service__pb2.CommonResult.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def ListFcGroup(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/jgproto.DAC/ListFcGroup',
service__pb2.LoginInfo.SerializeToString,
service__pb2.FcGroups.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def CheckFcAcl(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/jgproto.DAC/CheckFcAcl',
service__pb2.FcAclRequest.SerializeToString,
service__pb2.FcAclResult.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def CreateNewData(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/jgproto.DAC/CreateNewData',
service__pb2.DataRequest.SerializeToString,
service__pb2.AclResult.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def DeleteData(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/jgproto.DAC/DeleteData',
service__pb2.DataRequest.SerializeToString,
service__pb2.AclResult.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def CheckAcl(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/jgproto.DAC/CheckAcl',
service__pb2.AclRequest.SerializeToString,
service__pb2.AclResult.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata) | 0.761627 | 0.099164 |
from bigo import questions as big_o
from binsearch import questions as bsearch
from binsearchtree import questions as bst
from graph import questions as graphs
from hashing import questions as hashing
from insertion import insertion_sort as isort
from quicksort import questions as qsort
from stringsearch import questions as strsearch
from random import randint
BIGO_QUESTIONS = {1: big_o.equality,
2: big_o.worse,
3: big_o.bigo,
4: big_o.order
}
GRAPH_QUESTIONS = {1: graphs.trace_breadth,
2: graphs.trace_depth,
3: graphs.trace_best,
4: graphs.trace_mst,
5: graphs.trace_tsp}
HASH_QUESTIONS = {1: hashing.produced,
2: hashing.result,
3: hashing.search,
4: hashing.deletion}
ISORT_QUESTIONS = {1: isort.recognition,
2: isort.traces,
3: isort.iteration_recognition,
# 4: isort.explain_stability
}
QSORT_QUESTIONS = {1: qsort.trace,
2: qsort.spot_wrong
}
STR_QUESTIONS = {1: strsearch.bf_comps,
2: strsearch.create_table,
3: strsearch.create_string,
4: strsearch.apply_kmp}
BINSEARCH_QUESTIONS = {1: bsearch.trace,
2: bsearch.recursive_calls}
BST_QUESTIONS = {1: bst.bst,
2: bst.insert,
3: bst.search}
ALL_QUESTIONS = {1: BIGO_QUESTIONS,
2: ISORT_QUESTIONS,
3: QSORT_QUESTIONS,
4: BINSEARCH_QUESTIONS,
5: BST_QUESTIONS,
6: HASH_QUESTIONS,
7: STR_QUESTIONS,
8: GRAPH_QUESTIONS
}
def qsort_trace_only():
while True:
QSORT_QUESTIONS.get(1)()
def random_questions():
topic = ALL_QUESTIONS.get(randint(1, len(ALL_QUESTIONS)))
quest = topic.get(randint(1, len(topic)))
quest()
del topic, quest # GC topic, quest to prevent case of being stuck
if __name__ == '__main__':
# User determines whether random questions or not
user_inp = ''
while (user_inp != 'random') and (user_inp != 'order') and (user_inp != 'orderx'):
print(f"\n\nPlease enter either of the following strings below: ")
print(f" 'random' - Questions will be chosen at random")
print(f" 'order' - Questions will be called in order learned")
print(f" 'orderx' - Each question will be called x times. Questions are in order learned.")
user_inp = str(input("\nEnter choice: "))
# Picks random question from all hashes storing question functions
if (user_inp == 'random'):
while True:
random_questions()
# Goes through each question once
elif (user_inp == 'order'):
for topic in ALL_QUESTIONS.values():
for question in topic.values():
question()
elif (user_inp == 'orderx'):
try:
x = int(input("Input integer: "))
except ValueError:
print("Invalid input. X set as 1.")
x = 1
for topic in ALL_QUESTIONS.values():
for call in range(x):
for question in topic.values():
question() | main.py | from bigo import questions as big_o
from binsearch import questions as bsearch
from binsearchtree import questions as bst
from graph import questions as graphs
from hashing import questions as hashing
from insertion import insertion_sort as isort
from quicksort import questions as qsort
from stringsearch import questions as strsearch
from random import randint
BIGO_QUESTIONS = {1: big_o.equality,
2: big_o.worse,
3: big_o.bigo,
4: big_o.order
}
GRAPH_QUESTIONS = {1: graphs.trace_breadth,
2: graphs.trace_depth,
3: graphs.trace_best,
4: graphs.trace_mst,
5: graphs.trace_tsp}
HASH_QUESTIONS = {1: hashing.produced,
2: hashing.result,
3: hashing.search,
4: hashing.deletion}
ISORT_QUESTIONS = {1: isort.recognition,
2: isort.traces,
3: isort.iteration_recognition,
# 4: isort.explain_stability
}
QSORT_QUESTIONS = {1: qsort.trace,
2: qsort.spot_wrong
}
STR_QUESTIONS = {1: strsearch.bf_comps,
2: strsearch.create_table,
3: strsearch.create_string,
4: strsearch.apply_kmp}
BINSEARCH_QUESTIONS = {1: bsearch.trace,
2: bsearch.recursive_calls}
BST_QUESTIONS = {1: bst.bst,
2: bst.insert,
3: bst.search}
ALL_QUESTIONS = {1: BIGO_QUESTIONS,
2: ISORT_QUESTIONS,
3: QSORT_QUESTIONS,
4: BINSEARCH_QUESTIONS,
5: BST_QUESTIONS,
6: HASH_QUESTIONS,
7: STR_QUESTIONS,
8: GRAPH_QUESTIONS
}
def qsort_trace_only():
while True:
QSORT_QUESTIONS.get(1)()
def random_questions():
topic = ALL_QUESTIONS.get(randint(1, len(ALL_QUESTIONS)))
quest = topic.get(randint(1, len(topic)))
quest()
del topic, quest # GC topic, quest to prevent case of being stuck
if __name__ == '__main__':
# User determines whether random questions or not
user_inp = ''
while (user_inp != 'random') and (user_inp != 'order') and (user_inp != 'orderx'):
print(f"\n\nPlease enter either of the following strings below: ")
print(f" 'random' - Questions will be chosen at random")
print(f" 'order' - Questions will be called in order learned")
print(f" 'orderx' - Each question will be called x times. Questions are in order learned.")
user_inp = str(input("\nEnter choice: "))
# Picks random question from all hashes storing question functions
if (user_inp == 'random'):
while True:
random_questions()
# Goes through each question once
elif (user_inp == 'order'):
for topic in ALL_QUESTIONS.values():
for question in topic.values():
question()
elif (user_inp == 'orderx'):
try:
x = int(input("Input integer: "))
except ValueError:
print("Invalid input. X set as 1.")
x = 1
for topic in ALL_QUESTIONS.values():
for call in range(x):
for question in topic.values():
question() | 0.273769 | 0.225257 |
from unittest import TestCase
import numpy as np
from scipy.sparse import csr_matrix
from amfe.solver.integrator import *
from numpy.testing import assert_allclose
import matplotlib.pyplot as plt
from amfe.linalg.linearsolvers import ScipySparseLinearSolver
def M(q, dq, t):
    """Constant mass matrix of the 4-dof test system: 2 * I, in CSR format.

    The state arguments (q, dq, t) are accepted for interface compatibility
    with the integrator but are not used.
    """
    mass = np.eye(4) * 2.0
    return csr_matrix(mass)
def K(q, dq, t):
    """State-dependent stiffness: a constant tridiagonal part plus diag(q).

    The constant part has diagonal [2, 4, 4, 2] and off-diagonals -0.3;
    the nonlinearity enters through the dense diag(q) contribution.
    """
    off_diag = np.eye(4, k=1) + np.eye(4, k=-1)
    base = np.diag([2.0, 4.0, 4.0, 2.0]) - 0.3 * off_diag
    return csr_matrix(base) + np.diag(q)
def D(q, dq, t):
    """Constant symmetric tridiagonal damping matrix (0.2 diag, 0.1 off-diag), CSR format.

    The state arguments are unused; they exist only to match the
    callback signature expected by the integrator.
    """
    off_diag = np.eye(4, k=1) + np.eye(4, k=-1)
    damping = 0.2 * np.eye(4) + 0.1 * off_diag
    return csr_matrix(damping)
def f_int(q, dq, t):
    """Internal restoring force: elastic (K q) plus viscous (D dq) contribution."""
    elastic = K(q, dq, t) @ q
    viscous = D(q, dq, t) @ dq
    return elastic + viscous
def f_ext(q, dq, t):
    """External load: a ramp of slope 2 applied to the last dof only."""
    load = t * 2
    return np.array([0, 0, 0, load])
class GeneralizedAlphaTest(TestCase):
    """Unit tests for the GeneralizedAlpha time integrator on a small 4-dof system."""

    def setUp(self):
        # A fresh integrator with a fixed time step of 0.1 for every test.
        self.integrator = GeneralizedAlpha(M, f_int, f_ext, K, D)
        self.integrator.dt = 0.1

    def tearDown(self):
        pass

    def _predict_from_rest(self):
        # Run one prediction step at t = 0, starting from rest with a unit
        # acceleration on the last degree of freedom.
        q_start = np.zeros(4)
        dq_start = np.zeros(4)
        ddq_start = np.array([0.0, 0.0, 0.0, 1.0])
        self.integrator.set_prediction(q_start, dq_start, ddq_start, 0.0)

    def test_set_integration_parameters(self):
        # The constructor must store all four algorithmic parameters verbatim.
        am, af, b, g = 0.3, 0.4, 0.5, 0.6
        integrator = GeneralizedAlpha(M, f_int, f_ext, K, D, am, af, b, g)
        self.assertEqual(integrator.alpha_m, am)
        self.assertEqual(integrator.alpha_f, af)
        self.assertEqual(integrator.beta, b)
        self.assertEqual(integrator.gamma, g)

    def test_get_midstep(self):
        # The midstep is the convex combination (1 - alpha) * left + alpha * right.
        left = np.array([0, 0.2, 0, 0.1])
        right = np.array([0.1, 0, -0.25, 0.4])
        midpoint = self.integrator._get_midstep(0.4, left, right)
        assert_allclose(midpoint, np.array([0.06, 0.08, -0.15, 0.28]))

    def test_set_prediction(self):
        self._predict_from_rest()
        self.assertEqual(self.integrator.t_p, 0.1)
        assert_allclose(self.integrator.q_p, np.array([0, 0, 0, 0]))
        assert_allclose(self.integrator.dq_p, np.array([0, 0, 0, 0.00025]))
        assert_allclose(self.integrator.ddq_p, np.array([0, 0, 0, -0.805], dtype=float))

    def test_residual(self):
        self._predict_from_rest()
        trial_q = np.array([0, 0.1, 0.2, 0.3])
        res = self.integrator.residual(trial_q)
        assert_allclose(res, np.array([[-0.015789, 0.181717, 0.368988, 0.113904]]), 1e-06, 1e-06)

    def test_jacobian(self):
        self._predict_from_rest()
        trial_q = np.array([0, 0.1, 0.2, 0.3])
        jac = self.integrator.jacobian(trial_q)
        assert_allclose(jac, np.array([[421.152632, 0.892105, 0, 0], [0.892105, 422.232964, 0.892105, 0],
                                       [0, 0.892105, 422.260665, 0.892105], [0, 0, 0.892105, 421.235734]]), 1e-06)

    def test_set_correction(self):
        self._predict_from_rest()
        corrected_q = np.array([0, 0.1, -0.2, 0.3])
        self.integrator.set_correction(corrected_q)
        self.assertEqual(self.integrator.t_p, 0.1)
        assert_allclose(self.integrator.q_p, np.array([0, 0.1, -0.2, 0.3]))
        assert_allclose(self.integrator.dq_p, np.array([0, 1.995, -3.99, 5.98525]))
        assert_allclose(self.integrator.ddq_p, np.array([0, 36.1, -72.2, 107.495]))
class NewmarkBetaTest(TestCase):
    """Tests for the NewmarkBeta integrator's parameter handling."""

    def setUp(self):
        self.integrator = NewmarkBeta(M, f_int, f_ext, K, D)
        self.integrator.dt = 0.1

    def tearDown(self):
        pass

    def test_set_integration_parameters(self):
        # Constructor arguments must be stored unchanged on the instance.
        expected = {"beta": 0.5, "gamma": 0.6}
        integrator = NewmarkBeta(M, f_int, f_ext, K, D, expected["beta"], expected["gamma"])
        for name, value in expected.items():
            self.assertEqual(getattr(integrator, name), value)
class WBZAlphaTest(TestCase):
    """Checks the default integration parameters derived by WBZAlpha."""

    def setUp(self):
        self.integrator = WBZAlpha(M, f_int, f_ext, K, D)
        self.integrator.dt = 0.1

    def tearDown(self):
        pass

    def test_set_integration_parameters(self):
        # Hard-coded expected defaults for this scheme.
        expected_defaults = {
            "alpha_m": -0.05263157894736841,
            "alpha_f": 0.0,
            "beta": 0.27700831,
            "gamma": 0.55263157894736841,
        }
        for attribute, expected in expected_defaults.items():
            self.assertAlmostEqual(getattr(self.integrator, attribute), expected)
class HHTAlphaTest(TestCase):
    """Checks the default integration parameters derived by HHTAlpha."""

    def setUp(self):
        self.integrator = HHTAlpha(M, f_int, f_ext, K, D)
        self.integrator.dt = 0.1

    def tearDown(self):
        pass

    def test_set_integration_parameters(self):
        # Hard-coded expected defaults for this scheme.
        expected_defaults = {
            "alpha_m": 0.0,
            "alpha_f": 0.052631579,
            "beta": 0.27700831024930744,
            "gamma": 0.55263157894736841,
        }
        for attribute, expected in expected_defaults.items():
            self.assertAlmostEqual(getattr(self.integrator, attribute), expected)
class LinearOneMassOscillatorTest(TestCase):
    r"""
    Integration test: undamped linear one-mass oscillator.

         _____
    /|  k     |     |
    /|-/\/\/\-|  m  |
    /|        |_____|

    The GeneralizedAlpha numerical solution is compared against the
    analytical solution q0 * cos(sqrt(k/m) * t).
    """

    def setUp(self):
        self.m = 0.1
        self.k = 0.5

        # Local 1-DOF system callables closing over the scalar mass/stiffness.
        def M(q, dq, t):
            return np.array([self.m])

        def K(q, dq, t):
            return np.array([self.k])

        def f_int(q, dq, t):
            return np.array([self.k]).dot(q)

        def f_ext(q, dq, t):
            return np.array([0])

        def D(q, dq, t):
            return np.array([0])

        self.integrator = GeneralizedAlpha(M, f_int, f_ext, K, D)
        self.integrator.dt = 0.001
        self.integration_stepper = LinearIntegrationStepper(self.integrator)
        self.linear_solver = ScipySparseLinearSolver()
        self.integration_stepper.linear_solver_func = self.linear_solver.solve

    def test_linear_oscillator(self):
        """March the oscillator for 2 s and compare against the cos solution."""
        t_end = 2
        t0 = 0.0
        q0 = 0.1
        q = np.array([q0])
        dq = np.array([0.0])
        ddq = np.array([0.0])
        N_dt = int((t_end - t0) / self.integrator.dt)
        t = t0
        q_numerical = np.zeros(N_dt)
        # BUG FIX: assign the scalar q[0], not the length-1 array q --
        # stuffing an array into a scalar slot is an error in modern NumPy.
        q_numerical[0] = q[0]
        q_analytical = q_numerical.copy()
        for i in range(1, N_dt):
            t, q, dq, ddq = self.integration_stepper.step(t, q, dq, ddq)
            q_numerical[i] = q[0]
            q_analytical[i] = q0 * np.cos(np.sqrt(self.k / self.m) * t)
        for num, ana in zip(q_numerical, q_analytical):
            assert_allclose(num, ana, atol=1e-5)

        def plot_oscillator_path(u_plot, label_name):
            plt.plot(range(0, N_dt), u_plot, label=label_name)
            plt.title('Linear oscillator-test')
            return

        # UNCOMMENT THESE LINES IF YOU LIKE TO SEE A TRAJECTORY (THIS CAN NOT
        # BE DONE FOR GITLAB-RUNNER).
        # BUG FIX: these calls were active although the comment above says
        # they must stay commented out for the CI runner.
        # plot_oscillator_path(q_analytical, 'Analytisch')
        # plot_oscillator_path(q_numerical, 'Numerisch')
        # plt.legend()
# plt.show() | tests/test_integrator.py | from unittest import TestCase
import numpy as np
from scipy.sparse import csr_matrix
from amfe.solver.integrator import *
from numpy.testing import assert_allclose
import matplotlib.pyplot as plt
from amfe.linalg.linearsolvers import ScipySparseLinearSolver
def M(q, dq, t):
return 2 * csr_matrix(np.eye(4))
def K(q, dq, t):
return csr_matrix(np.array([[2, -0.3, 0, 0], [-0.3, 4, -0.3, 0], [0, -0.3, 4, -0.3], [0, 0, -0.3, 2]])) + np.diag(q)
def D(q, dq, t):
return csr_matrix(np.array([[0.2, 0.1, 0, 0], [0.1, 0.2, 0.1, 0], [0, 0.1, 0.2, 0.1], [0, 0, 0.1, 0.2]]))
def f_int(q, dq, t):
return K(q, dq, t) @ q + D(q, dq, t) @ dq
def f_ext(q, dq, t):
f = 2 * t
return np.array([0, 0, 0, f])
class GeneralizedAlphaTest(TestCase):
def setUp(self):
self.integrator = GeneralizedAlpha(M, f_int, f_ext, K, D)
self.integrator.dt = 0.1
def tearDown(self):
pass
def test_set_integration_parameters(self):
alpha_m = 0.3
alpha_f = 0.4
beta = 0.5
gamma = 0.6
integrator = GeneralizedAlpha(M, f_int, f_ext, K, D, alpha_m, alpha_f, beta, gamma)
self.assertEqual(integrator.alpha_m, alpha_m)
self.assertEqual(integrator.alpha_f, alpha_f)
self.assertEqual(integrator.beta, beta)
self.assertEqual(integrator.gamma, gamma)
def test_get_midstep(self):
q_0 = np.array([0, 0.2, 0, 0.1])
q_1 = np.array([0.1, 0, -0.25, 0.4])
alpha = 0.4
q_mid = self.integrator._get_midstep(alpha, q_0, q_1)
assert_allclose(q_mid, np.array([0.06, 0.08, -0.15, 0.28]))
def test_set_prediction(self):
t_0 = 0.0
q_0 = np.array([0, 0, 0, 0], dtype=float)
dq_0 = np.array([0, 0, 0, 0], dtype=float)
ddq_0 = np.array([0, 0, 0, 1], dtype=float)
self.integrator.set_prediction(q_0, dq_0, ddq_0, t_0)
self.assertEqual(self.integrator.t_p, 0.1)
assert_allclose(self.integrator.q_p, np.array([0, 0, 0, 0]))
assert_allclose(self.integrator.dq_p, np.array([0, 0, 0, 0.00025]))
assert_allclose(self.integrator.ddq_p, np.array([0, 0, 0, -0.805], dtype=float))
def test_residual(self):
t_0 = 0.0
q_0 = np.array([0, 0, 0, 0], dtype=float)
dq_0 = np.array([0, 0, 0, 0], dtype=float)
ddq_0 = np.array([0, 0, 0, 1], dtype=float)
self.integrator.set_prediction(q_0, dq_0, ddq_0, t_0)
q_n = np.array([0, 0.1, 0.2, 0.3])
res = self.integrator.residual(q_n)
assert_allclose(res, np.array([[-0.015789, 0.181717, 0.368988, 0.113904]]), 1e-06, 1e-06)
def test_jacobian(self):
t_0 = 0.0
q_0 = np.array([0, 0, 0, 0], dtype=float)
dq_0 = np.array([0, 0, 0, 0], dtype=float)
ddq_0 = np.array([0, 0, 0, 1], dtype=float)
self.integrator.set_prediction(q_0, dq_0, ddq_0, t_0)
q_n = np.array([0, 0.1, 0.2, 0.3])
jac = self.integrator.jacobian(q_n)
assert_allclose(jac, np.array([[421.152632, 0.892105, 0, 0], [0.892105, 422.232964, 0.892105, 0],
[0, 0.892105, 422.260665, 0.892105], [0, 0, 0.892105, 421.235734]]), 1e-06)
def test_set_correction(self):
t_0 = 0.0
q_0 = np.array([0, 0, 0, 0], dtype=float)
dq_0 = np.array([0, 0, 0, 0], dtype=float)
ddq_0 = np.array([0, 0, 0, 1], dtype=float)
self.integrator.set_prediction(q_0, dq_0, ddq_0, t_0)
q_p = np.array([0, 0.1, -0.2, 0.3])
self.integrator.set_correction(q_p)
self.assertEqual(self.integrator.t_p, 0.1)
assert_allclose(self.integrator.q_p, np.array([0, 0.1, -0.2, 0.3]))
assert_allclose(self.integrator.dq_p, np.array([0, 1.995, -3.99, 5.98525]))
assert_allclose(self.integrator.ddq_p, np.array([0, 36.1, -72.2, 107.495]))
class NewmarkBetaTest(TestCase):
def setUp(self):
self.integrator = NewmarkBeta(M, f_int, f_ext, K, D)
self.integrator.dt = 0.1
def tearDown(self):
pass
def test_set_integration_parameters(self):
beta = 0.5
gamma = 0.6
integrator = NewmarkBeta(M, f_int, f_ext, K, D, beta, gamma)
self.assertEqual(integrator.beta, beta)
self.assertEqual(integrator.gamma, gamma)
class WBZAlphaTest(TestCase):
def setUp(self):
self.integrator = WBZAlpha(M, f_int, f_ext, K, D)
self.integrator.dt = 0.1
def tearDown(self):
pass
def test_set_integration_parameters(self):
self.assertAlmostEqual(self.integrator.alpha_m, -0.05263157894736841)
self.assertAlmostEqual(self.integrator.alpha_f, 0.0)
self.assertAlmostEqual(self.integrator.beta, 0.27700831)
self.assertAlmostEqual(self.integrator.gamma, 0.55263157894736841)
class HHTAlphaTest(TestCase):
def setUp(self):
self.integrator = HHTAlpha(M, f_int, f_ext, K, D)
self.integrator.dt = 0.1
def tearDown(self):
pass
def test_set_integration_parameters(self):
self.assertAlmostEqual(self.integrator.alpha_m, 0.0)
self.assertAlmostEqual(self.integrator.alpha_f, 0.052631579)
self.assertAlmostEqual(self.integrator.beta, 0.27700831024930744)
self.assertAlmostEqual(self.integrator.gamma, 0.55263157894736841)
class LinearOneMassOscillatorTest(TestCase):
r"""
_____
/| k | |
/|-/\/\/\-| m |
/| |_____|
"""
def setUp(self):
self.m = 0.1
self.k = 0.5
def M(q, dq, t):
return np.array([self.m])
def K(q, dq, t):
return np.array([self.k])
def f_int(q, dq, t):
return np.array([self.k]).dot(q)
def f_ext(q, dq, t):
return np.array([0])
def D(q, dq, t):
return np.array([0])
self.integrator = GeneralizedAlpha(M, f_int, f_ext, K, D)
self.integrator.dt = 0.001
self.integration_stepper = LinearIntegrationStepper(self.integrator)
self.linear_solver = ScipySparseLinearSolver()
self.integration_stepper.linear_solver_func = self.linear_solver.solve
def test_linear_oscillator(self):
t_end = 2
t0 = 0.0
q0 = 0.1
q = np.array([q0])
dq = np.array([0.0])
ddq = np.array([0.0])
N_dt = int((t_end-t0)/self.integrator.dt)
t = t0
q_numerical = np.zeros(N_dt)
q_numerical[0] = q
q_analytical = q_numerical.copy()
for i in range(1, N_dt):
t, q, dq, ddq = self.integration_stepper.step(t, q, dq, ddq)
q_numerical[i] = q[0]
q_analytical[i] = q0*np.cos(np.sqrt(self.k/self.m)*t)
for num, ana in zip(q_numerical, q_analytical):
assert_allclose(num, ana, atol=1e-5)
def plot_oscillator_path(u_plot, label_name):
plt.plot(range(0, N_dt), u_plot, label=label_name)
plt.title('Linear oscillator-test')
return
# UNCOMMENT THESE LINES IF YOU LIKE TO SEE A TRAJECTORY (THIS CAN NOT BE DONE FOR GITLAB-RUNNER
plot_oscillator_path(q_analytical, 'Analytisch')
plot_oscillator_path(q_numerical, 'Numerisch')
plt.legend()
# plt.show() | 0.729327 | 0.664903 |
import boto3
import time
import os
from hypchat import HypChat
# given how modules work with python it was easiest to use globals
# I know, I know
messages = []
hc_room = None
# yeah this is a mess and should have been fully static sometimes
# it is easier to just avoid side effects, you know?
class KLog(object):
def __init__(self, bucket_name, key, region="us-east-1"):
self.conn = boto3.resource("s3", region)
self.bucket = self.conn.Bucket(bucket_name)
self.key = key
self.log_file = self.bucket.Object(key)
# add a log msg to the list
# because we are doing unique files per run we store all messages in mem
# then before krampus exits we upload to the specified key
@staticmethod
def log(msg, level="info"):
levels = ["info", "warn", "critical"] # keep it simple
level = level.lower()
if level not in levels:
level = "info" # don't allow random stuff
# print the stdout part
# stdout print prepends
prepends = {
"info": "[i]",
"warn": "[-]",
"critical": "[!]"
}
print "%s %s" % (prepends[level], msg)
# see if it should go to the hipchat room
if level == "critical":
KLog.hipLog(msg)
# due to interesting decisions log message stay in mem until run finish
messages.append({
"level": level,
"msg": msg,
"timestamp": int(time.time())
})
# log something to the hipchat room
@staticmethod
def hipLog(msg):
if not hc_room:
# don't change below to critical, think about it...
KLog.log("tried to log to hipchat without a working connection", "warn")
return False
# otherwise let's set as red
hc_room.notification("KRAMPUS: " + msg, "red")
# write the final product
def writeLogFile(self):
# we will need to go through each of the entries to make them into a
# friendly-ish log format. instead of dumping json objs from the
# array of messages, we'll create newline delimited log messages
# to write to our key
buff = ""
for m in messages:
buff += "[%d] %s: %s\n" % (m['timestamp'], m['level'].upper(), m['msg'])
# now we can worry about putting to s3
resp = self.bucket.Object(self.key).put(Body=buff)
return resp
# just trust me when I say at the time I was out of options and needed global namespace
# should have planned better man
if os.getenv('HIPCHAT_ACCESS_TOKEN') and os.getenv('HIPCHAT_ROOM'):
try:
hc_room = HypChat(os.getenv('HIPCHAT_ACCESS_TOKEN')).get_room(os.getenv('HIPCHAT_ROOM'))
except:
KLog.log("problem starting hipchat, check env vars and connection", "warn") | lib/krampus_logging.py | import boto3
import time
import os
from hypchat import HypChat
# given how modules work with python it was easiest to use globals
# I know, I know
messages = []
hc_room = None
# yeah this is a mess and should have been fully static sometimes
# it is easier to just avoid side effects, you know?
class KLog(object):
def __init__(self, bucket_name, key, region="us-east-1"):
self.conn = boto3.resource("s3", region)
self.bucket = self.conn.Bucket(bucket_name)
self.key = key
self.log_file = self.bucket.Object(key)
# add a log msg to the list
# because we are doing unique files per run we store all messages in mem
# then before krampus exits we upload to the specified key
@staticmethod
def log(msg, level="info"):
levels = ["info", "warn", "critical"] # keep it simple
level = level.lower()
if level not in levels:
level = "info" # don't allow random stuff
# print the stdout part
# stdout print prepends
prepends = {
"info": "[i]",
"warn": "[-]",
"critical": "[!]"
}
print "%s %s" % (prepends[level], msg)
# see if it should go to the hipchat room
if level == "critical":
KLog.hipLog(msg)
# due to interesting decisions log message stay in mem until run finish
messages.append({
"level": level,
"msg": msg,
"timestamp": int(time.time())
})
# log something to the hipchat room
@staticmethod
def hipLog(msg):
if not hc_room:
# don't change below to critical, think about it...
KLog.log("tried to log to hipchat without a working connection", "warn")
return False
# otherwise let's set as red
hc_room.notification("KRAMPUS: " + msg, "red")
# write the final product
def writeLogFile(self):
# we will need to go through each of the entries to make them into a
# friendly-ish log format. instead of dumping json objs from the
# array of messages, we'll create newline delimited log messages
# to write to our key
buff = ""
for m in messages:
buff += "[%d] %s: %s\n" % (m['timestamp'], m['level'].upper(), m['msg'])
# now we can worry about putting to s3
resp = self.bucket.Object(self.key).put(Body=buff)
return resp
# just trust me when I say at the time I was out of options and needed global namespace
# should have planned better man
if os.getenv('HIPCHAT_ACCESS_TOKEN') and os.getenv('HIPCHAT_ROOM'):
try:
hc_room = HypChat(os.getenv('HIPCHAT_ACCESS_TOKEN')).get_room(os.getenv('HIPCHAT_ROOM'))
except:
KLog.log("problem starting hipchat, check env vars and connection", "warn") | 0.133698 | 0.169337 |
from collections import namedtuple
from prophyc.generators import base, word_wrap
INDENT_STR = u" "
MAX_LINE_WIDTH = 100
DocStr = namedtuple("DocStr", "block, inline")
def _form_doc(model_node, max_inl_docstring_len, indent_level):
    """Build the block- and inline-comment forms of a node's docstring.

    Returns a DocStr pair; at most one of the two fields is non-empty.
    A short single-line docstring becomes an inline "// ..." comment,
    anything longer becomes a multi-line block comment.
    """
    block_doc, inline_doc = "", ""
    if model_node.docstring:
        if len(model_node.docstring) <= max_inl_docstring_len and "\n" not in model_node.docstring:
            inline_doc = u" // {}".format(model_node.docstring)
        else:
            # FIX: was "elif model_node.docstring:" -- always true here since
            # the enclosing "if" already established a non-empty docstring.
            block_doc = u"\n" + "".join(
                _gen_multi_line_doc(model_node.docstring, indent_level=indent_level,
                                    block_header=model_node.name))
    return DocStr(block_doc, inline_doc)
# Shared line breaker that wraps yielded text to MAX_LINE_WIDTH and frames
# it as a C-style block comment ("/* ... */").
schema_line_breaker = word_wrap.BreakLinesByWidth(MAX_LINE_WIDTH, " ", "/* ", " * ", " ", " */")

@schema_line_breaker
def _gen_multi_line_doc(block_comment_text, indent_level=0, block_header=""):
    """Yield raw doc lines (optional header, then paragraphs) to be wrapped.

    NOTE(review): decorated by schema_line_breaker, so callers receive the
    wrapped, comment-framed lines rather than these raw yields.
    """
    assert "\n" not in block_header, "Will not work with line breaks in header bar."
    if block_header:
        if len(block_comment_text) >= 250:
            # very long docstrings get a horizontal bar with the header
            # embedded: "=" at top level, "-" when nested
            schema_line_breaker.make_a_bar("-" if indent_level else "=", block_header)
        yield block_header
    for paragraph in block_comment_text.split("\n"):
        yield paragraph
def _columnizer(model_node, column_splitter, max_line_width=100):
    """Yield text pieces rendering the node's members as aligned columns.

    column_splitter maps a member to a tuple of cell strings; cells are
    padded per-column to the widest entry. A member's docstring is emitted
    either as a block comment before its row or inline after the last cell.
    """
    members_table = [column_splitter(m) for m in model_node.members]
    # widest cell in each column determines that column's padding
    widths = [max(len(str(r)) for r in g) for g in zip(*members_table)]
    max_inline_comment_width = max_line_width - sum(widths)
    for member, columns in zip(model_node.members, members_table):
        doc = _form_doc(member, max_inline_comment_width, indent_level=1)
        if doc.block:
            yield doc.block
        yield u"\n" + INDENT_STR
        # enumerate starts at 1 - len(columns) so the counter is 0 (falsy)
        # exactly on the last column: pad between cells, but after the last
        # cell only when an inline doc comment follows.
        for is_not_last, (cell_width, cell_str) in enumerate(zip(widths, columns), 1 - len(columns)):
            yield cell_str
            padding = u" " * (max(0, cell_width - len(cell_str)))
            if is_not_last:
                yield padding
            elif doc.inline:
                yield padding + doc.inline
    if model_node.members:
        yield "\n"
def generate_schema_container(model_node, designator, column_splitter):
    """Render a container node (enum/struct/union) as prophy schema text."""
    doc_lines = u""
    if model_node.docstring:
        rendered = u"".join(_gen_multi_line_doc(model_node.docstring, indent_level=0,
                                                block_header=model_node.name))
        if rendered:
            doc_lines = rendered + u"\n"
    body = u"".join(_columnizer(model_node, column_splitter, max_line_width=100))
    return u"{}{} {} {{{}}};".format(doc_lines, designator, model_node.name, body)
class SchemaTranslator(base.TranslatorBase):
    """Translates model nodes (includes, constants, enums, structs, unions)
    into prophy schema (.prophy) source text."""

    block_template = u'''{content}'''

    @staticmethod
    def translate_include(include):
        # an include may carry a docstring rendered as block or inline comment
        doc = _form_doc(include, 50, indent_level=0)
        return u"{d.block}#include \"{0.name}\"{d.inline}".format(include, d=doc)

    @staticmethod
    def translate_constant(constant):
        doc = _form_doc(constant, max_inl_docstring_len=50, indent_level=0)
        return u"{d.block}\n{0.name} = {0.value};{d.inline}".format(constant, d=doc)

    @staticmethod
    def translate_enum(enumerator):
        def column_selector(member):
            # two columns per enumerator: name and " = value;"
            value = u" = {};".format(member.value)
            return member.name, value
        return generate_schema_container(enumerator, "enum", column_selector)

    @staticmethod
    def translate_struct(struct):
        def column_selector(member):
            # three columns per field: type, separator, name with sizing suffix
            type_ = member.value
            if member.optional:
                type_ += u"*"
            if member.is_fixed:
                name = u"{m.name}[{m.size}];"
            elif member.is_limited:
                name = u"{m.name}<{m.size}>;"
            elif member.is_dynamic:
                name = u"{m.name}<@{m.bound}>;"
            elif member.greedy:
                name = u"{m.name}<...>;"
            else:
                name = u"{m.name};"
            return type_, u" ", name.format(m=member)
        return generate_schema_container(struct, u"struct", column_selector)

    @staticmethod
    def translate_union(union):
        def column_selector(member):
            # three columns per arm: "discriminator: ", type, " name;"
            discriminator = u"{}: ".format(member.discriminator)
            field_type = member.value
            field_name = u" {};".format(member.name)
            return discriminator, field_type, field_name
        return generate_schema_container(union, u"union", column_selector)

    @classmethod
    def _make_lines_splitter(cls, previous_node_type, current_node_type):
        # Decide how many blank lines separate two consecutive top-level
        # renderings, based on the node kinds on either side.
        if not previous_node_type:
            return u""
        if previous_node_type == "Include" and current_node_type != "Include":
            return u"\n\n"
        if previous_node_type in ("Struct", "Union") or current_node_type in ("Enum", "Struct", "Union"):
            return u"\n\n\n"
        if previous_node_type != current_node_type:
            return u"\n\n"
        return u"\n"
class SchemaGenerator(base.GeneratorBase):
top_level_translators = {
'.prophy': SchemaTranslator,
} | prophyc/generators/prophy.py | from collections import namedtuple
from prophyc.generators import base, word_wrap
INDENT_STR = u" "
MAX_LINE_WIDTH = 100
DocStr = namedtuple("DocStr", "block, inline")
def _form_doc(model_node, max_inl_docstring_len, indent_level):
block_doc, inline_doc = "", ""
if model_node.docstring:
if len(model_node.docstring) <= max_inl_docstring_len and "\n" not in model_node.docstring:
inline_doc = u" // {}".format(model_node.docstring)
elif model_node.docstring:
block_doc = u"\n" + "".join(
_gen_multi_line_doc(model_node.docstring, indent_level=indent_level, block_header=model_node.name))
return DocStr(block_doc, inline_doc)
schema_line_breaker = word_wrap.BreakLinesByWidth(MAX_LINE_WIDTH, " ", "/* ", " * ", " ", " */")
@schema_line_breaker
def _gen_multi_line_doc(block_comment_text, indent_level=0, block_header=""):
assert "\n" not in block_header, "Will not work with line breaks in header bar."
if block_header:
if len(block_comment_text) >= 250:
schema_line_breaker.make_a_bar("-" if indent_level else "=", block_header)
yield block_header
for paragraph in block_comment_text.split("\n"):
yield paragraph
def _columnizer(model_node, column_splitter, max_line_width=100):
members_table = [column_splitter(m) for m in model_node.members]
widths = [max(len(str(r)) for r in g) for g in zip(*members_table)]
max_inline_comment_width = max_line_width - sum(widths)
for member, columns in zip(model_node.members, members_table):
doc = _form_doc(member, max_inline_comment_width, indent_level=1)
if doc.block:
yield doc.block
yield u"\n" + INDENT_STR
for is_not_last, (cell_width, cell_str) in enumerate(zip(widths, columns), 1 - len(columns)):
yield cell_str
padding = u" " * (max(0, cell_width - len(cell_str)))
if is_not_last:
yield padding
elif doc.inline:
yield padding + doc.inline
if model_node.members:
yield "\n"
def generate_schema_container(model_node, designator, column_splitter):
if model_node.docstring:
block_docstring = u"".join(_gen_multi_line_doc(model_node.docstring, indent_level=0,
block_header=model_node.name))
if block_docstring:
block_docstring += u"\n"
else:
block_docstring = u""
members = u"".join(_columnizer(model_node, column_splitter, max_line_width=100))
return u"{}{} {} {{{}}};".format(block_docstring, designator, model_node.name, members)
class SchemaTranslator(base.TranslatorBase):
block_template = u'''{content}'''
@staticmethod
def translate_include(include):
doc = _form_doc(include, 50, indent_level=0)
return u"{d.block}#include \"{0.name}\"{d.inline}".format(include, d=doc)
@staticmethod
def translate_constant(constant):
doc = _form_doc(constant, max_inl_docstring_len=50, indent_level=0)
return u"{d.block}\n{0.name} = {0.value};{d.inline}".format(constant, d=doc)
@staticmethod
def translate_enum(enumerator):
def column_selector(member):
value = u" = {};".format(member.value)
return member.name, value
return generate_schema_container(enumerator, "enum", column_selector)
@staticmethod
def translate_struct(struct):
def column_selector(member):
type_ = member.value
if member.optional:
type_ += u"*"
if member.is_fixed:
name = u"{m.name}[{m.size}];"
elif member.is_limited:
name = u"{m.name}<{m.size}>;"
elif member.is_dynamic:
name = u"{m.name}<@{m.bound}>;"
elif member.greedy:
name = u"{m.name}<...>;"
else:
name = u"{m.name};"
return type_, u" ", name.format(m=member)
return generate_schema_container(struct, u"struct", column_selector)
@staticmethod
def translate_union(union):
def column_selector(member):
discriminator = u"{}: ".format(member.discriminator)
field_type = member.value
field_name = u" {};".format(member.name)
return discriminator, field_type, field_name
return generate_schema_container(union, u"union", column_selector)
@classmethod
def _make_lines_splitter(cls, previous_node_type, current_node_type):
if not previous_node_type:
return u""
if previous_node_type == "Include" and current_node_type != "Include":
return u"\n\n"
if previous_node_type in ("Struct", "Union") or current_node_type in ("Enum", "Struct", "Union"):
return u"\n\n\n"
if previous_node_type != current_node_type:
return u"\n\n"
return u"\n"
class SchemaGenerator(base.GeneratorBase):
top_level_translators = {
'.prophy': SchemaTranslator,
} | 0.584745 | 0.177704 |
import pytest
import numpy as np
import pandas as pd
from trees.RobustTree import RobustTreeClassifier
def test_RobustTree_X_noninteger_error():
    """Fitting on non-integer covariates must raise a ValueError."""
    clf = RobustTreeClassifier(depth=1, time_limit=20)
    data = pd.DataFrame(
        {"x1": [1, 2, 2, 2, 3], "x2": [1, 2, 1, 0.1, 1], "y": [1, 1, -1, -1, -1]}
    )
    y = data.pop("y")
    with pytest.raises(
        ValueError,
        match="Found non-integer values.",
    ):
        clf.fit(data, y)
def test_RobustTree_cost_shape_error():
    """fit() must validate that the uncertainty costs match X in sample
    count, column count, column names, and container type."""
    clf = RobustTreeClassifier(depth=1, time_limit=20)
    data = pd.DataFrame(
        {"x1": [1, 2, 2, 2, 3], "x2": [1, 2, 1, 0, 1], "y": [1, 1, -1, -1, -1]},
        index=["A", "B", "C", "D", "E"],
    )
    y = data.pop("y")
    # Different number of data samples
    with pytest.raises(
        ValueError,
        match="Input covariates has 5 samples, but uncertainty costs has 4",
    ):
        costs = pd.DataFrame(
            {"x1": [1, 2, 2, 2], "x2": [1, 2, 1, 1]}, index=["A", "B", "C", "D"]
        )
        clf.fit(data, y, costs=costs, budget=5, verbose=False)
    # Different number of features
    with pytest.raises(
        ValueError,
        match="Input covariates has 2 columns but uncertainty costs has 3 columns",
    ):
        costs = pd.DataFrame(
            {"x1": [1, 2, 2, 2, 3], "x2": [1, 2, 1, 7, 1], "x3": [1, 1, 1, 1, 1]},
            index=["A", "B", "C", "D", "E"],
        )
        clf.fit(data, y, costs=costs, budget=5, verbose=False)
    # Different column names
    with pytest.raises(
        KeyError,
        match="uncertainty costs should have the same columns as the input covariates",
    ):
        costs = pd.DataFrame(
            {"x1": [1, 2, 2, 2, 3], "x3": [1, 2, 1, 7, 1]},
            index=["A", "B", "C", "D", "E"],
        )
        clf.fit(data, y, costs=costs, budget=5, verbose=False)
    # When X is not a dataframe, but costs is a dataframe with column names
    with pytest.raises(
        KeyError,
        match="uncertainty costs should have the same columns as the input covariates",
    ):
        data_np = np.array([[1, 2, 2, 2, 3], [1, 2, 1, 0, 1]]).transpose()
        costs = pd.DataFrame(
            {"x1": [1, 2, 2, 2, 3], "x2": [1, 2, 1, 7, 1]},
            index=["A", "B", "C", "D", "E"],
        )
        clf.fit(data_np, y, costs=costs, budget=5, verbose=False)
    # When X is a dataframe, but costs are not
    with pytest.raises(
        TypeError,
        match="uncertainty costs should be a Pandas DataFrame with the same columns as the input covariates",
    ):
        costs = np.transpose([[1, 2, 2, 2, 3], [1, 2, 1, 7, 1]])
        clf.fit(data, y, costs=costs, budget=5, verbose=False)
@pytest.mark.test_gurobi
def test_RobustTree_prediction_shape_error():
    """predict() must validate the test covariates against the covariates
    the model was fitted on (integrality, shape, column names, type)."""
    # Fit a small model first so predict() can be exercised.
    clf = RobustTreeClassifier(depth=1, time_limit=20)
    train = pd.DataFrame(
        {"x1": [1, 2, 2, 2, 3], "x2": [1, 2, 1, 0, 1], "y": [1, 1, -1, -1, -1]},
        index=["A", "B", "C", "D", "E"],
    )
    y = train.pop("y")
    clf.fit(train, y, verbose=False)
    # Non-integer data
    with pytest.raises(
        ValueError,
        match="Found non-integer values.",
    ):
        test = pd.DataFrame(
            {"x1": [1, 2, 2, 2, 3], "x2": [1, 2, 1, 0.1, 1]},
            index=["F", "G", "H", "I", "J"],
        )
        clf.predict(test)
    # Different number of features
    with pytest.raises(
        ValueError,
        match="Input covariates has 2 columns but test covariates has 3 columns",
    ):
        test = pd.DataFrame(
            {"x1": [1, 2, 2, 2, 3], "x2": [1, 2, 1, 7, 1], "x3": [1, 1, 1, 1, 1]},
            index=["F", "G", "H", "I", "J"],
        )
        clf.predict(test)
    # Different column names
    with pytest.raises(
        KeyError,
        match="test covariates should have the same columns as the input covariates",
    ):
        test = pd.DataFrame(
            {"x1": [1, 2, 2, 2, 3], "x3": [1, 2, 1, 7, 1]},
            index=["F", "G", "H", "I", "J"],
        )
        clf.predict(test)
    # When X is a dataframe, but test is not
    with pytest.raises(
        TypeError,
        match="test covariates should be a Pandas DataFrame with the same columns as the input covariates",
    ):
        test = np.transpose([[1, 2, 2, 2, 3], [1, 2, 1, 7, 1]])
        clf.predict(test)
    # When X is not a dataframe, but test is a dataframe with column names
    with pytest.raises(
        KeyError,
        match="test covariates should have the same columns as the input covariates",
    ):
        test = pd.DataFrame(
            {"x1": [1, 2, 2, 2, 3], "x2": [1, 2, 1, 7, 1]},
            index=["F", "G", "H", "I", "J"],
        )
        # refit on a plain ndarray so the fitted model has no column names
        train_nodf = np.transpose([[1, 2, 2, 2, 3], [1, 2, 1, 0, 1]])
        clf.fit(train_nodf, y, verbose=False)
        clf.predict(test)
@pytest.mark.test_gurobi
def test_RobustTree_with_uncertainty_success():
    """End-to-end fit/predict with uncertainty costs and a budget."""
    clf = RobustTreeClassifier(depth=1, time_limit=20)
    train_index = ["A", "B", "C", "D", "E"]
    train = pd.DataFrame(
        {"x1": [1, 2, 2, 2, 3], "x2": [1, 2, 1, 0, 1], "y": [1, 1, -1, -1, -1]},
        index=train_index,
    )
    y = train.pop("y")
    costs = pd.DataFrame(
        {"x1": [1, 2, 2, 2, 3], "x2": [1, 2, 1, 7, 1]}, index=train_index
    )
    test = pd.DataFrame(
        {"x1": [1, 2, 2, 2], "x2": [1, 2, 1, 7]}, index=["F", "G", "H", "I"]
    )
    clf.fit(train, y, costs=costs, budget=5, verbose=False)
    assert hasattr(clf, "model")
    y_pred = clf.predict(test)
    assert y_pred.shape[0] == test.shape[0]
@pytest.mark.test_gurobi
def test_RobustTree_no_uncertainty_success():
    """End-to-end fit/predict without uncertainty costs."""
    clf = RobustTreeClassifier(depth=1, time_limit=20)
    train = pd.DataFrame(
        {"x1": [1, 2, 2, 2, 3], "x2": [1, 2, 1, 0, 1], "y": [1, 1, -1, -1, -1]},
        index=["A", "B", "C", "D", "E"],
    )
    test = pd.DataFrame(
        {"x1": [1, 2, 2, 2], "x2": [1, 2, 1, 7]}, index=["F", "G", "H", "I"]
    )
    y = train.pop("y")
    clf.fit(train, y, verbose=False)
    assert hasattr(clf, "model")
    y_pred = clf.predict(test)
assert y_pred.shape[0] == test.shape[0] | trees/tests/test_RobustTree.py | import pytest
import numpy as np
import pandas as pd
from trees.RobustTree import RobustTreeClassifier
def test_RobustTree_X_noninteger_error():
"""Test whether X is integer-valued"""
clf = RobustTreeClassifier(depth=1, time_limit=20)
with pytest.raises(
ValueError,
match="Found non-integer values.",
):
data = pd.DataFrame(
{"x1": [1, 2, 2, 2, 3], "x2": [1, 2, 1, 0.1, 1], "y": [1, 1, -1, -1, -1]}
)
y = data.pop("y")
clf.fit(data, y)
def test_RobustTree_cost_shape_error():
"""Test whether X and cost have the same size and columns"""
clf = RobustTreeClassifier(depth=1, time_limit=20)
data = pd.DataFrame(
{"x1": [1, 2, 2, 2, 3], "x2": [1, 2, 1, 0, 1], "y": [1, 1, -1, -1, -1]},
index=["A", "B", "C", "D", "E"],
)
y = data.pop("y")
# Different number of data samples
with pytest.raises(
ValueError,
match="Input covariates has 5 samples, but uncertainty costs has 4",
):
costs = pd.DataFrame(
{"x1": [1, 2, 2, 2], "x2": [1, 2, 1, 1]}, index=["A", "B", "C", "D"]
)
clf.fit(data, y, costs=costs, budget=5, verbose=False)
# Different number of features
with pytest.raises(
ValueError,
match="Input covariates has 2 columns but uncertainty costs has 3 columns",
):
costs = pd.DataFrame(
{"x1": [1, 2, 2, 2, 3], "x2": [1, 2, 1, 7, 1], "x3": [1, 1, 1, 1, 1]},
index=["A", "B", "C", "D", "E"],
)
clf.fit(data, y, costs=costs, budget=5, verbose=False)
# Different column names
with pytest.raises(
KeyError,
match="uncertainty costs should have the same columns as the input covariates",
):
costs = pd.DataFrame(
{"x1": [1, 2, 2, 2, 3], "x3": [1, 2, 1, 7, 1]},
index=["A", "B", "C", "D", "E"],
)
clf.fit(data, y, costs=costs, budget=5, verbose=False)
# When X is not a dataframe, but costs is a dataframe with column names
with pytest.raises(
KeyError,
match="uncertainty costs should have the same columns as the input covariates",
):
data_np = np.array([[1, 2, 2, 2, 3], [1, 2, 1, 0, 1]]).transpose()
costs = pd.DataFrame(
{"x1": [1, 2, 2, 2, 3], "x2": [1, 2, 1, 7, 1]},
index=["A", "B", "C", "D", "E"],
)
clf.fit(data_np, y, costs=costs, budget=5, verbose=False)
# When X is a dataframe, but costs are not
with pytest.raises(
TypeError,
match="uncertainty costs should be a Pandas DataFrame with the same columns as the input covariates",
):
costs = np.transpose([[1, 2, 2, 2, 3], [1, 2, 1, 7, 1]])
clf.fit(data, y, costs=costs, budget=5, verbose=False)
@pytest.mark.test_gurobi
def test_RobustTree_prediction_shape_error():
    """Validate the input checks of ``RobustTreeClassifier.predict``.

    Fits a tiny tree first, then feeds ``predict`` test frames that are
    inconsistent with the training covariates and asserts each one is
    rejected with a descriptive error.
    """
    # Run some quick model that finishes in 1 second
    clf = RobustTreeClassifier(depth=1, time_limit=20)
    train = pd.DataFrame(
        {"x1": [1, 2, 2, 2, 3], "x2": [1, 2, 1, 0, 1], "y": [1, 1, -1, -1, -1]},
        index=["A", "B", "C", "D", "E"],
    )
    y = train.pop("y")
    clf.fit(train, y, verbose=False)
    # Non-integer data: 0.1 in "x2" must be refused.
    with pytest.raises(
        ValueError,
        match="Found non-integer values.",
    ):
        test = pd.DataFrame(
            {"x1": [1, 2, 2, 2, 3], "x2": [1, 2, 1, 0.1, 1]},
            index=["F", "G", "H", "I", "J"],
        )
        clf.predict(test)
    # Different number of features: extra "x3" column.
    with pytest.raises(
        ValueError,
        match="Input covariates has 2 columns but test covariates has 3 columns",
    ):
        test = pd.DataFrame(
            {"x1": [1, 2, 2, 2, 3], "x2": [1, 2, 1, 7, 1], "x3": [1, 1, 1, 1, 1]},
            index=["F", "G", "H", "I", "J"],
        )
        clf.predict(test)
    # Different column names: "x3" instead of "x2".
    with pytest.raises(
        KeyError,
        match="test covariates should have the same columns as the input covariates",
    ):
        test = pd.DataFrame(
            {"x1": [1, 2, 2, 2, 3], "x3": [1, 2, 1, 7, 1]},
            index=["F", "G", "H", "I", "J"],
        )
        clf.predict(test)
    # When X is a dataframe, but test is not
    with pytest.raises(
        TypeError,
        match="test covariates should be a Pandas DataFrame with the same columns as the input covariates",
    ):
        test = np.transpose([[1, 2, 2, 2, 3], [1, 2, 1, 7, 1]])
        clf.predict(test)
    # When X is not a dataframe, but test is a dataframe with column names:
    # the classifier is refit on a plain ndarray, so the named test frame
    # cannot be aligned against it.
    with pytest.raises(
        KeyError,
        match="test covariates should have the same columns as the input covariates",
    ):
        test = pd.DataFrame(
            {"x1": [1, 2, 2, 2, 3], "x2": [1, 2, 1, 7, 1]},
            index=["F", "G", "H", "I", "J"],
        )
        train_nodf = np.transpose([[1, 2, 2, 2, 3], [1, 2, 1, 0, 1]])
        clf.fit(train_nodf, y, verbose=False)
        clf.predict(test)
@pytest.mark.test_gurobi
def test_RobustTree_with_uncertainty_success():
    """Fitting with an uncertainty budget should build a model and predict."""
    classifier = RobustTreeClassifier(depth=1, time_limit=20)
    train_frame = pd.DataFrame(
        {"x1": [1, 2, 2, 2, 3], "x2": [1, 2, 1, 0, 1], "y": [1, 1, -1, -1, -1]},
        index=["A", "B", "C", "D", "E"],
    )
    target = train_frame.pop("y")
    cost_frame = pd.DataFrame(
        {"x1": [1, 2, 2, 2, 3], "x2": [1, 2, 1, 7, 1]}, index=["A", "B", "C", "D", "E"]
    )
    test_frame = pd.DataFrame(
        {"x1": [1, 2, 2, 2], "x2": [1, 2, 1, 7]}, index=["F", "G", "H", "I"]
    )
    classifier.fit(train_frame, target, costs=cost_frame, budget=5, verbose=False)
    # A fitted classifier exposes the underlying optimization model.
    assert hasattr(classifier, "model")
    predictions = classifier.predict(test_frame)
    # One prediction per test row.
    assert predictions.shape[0] == test_frame.shape[0]
@pytest.mark.test_gurobi
def test_RobustTree_no_uncertainty_success():
    """Without costs/budget the classifier should still fit and predict."""
    clf = RobustTreeClassifier(depth=1, time_limit=20)
    train = pd.DataFrame(
        {"x1": [1, 2, 2, 2, 3], "x2": [1, 2, 1, 0, 1], "y": [1, 1, -1, -1, -1]},
        index=["A", "B", "C", "D", "E"],
    )
    test = pd.DataFrame(
        {"x1": [1, 2, 2, 2], "x2": [1, 2, 1, 7]}, index=["F", "G", "H", "I"]
    )
    y = train.pop("y")
    # Plain fit: no uncertainty costs and no budget.
    clf.fit(train, y, verbose=False)
    assert hasattr(clf, "model")
    y_pred = clf.predict(test)
assert y_pred.shape[0] == test.shape[0] | 0.621656 | 0.741884 |
from __future__ import print_function
import os
import sys
import numpy as np
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
from keras.utils import to_categorical
from keras.layers import Dense, Input, GlobalMaxPooling1D
from keras.layers import Conv1D, MaxPooling1D, Embedding, Conv2D, MaxPool2D
from keras.models import Model, load_model
from keras.initializers import Constant
from keras.layers import Reshape, Flatten, Dropout, Concatenate
from keras.layers import SpatialDropout1D, concatenate
from keras.layers import GlobalMaxPooling1D
from keras.callbacks import Callback
from keras.optimizers import Adam
from keras.callbacks import ModelCheckpoint, EarlyStopping
from keras.utils.vis_utils import plot_model
from process_dataset import get_label_encoded_training_test_sets, get_reddit_dataset
import pandas as pd
import unittest
from parameters import *
from utils import get_dashed_time, get_model_save_name
class CNNTest(unittest.TestCase):
    """Unit tests for the pure label-index helper in this module."""

    def test_get_labels_index(self):
        labels = 'yo boyz I am sing song'.split()
        expected = {'yo': 0, 'boyz': 1, 'I': 2, 'am': 3, 'sing': 4, 'song': 5}
        self.assertEqual(expected, get_labels_index(labels))
def get_labels_index(labels):
    """Map each label to its position: the i-th label gets index i."""
    return dict((label, position) for position, label in enumerate(labels))
def get_texts_and_labels():
    """Load the newsgroups-style corpus from TEXT_DATA_DIR.

    Each subdirectory of TEXT_DATA_DIR is one class; every file inside it
    whose name is all digits is one text sample.

    Returns:
        (texts, labels, labels_index): raw texts, the numeric label of each
        text, and the mapping from directory name to numeric label id.
    """
    print('Processing text dataset')
    texts = []  # list of text samples
    labels_index = {}  # dictionary mapping label name to numeric id
    labels = []  # list of label ids
    for name in sorted(os.listdir(TEXT_DATA_DIR)):
        path = os.path.join(TEXT_DATA_DIR, name)
        if os.path.isdir(path):
            label_id = len(labels_index)
            labels_index[name] = label_id
            for fname in sorted(os.listdir(path)):
                if fname.isdigit():
                    fpath = os.path.join(path, fname)
                    # Python 2's open() has no encoding kwarg; Python 3
                    # reads the files as latin-1.
                    args = {} if sys.version_info < (3,) else {'encoding': 'latin-1'}
                    with open(fpath, **args) as f:
                        t = f.read()
                        i = t.find('\n\n')  # skip header
                        # Header is dropped only when a blank line exists
                        # after position 0; the '\n\n' itself is kept.
                        if 0 < i:
                            t = t[i:]
                        texts.append(t)
                        labels.append(label_id)
    print('Found %s texts.' % len(texts))
    # Sanity check: the incrementally-built index must equal the one
    # derived from the sorted directory listing.
    overall_labels = [name for name in sorted(os.listdir(TEXT_DATA_DIR))
                      if os.path.isdir(os.path.join(TEXT_DATA_DIR, name))]
    assert labels_index == get_labels_index(overall_labels)
    return (texts, labels, get_labels_index(overall_labels))
def get_embeddings_index():
    """Parse the GloVe vectors file into a word -> float32 vector dict."""
    index = {}
    glove_path = os.path.join(GLOVE_DIR, f'glove.6B.{EMBEDDING_DIM}d.txt')
    with open(glove_path) as source:
        for entry in source:
            # Each line is: <token> <float> <float> ... <float>
            token, raw_vector = entry.split(maxsplit=1)
            index[token] = np.fromstring(raw_vector, 'f', sep=' ')
    print('Found %s word vectors.' % len(index))
    return index
def save_model(model, basename='model'):
    """Serialize ``model`` to ``<basename>.h5`` in the working directory."""
    model.save(f'{basename}.h5')
    print("Saved model to disk")
def get_vectorized_text_and_labels(texts, labels):
    """Tokenize, pad and one-hot encode the corpus, then split train/validation.

    Args:
        texts: iterable of raw text samples.
        labels: iterable of integer class ids, aligned with ``texts``.

    Returns:
        (x_train, x_val, y_train, y_val, num_labels, word_index)
    """
    num_labels = len(np.unique(labels))
    # finally, vectorize the text samples into a 2D integer tensor
    tokenizer = Tokenizer(num_words=MAX_NUM_WORDS)
    tokenizer.fit_on_texts(texts)
    sequences = tokenizer.texts_to_sequences(texts)
    word_index = tokenizer.word_index
    print('Found %s unique tokens.' % len(word_index))
    data = pad_sequences(sequences, maxlen=MAX_SEQUENCE_LENGTH)
    labels = to_categorical(np.asarray(labels))
    print('Shape of data tensor:', data.shape)
    print('Shape of label tensor:', labels.shape)
    # split the data into a training set and a validation set
    # TODO(pradeep): Use the training and test split already done.
    indices = np.arange(data.shape[0])
    np.random.shuffle(indices)
    data = data[indices]
    labels = labels[indices]
    num_validation_samples = int(VALIDATION_FRACTION * data.shape[0])
    # BUG FIX: the previous `data[:-num_validation_samples]` produced an
    # EMPTY training set whenever num_validation_samples was 0, because
    # `data[:-0]` == `data[:0]`.  An explicit split index is safe for all
    # values of num_validation_samples.
    split_at = data.shape[0] - num_validation_samples
    x_train = data[:split_at]
    y_train = labels[:split_at]
    x_val = data[split_at:]
    y_val = labels[split_at:]
    return (x_train, x_val, y_train, y_val, num_labels, word_index)
def train_CNN(texts, labels):
    """Train the single-branch 1D CNN on frozen GloVe embeddings.

    :param texts: raw text samples.
    :param labels: integer class id per sample.
    :return: the trained Keras Model.
    """
    (x_train, x_val, y_train, y_val, num_labels, word_index) = get_vectorized_text_and_labels(texts, labels)
    print('Preparing embedding matrix.')
    embeddings_index = get_embeddings_index()
    # prepare embedding matrix
    # +1 because Tokenizer indices start at 1 — presumably row 0 remains the
    # all-zero padding vector; TODO confirm.
    num_words = min(MAX_NUM_WORDS, len(word_index)) + 1
    embedding_matrix = np.zeros((num_words, EMBEDDING_DIM))
    for word, i in word_index.items():
        if i > MAX_NUM_WORDS:
            continue
        embedding_vector = embeddings_index.get(word)
        if embedding_vector is not None:
            # words not found in embedding index will be all-zeros.
            embedding_matrix[i] = embedding_vector
    # load pre-trained word embeddings into an Embedding layer
    # note that we set trainable = False so as to keep the embeddings fixed
    embedding_layer = Embedding(num_words,
                                EMBEDDING_DIM,
                                embeddings_initializer=Constant(embedding_matrix),
                                input_length=MAX_SEQUENCE_LENGTH,
                                trainable=False)
    print('Training model.')
    # train a 1D convnet with global maxpooling
    sequence_input = Input(shape=(MAX_SEQUENCE_LENGTH,), dtype='int32')
    embedded_sequences = embedding_layer(sequence_input)
    x = Conv1D(128, 5, activation='relu')(embedded_sequences)
    x = MaxPooling1D(5)(x)
    x = Conv1D(128, 5, activation='relu')(x)
    x = MaxPooling1D(5)(x)
    x = Conv1D(128, 5, activation='relu')(x)
    x = GlobalMaxPooling1D()(x)
    x = Dense(128, activation='relu')(x)
    preds = Dense(num_labels, activation='softmax')(x)
    model = Model(sequence_input, preds)
    model.compile(loss='categorical_crossentropy',
                  optimizer='rmsprop',
                  metrics=['acc', 'top_k_categorical_accuracy'])
    model.fit(x_train, y_train,
              batch_size=128,
              epochs=NUM_EPOCHS,
              validation_data=(x_val, y_val))
    return model
def get_multi_channel_CNN_model(num_labels, word_index):
    """Build a CNN with three parallel filter widths over GloVe embeddings.

    Unlike train_CNN, the embedding layer here is trainable (fine-tuned).

    :param num_labels: number of output classes.
    :param word_index: token -> integer index mapping from the Tokenizer.
    :return: a compiled (untrained) Keras Model.
    """
    print('Preparing embedding matrix.')
    embeddings_index = get_embeddings_index()
    # prepare embedding matrix
    num_words = min(MAX_NUM_WORDS, len(word_index)) + 1
    embedding_matrix = np.zeros((num_words, EMBEDDING_DIM))
    for word, i in word_index.items():
        if i > MAX_NUM_WORDS:
            continue
        embedding_vector = embeddings_index.get(word)
        if embedding_vector is not None:
            # words not found in embedding index will be all-zeros.
            embedding_matrix[i] = embedding_vector
    filter_sizes = [2, 3, 5]
    # NOTE(review): the filter count is tied to BATCH_SIZE — this looks
    # accidental; confirm before tuning either constant.
    num_filters = BATCH_SIZE
    drop = 0.3
    print('Training model.')
    # train a 1D convnet with global maxpooling
    sequence_input = Input(shape=(MAX_SEQUENCE_LENGTH,), dtype='int32')
    # Note(pradeep): He is training the embedding matrix too.
    embedding_layer = Embedding(input_dim=num_words,
                                output_dim=EMBEDDING_DIM,
                                weights=[embedding_matrix],
                                input_length=MAX_SEQUENCE_LENGTH,
                                trainable=True)
    embedded_sequences = embedding_layer(sequence_input)
    # Each Conv2D kernel spans the full embedding width, so it behaves as a
    # 1D convolution over filter_sizes[k] consecutive tokens.
    reshape = Reshape((MAX_SEQUENCE_LENGTH, EMBEDDING_DIM, 1))(embedded_sequences)
    conv_0 = Conv2D(num_filters,
                    kernel_size=(filter_sizes[0], EMBEDDING_DIM),
                    padding='valid', kernel_initializer='normal',
                    activation='relu')(reshape)
    conv_1 = Conv2D(num_filters,
                    kernel_size=(filter_sizes[1], EMBEDDING_DIM),
                    padding='valid', kernel_initializer='normal',
                    activation='relu')(reshape)
    conv_2 = Conv2D(num_filters,
                    kernel_size=(filter_sizes[2], EMBEDDING_DIM),
                    padding='valid', kernel_initializer='normal',
                    activation='relu')(reshape)
    # Pooling window covers the whole conv output, collapsing each feature
    # map to a single value.
    maxpool_0 = MaxPool2D(pool_size=(MAX_SEQUENCE_LENGTH - filter_sizes[0] + 1, 1),
                          strides=(1,1), padding='valid')(conv_0)
    maxpool_1 = MaxPool2D(pool_size=(MAX_SEQUENCE_LENGTH - filter_sizes[1] + 1, 1),
                          strides=(1,1), padding='valid')(conv_1)
    maxpool_2 = MaxPool2D(pool_size=(MAX_SEQUENCE_LENGTH - filter_sizes[2] + 1, 1),
                          strides=(1,1), padding='valid')(conv_2)
    concatenated_tensor = Concatenate(axis=1)(
        [maxpool_0, maxpool_1, maxpool_2])
    flatten = Flatten()(concatenated_tensor)
    dropout = Dropout(drop)(flatten)
    # output = Dense(units=1, activation='sigmoid')(dropout)
    output = Dense(num_labels, activation='softmax')(dropout)
    model = Model(inputs=sequence_input, outputs=output)
    # TODO(pradeep): Extract the hyperparameters.
    adam = Adam(lr=1e-4, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0)
    model.compile(optimizer=adam, loss='categorical_crossentropy',
                  metrics=['acc', 'top_k_categorical_accuracy'])
    return model
def train_multi_channel_CNN(texts, labels):
    """Train the multi-filter-width CNN, checkpointing the best val_acc epoch."""
    (x_train, x_val, y_train, y_val, num_labels, word_index) = get_vectorized_text_and_labels(texts, labels)
    model = get_multi_channel_CNN_model(num_labels, word_index)
    model_file_name = get_model_save_name('CNN-multi-channel')
    # Persist only the epoch with the best validation accuracy.
    checkpoint = ModelCheckpoint(model_file_name, monitor='val_acc',
                                 verbose=1, save_best_only=True, mode='max')
    # NOTE(review): `history` is collected but never used or returned.
    history = model.fit(x=x_train,
                        y=y_train,
                        validation_data=(x_val, y_val),
                        batch_size=BATCH_SIZE,
                        callbacks=[checkpoint],
                        epochs=NUM_EPOCHS)
def main(is_newsgroups_dataset=False, mode='train-from-scratch'):
    """Entry point: load a dataset, then train or evaluate a saved model.

    :param is_newsgroups_dataset: if True, read texts via get_texts_and_labels;
        otherwise use the reddit dataset from process_dataset.
    :param mode: 'train-from-scratch', 'train-from-scratch-multi-channel',
        or 'load-model'.
    """
    # first, build index mapping words in the embeddings set
    # to their embedding vector
    print('Indexing word vectors.')
    if is_newsgroups_dataset:
        texts, labels, labels_index = get_texts_and_labels()
    else:
        # NOTE(review): the pre-split train/test sets are merged here;
        # get_vectorized_text_and_labels re-splits them randomly.
        X_train, X_test, y_train, y_test = get_label_encoded_training_test_sets(get_reddit_dataset(size=DATASET_SIZE))
        texts = pd.concat([X_train, X_test])
        labels = np.concatenate([y_train, y_test])
    if mode == 'train-from-scratch':
        model = train_CNN(texts, labels)
        model_file_name = get_model_save_name()
        model.save(model_file_name)
    elif mode == 'train-from-scratch-multi-channel':
        train_multi_channel_CNN(texts, labels)
    elif mode == 'load-model':
        # NOTE(review): hard-coded checkpoint path — update before reuse.
        model_file_name = 'models/CNN-10-epochs-100000-rows-2019-04-14-16:27:45-567600.h5'
        model = load_model(model_file_name)
        model.compile(loss='categorical_crossentropy',
                      optimizer='rmsprop',
                      metrics=['acc', 'top_k_categorical_accuracy'])
        # TODO(pradeep): This is not the old training set because we are
        # shuffling again.
        (x_train, x_val, y_train, y_val, num_labels, word_index) = get_vectorized_text_and_labels(
            texts, labels)
        score = model.evaluate(x_train, y_train, batch_size=128)
        print(f'Training set: {model.metrics_names}: {score}')
        score = model.evaluate(x_val, y_val, batch_size=128)
        print(f'Validation set: {model.metrics_names}: {score}')
if __name__ == '__main__':
main(mode=CNN_mode) | src/pretrained_word_embeddings.py | from __future__ import print_function
import os
import sys
import numpy as np
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
from keras.utils import to_categorical
from keras.layers import Dense, Input, GlobalMaxPooling1D
from keras.layers import Conv1D, MaxPooling1D, Embedding, Conv2D, MaxPool2D
from keras.models import Model, load_model
from keras.initializers import Constant
from keras.layers import Reshape, Flatten, Dropout, Concatenate
from keras.layers import SpatialDropout1D, concatenate
from keras.layers import GlobalMaxPooling1D
from keras.callbacks import Callback
from keras.optimizers import Adam
from keras.callbacks import ModelCheckpoint, EarlyStopping
from keras.utils.vis_utils import plot_model
from process_dataset import get_label_encoded_training_test_sets, get_reddit_dataset
import pandas as pd
import unittest
from parameters import *
from utils import get_dashed_time, get_model_save_name
class CNNTest(unittest.TestCase):
    """Sanity checks for the module's pure helper functions."""

    def test_get_labels_index(self):
        words = 'yo boyz I am sing song'.split()
        self.assertEqual(
            get_labels_index(words),
            {'I': 2, 'am': 3, 'boyz': 1, 'sing': 4, 'song': 5, 'yo': 0},
        )
def get_labels_index(labels):
    """Assign consecutive indices (starting at 0) to labels, in order."""
    return {label: index for index, label in enumerate(labels)}
def get_texts_and_labels():
    """Load the newsgroups-style corpus from TEXT_DATA_DIR.

    Each subdirectory of TEXT_DATA_DIR is one class; every file inside it
    whose name is all digits is one text sample.

    Returns:
        (texts, labels, labels_index): raw texts, the numeric label of each
        text, and the mapping from directory name to numeric label id.
    """
    print('Processing text dataset')
    texts = []  # list of text samples
    labels_index = {}  # dictionary mapping label name to numeric id
    labels = []  # list of label ids
    for name in sorted(os.listdir(TEXT_DATA_DIR)):
        path = os.path.join(TEXT_DATA_DIR, name)
        if os.path.isdir(path):
            label_id = len(labels_index)
            labels_index[name] = label_id
            for fname in sorted(os.listdir(path)):
                if fname.isdigit():
                    fpath = os.path.join(path, fname)
                    # Python 2's open() has no encoding kwarg; Python 3
                    # reads the files as latin-1.
                    args = {} if sys.version_info < (3,) else {'encoding': 'latin-1'}
                    with open(fpath, **args) as f:
                        t = f.read()
                        i = t.find('\n\n')  # skip header
                        # Header is dropped only when a blank line exists
                        # after position 0; the '\n\n' itself is kept.
                        if 0 < i:
                            t = t[i:]
                        texts.append(t)
                        labels.append(label_id)
    print('Found %s texts.' % len(texts))
    # Sanity check: the incrementally-built index must equal the one
    # derived from the sorted directory listing.
    overall_labels = [name for name in sorted(os.listdir(TEXT_DATA_DIR))
                      if os.path.isdir(os.path.join(TEXT_DATA_DIR, name))]
    assert labels_index == get_labels_index(overall_labels)
    return (texts, labels, get_labels_index(overall_labels))
def get_embeddings_index():
    """Read the GloVe text file into a dict of word -> float32 vector."""
    vectors = {}
    path = os.path.join(GLOVE_DIR, f'glove.6B.{EMBEDDING_DIM}d.txt')
    with open(path) as glove_file:
        for raw_line in glove_file:
            # Each line is: <token> <float> <float> ... <float>
            word, rest = raw_line.split(maxsplit=1)
            vectors[word] = np.fromstring(rest, 'f', sep=' ')
    print('Found %s word vectors.' % len(vectors))
    return vectors
def save_model(model, basename='model'):
    """Write ``model`` to disk as ``<basename>.h5``."""
    target = f'{basename}.h5'
    model.save(target)
    print("Saved model to disk")
def get_vectorized_text_and_labels(texts, labels):
    """Tokenize, pad and one-hot encode the corpus, then split train/validation.

    Args:
        texts: iterable of raw text samples.
        labels: iterable of integer class ids, aligned with ``texts``.

    Returns:
        (x_train, x_val, y_train, y_val, num_labels, word_index)
    """
    num_labels = len(np.unique(labels))
    # finally, vectorize the text samples into a 2D integer tensor
    tokenizer = Tokenizer(num_words=MAX_NUM_WORDS)
    tokenizer.fit_on_texts(texts)
    sequences = tokenizer.texts_to_sequences(texts)
    word_index = tokenizer.word_index
    print('Found %s unique tokens.' % len(word_index))
    data = pad_sequences(sequences, maxlen=MAX_SEQUENCE_LENGTH)
    labels = to_categorical(np.asarray(labels))
    print('Shape of data tensor:', data.shape)
    print('Shape of label tensor:', labels.shape)
    # split the data into a training set and a validation set
    # TODO(pradeep): Use the training and test split already done.
    indices = np.arange(data.shape[0])
    np.random.shuffle(indices)
    data = data[indices]
    labels = labels[indices]
    num_validation_samples = int(VALIDATION_FRACTION * data.shape[0])
    # BUG FIX: `data[:-num_validation_samples]` is EMPTY when
    # num_validation_samples == 0 (`data[:-0]` == `data[:0]`); using an
    # explicit split index handles every value correctly.
    split_index = data.shape[0] - num_validation_samples
    x_train = data[:split_index]
    y_train = labels[:split_index]
    x_val = data[split_index:]
    y_val = labels[split_index:]
    return (x_train, x_val, y_train, y_val, num_labels, word_index)
def train_CNN(texts, labels):
    """Train the single-branch 1D CNN on frozen GloVe embeddings.

    :param texts: raw text samples.
    :param labels: integer class id per sample.
    :return: the trained Keras Model.
    """
    (x_train, x_val, y_train, y_val, num_labels, word_index) = get_vectorized_text_and_labels(texts, labels)
    print('Preparing embedding matrix.')
    embeddings_index = get_embeddings_index()
    # prepare embedding matrix
    # +1 because Tokenizer indices start at 1 — presumably row 0 remains the
    # all-zero padding vector; TODO confirm.
    num_words = min(MAX_NUM_WORDS, len(word_index)) + 1
    embedding_matrix = np.zeros((num_words, EMBEDDING_DIM))
    for word, i in word_index.items():
        if i > MAX_NUM_WORDS:
            continue
        embedding_vector = embeddings_index.get(word)
        if embedding_vector is not None:
            # words not found in embedding index will be all-zeros.
            embedding_matrix[i] = embedding_vector
    # load pre-trained word embeddings into an Embedding layer
    # note that we set trainable = False so as to keep the embeddings fixed
    embedding_layer = Embedding(num_words,
                                EMBEDDING_DIM,
                                embeddings_initializer=Constant(embedding_matrix),
                                input_length=MAX_SEQUENCE_LENGTH,
                                trainable=False)
    print('Training model.')
    # train a 1D convnet with global maxpooling
    sequence_input = Input(shape=(MAX_SEQUENCE_LENGTH,), dtype='int32')
    embedded_sequences = embedding_layer(sequence_input)
    x = Conv1D(128, 5, activation='relu')(embedded_sequences)
    x = MaxPooling1D(5)(x)
    x = Conv1D(128, 5, activation='relu')(x)
    x = MaxPooling1D(5)(x)
    x = Conv1D(128, 5, activation='relu')(x)
    x = GlobalMaxPooling1D()(x)
    x = Dense(128, activation='relu')(x)
    preds = Dense(num_labels, activation='softmax')(x)
    model = Model(sequence_input, preds)
    model.compile(loss='categorical_crossentropy',
                  optimizer='rmsprop',
                  metrics=['acc', 'top_k_categorical_accuracy'])
    model.fit(x_train, y_train,
              batch_size=128,
              epochs=NUM_EPOCHS,
              validation_data=(x_val, y_val))
    return model
def get_multi_channel_CNN_model(num_labels, word_index):
    """Build a CNN with three parallel filter widths over GloVe embeddings.

    Unlike train_CNN, the embedding layer here is trainable (fine-tuned).

    :param num_labels: number of output classes.
    :param word_index: token -> integer index mapping from the Tokenizer.
    :return: a compiled (untrained) Keras Model.
    """
    print('Preparing embedding matrix.')
    embeddings_index = get_embeddings_index()
    # prepare embedding matrix
    num_words = min(MAX_NUM_WORDS, len(word_index)) + 1
    embedding_matrix = np.zeros((num_words, EMBEDDING_DIM))
    for word, i in word_index.items():
        if i > MAX_NUM_WORDS:
            continue
        embedding_vector = embeddings_index.get(word)
        if embedding_vector is not None:
            # words not found in embedding index will be all-zeros.
            embedding_matrix[i] = embedding_vector
    filter_sizes = [2, 3, 5]
    # NOTE(review): the filter count is tied to BATCH_SIZE — this looks
    # accidental; confirm before tuning either constant.
    num_filters = BATCH_SIZE
    drop = 0.3
    print('Training model.')
    # train a 1D convnet with global maxpooling
    sequence_input = Input(shape=(MAX_SEQUENCE_LENGTH,), dtype='int32')
    # Note(pradeep): He is training the embedding matrix too.
    embedding_layer = Embedding(input_dim=num_words,
                                output_dim=EMBEDDING_DIM,
                                weights=[embedding_matrix],
                                input_length=MAX_SEQUENCE_LENGTH,
                                trainable=True)
    embedded_sequences = embedding_layer(sequence_input)
    # Each Conv2D kernel spans the full embedding width, so it behaves as a
    # 1D convolution over filter_sizes[k] consecutive tokens.
    reshape = Reshape((MAX_SEQUENCE_LENGTH, EMBEDDING_DIM, 1))(embedded_sequences)
    conv_0 = Conv2D(num_filters,
                    kernel_size=(filter_sizes[0], EMBEDDING_DIM),
                    padding='valid', kernel_initializer='normal',
                    activation='relu')(reshape)
    conv_1 = Conv2D(num_filters,
                    kernel_size=(filter_sizes[1], EMBEDDING_DIM),
                    padding='valid', kernel_initializer='normal',
                    activation='relu')(reshape)
    conv_2 = Conv2D(num_filters,
                    kernel_size=(filter_sizes[2], EMBEDDING_DIM),
                    padding='valid', kernel_initializer='normal',
                    activation='relu')(reshape)
    # Pooling window covers the whole conv output, collapsing each feature
    # map to a single value.
    maxpool_0 = MaxPool2D(pool_size=(MAX_SEQUENCE_LENGTH - filter_sizes[0] + 1, 1),
                          strides=(1,1), padding='valid')(conv_0)
    maxpool_1 = MaxPool2D(pool_size=(MAX_SEQUENCE_LENGTH - filter_sizes[1] + 1, 1),
                          strides=(1,1), padding='valid')(conv_1)
    maxpool_2 = MaxPool2D(pool_size=(MAX_SEQUENCE_LENGTH - filter_sizes[2] + 1, 1),
                          strides=(1,1), padding='valid')(conv_2)
    concatenated_tensor = Concatenate(axis=1)(
        [maxpool_0, maxpool_1, maxpool_2])
    flatten = Flatten()(concatenated_tensor)
    dropout = Dropout(drop)(flatten)
    # output = Dense(units=1, activation='sigmoid')(dropout)
    output = Dense(num_labels, activation='softmax')(dropout)
    model = Model(inputs=sequence_input, outputs=output)
    # TODO(pradeep): Extract the hyperparameters.
    adam = Adam(lr=1e-4, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0)
    model.compile(optimizer=adam, loss='categorical_crossentropy',
                  metrics=['acc', 'top_k_categorical_accuracy'])
    return model
def train_multi_channel_CNN(texts, labels):
    """Train the multi-filter-width CNN, checkpointing the best val_acc epoch."""
    (x_train, x_val, y_train, y_val, num_labels, word_index) = get_vectorized_text_and_labels(texts, labels)
    model = get_multi_channel_CNN_model(num_labels, word_index)
    model_file_name = get_model_save_name('CNN-multi-channel')
    # Persist only the epoch with the best validation accuracy.
    checkpoint = ModelCheckpoint(model_file_name, monitor='val_acc',
                                 verbose=1, save_best_only=True, mode='max')
    # NOTE(review): `history` is collected but never used or returned.
    history = model.fit(x=x_train,
                        y=y_train,
                        validation_data=(x_val, y_val),
                        batch_size=BATCH_SIZE,
                        callbacks=[checkpoint],
                        epochs=NUM_EPOCHS)
def main(is_newsgroups_dataset=False, mode='train-from-scratch'):
    """Entry point: load a dataset, then train or evaluate a saved model.

    :param is_newsgroups_dataset: if True, read texts via get_texts_and_labels;
        otherwise use the reddit dataset from process_dataset.
    :param mode: 'train-from-scratch', 'train-from-scratch-multi-channel',
        or 'load-model'.
    """
    # first, build index mapping words in the embeddings set
    # to their embedding vector
    print('Indexing word vectors.')
    if is_newsgroups_dataset:
        texts, labels, labels_index = get_texts_and_labels()
    else:
        # NOTE(review): the pre-split train/test sets are merged here;
        # get_vectorized_text_and_labels re-splits them randomly.
        X_train, X_test, y_train, y_test = get_label_encoded_training_test_sets(get_reddit_dataset(size=DATASET_SIZE))
        texts = pd.concat([X_train, X_test])
        labels = np.concatenate([y_train, y_test])
    if mode == 'train-from-scratch':
        model = train_CNN(texts, labels)
        model_file_name = get_model_save_name()
        model.save(model_file_name)
    elif mode == 'train-from-scratch-multi-channel':
        train_multi_channel_CNN(texts, labels)
    elif mode == 'load-model':
        # NOTE(review): hard-coded checkpoint path — update before reuse.
        model_file_name = 'models/CNN-10-epochs-100000-rows-2019-04-14-16:27:45-567600.h5'
        model = load_model(model_file_name)
        model.compile(loss='categorical_crossentropy',
                      optimizer='rmsprop',
                      metrics=['acc', 'top_k_categorical_accuracy'])
        # TODO(pradeep): This is not the old training set because we are
        # shuffling again.
        (x_train, x_val, y_train, y_val, num_labels, word_index) = get_vectorized_text_and_labels(
            texts, labels)
        score = model.evaluate(x_train, y_train, batch_size=128)
        print(f'Training set: {model.metrics_names}: {score}')
        score = model.evaluate(x_val, y_val, batch_size=128)
        print(f'Validation set: {model.metrics_names}: {score}')
if __name__ == '__main__':
main(mode=CNN_mode) | 0.575946 | 0.367327 |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import math
from measurement_stats import value
class Angle(object):
    """A type for representing angular measurements with uncertainties.

    The angle is stored internally in radians alongside a one-sigma
    uncertainty (also in radians); degree-based accessors convert on the
    fly.  Arithmetic operators delegate to
    ``measurement_stats.value.ValueUncertainty`` so that uncertainties
    propagate through calculations.
    """

    def __init__(self, **kwargs):
        # Defaults: zero angle with an uncertainty of 1 radian.
        self._angle = 0.0
        self._unc = 1.0

        # Precedence: 'uncertainty' (radians) beats 'uncertainty_degrees';
        # 'degrees' beats 'radians'.
        if 'uncertainty' in kwargs:
            self.uncertainty = kwargs.get('uncertainty')
        elif 'uncertainty_degrees' in kwargs:
            self.uncertainty_degrees = kwargs.get('uncertainty_degrees')

        if 'degrees' in kwargs:
            self.degrees = kwargs.get('degrees')
        elif 'radians' in kwargs:
            self.radians = kwargs.get('radians')

    @property
    def value(self):
        """Angle in radians as a ValueUncertainty."""
        return value.ValueUncertainty(self.radians, self.uncertainty)

    @property
    def value_degrees(self):
        """Angle in degrees as a ValueUncertainty."""
        return value.ValueUncertainty(self.degrees, self.uncertainty_degrees)

    @property
    def uncertainty(self):
        """One-sigma uncertainty in radians."""
        return self._unc

    @uncertainty.setter
    def uncertainty(self, v):
        self._unc = v

    @property
    def uncertainty_degrees(self):
        """One-sigma uncertainty in degrees."""
        return math.degrees(self._unc)

    @uncertainty_degrees.setter
    def uncertainty_degrees(self, v):
        self._unc = math.radians(v)

    @property
    def radians(self):
        """Angle in radians."""
        return self._angle

    @radians.setter
    def radians(self, v):
        self._angle = float(v)

    @property
    def degrees(self):
        """Angle in degrees."""
        return math.degrees(self._angle)

    @degrees.setter
    def degrees(self, v):
        self._angle = math.radians(float(v))

    @property
    def pretty_print(self):
        """Angle in degrees rounded to 3 significant figures."""
        return value.round_significant(self.degrees, 3)

    def clone(self):
        """Return an independent copy of this angle."""
        return self.__class__(radians=self._angle, uncertainty=self._unc)

    def constrain_to_revolution(self):
        """
        Constrains the angle to within the bounds [0, 360) degrees by
        removing whole revolutions.  Mutates and returns self.
        """
        radians = self.radians
        while radians < 0:
            radians += 2.0 * math.pi
        degrees = math.degrees(radians) % 360.0
        self.radians = math.radians(degrees)
        return self

    def difference_between(self, angle):
        """
        Returns a new Angle instance that is the smallest difference between
        this angle and the one specified in the arguments.

        :param angle: the Angle to compare against
        :return: the signed smallest difference, in [-180, 180) degrees
        :rtype: Angle
        """
        a = self.clone().constrain_to_revolution()
        b = angle.clone().constrain_to_revolution()
        result = a - b
        return Angle(
            degrees=((result.degrees + 180.0) % 360.0) - 180.0,
            uncertainty=result.uncertainty
        )

    def __pow__(self, power, modulo=None):
        v = self.value ** power
        return self.__class__(
            radians=v.raw,
            uncertainty=v.raw_uncertainty
        )

    def __add__(self, other):
        v = self.value + other.value
        return self.__class__(
            radians=v.raw,
            uncertainty=v.raw_uncertainty
        )

    def __radd__(self, other):
        # Addition commutes, so the reflected form reuses __add__.
        return self.__add__(other)

    def __sub__(self, other):
        v = self.value - other.value
        return self.__class__(
            radians=v.raw,
            uncertainty=v.raw_uncertainty
        )

    def __rsub__(self, other):
        # BUG FIX: reflected subtraction must compute other - self; the
        # previous implementation returned self - other.
        v = other.value - self.value
        return self.__class__(
            radians=v.raw,
            uncertainty=v.raw_uncertainty
        )

    def __mul__(self, other):
        v = self.value * other.value
        return self.__class__(
            radians=v.raw,
            uncertainty=v.raw_uncertainty
        )

    def __rmul__(self, other):
        # Multiplication commutes.
        return self.__mul__(other)

    def __truediv__(self, other):
        v = self.value / other.value
        return self.__class__(
            radians=v.raw,
            uncertainty=v.raw_uncertainty
        )

    def __rtruediv__(self, other):
        # BUG FIX: reflected division must compute other / self; the
        # previous implementation returned self / other.
        v = other.value / self.value
        return self.__class__(
            radians=v.raw,
            uncertainty=v.raw_uncertainty
        )

    def __div__(self, other):
        # Python 2 alias.
        return self.__truediv__(other)

    def __rdiv__(self, other):
        # Python 2 alias.
        return self.__rtruediv__(other)

    def __repr__(self):
        return self.__str__()

    def __unicode__(self):
        return '<{} {}>'.format(self.__class__.__name__, self.value.label)
def __str__(self):
return '{}'.format(self.__unicode__()) | measurement_stats/angle.py | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import math
from measurement_stats import value
class Angle(object):
    """A type for representing angular measurements with uncertainties.

    The angle is stored internally in radians alongside a one-sigma
    uncertainty (also in radians); degree-based accessors convert on the
    fly.  Arithmetic operators delegate to
    ``measurement_stats.value.ValueUncertainty`` so that uncertainties
    propagate through calculations.
    """

    def __init__(self, **kwargs):
        # Defaults: zero angle with an uncertainty of 1 radian.
        self._angle = 0.0
        self._unc = 1.0

        # Precedence: 'uncertainty' (radians) beats 'uncertainty_degrees';
        # 'degrees' beats 'radians'.
        if 'uncertainty' in kwargs:
            self.uncertainty = kwargs.get('uncertainty')
        elif 'uncertainty_degrees' in kwargs:
            self.uncertainty_degrees = kwargs.get('uncertainty_degrees')

        if 'degrees' in kwargs:
            self.degrees = kwargs.get('degrees')
        elif 'radians' in kwargs:
            self.radians = kwargs.get('radians')

    @property
    def value(self):
        """Angle in radians as a ValueUncertainty."""
        return value.ValueUncertainty(self.radians, self.uncertainty)

    @property
    def value_degrees(self):
        """Angle in degrees as a ValueUncertainty."""
        return value.ValueUncertainty(self.degrees, self.uncertainty_degrees)

    @property
    def uncertainty(self):
        """One-sigma uncertainty in radians."""
        return self._unc

    @uncertainty.setter
    def uncertainty(self, v):
        self._unc = v

    @property
    def uncertainty_degrees(self):
        """One-sigma uncertainty in degrees."""
        return math.degrees(self._unc)

    @uncertainty_degrees.setter
    def uncertainty_degrees(self, v):
        self._unc = math.radians(v)

    @property
    def radians(self):
        """Angle in radians."""
        return self._angle

    @radians.setter
    def radians(self, v):
        self._angle = float(v)

    @property
    def degrees(self):
        """Angle in degrees."""
        return math.degrees(self._angle)

    @degrees.setter
    def degrees(self, v):
        self._angle = math.radians(float(v))

    @property
    def pretty_print(self):
        """Angle in degrees rounded to 3 significant figures."""
        return value.round_significant(self.degrees, 3)

    def clone(self):
        """Return an independent copy of this angle."""
        return self.__class__(radians=self._angle, uncertainty=self._unc)

    def constrain_to_revolution(self):
        """
        Constrains the angle to within the bounds [0, 360) degrees by
        removing whole revolutions.  Mutates and returns self.
        """
        radians = self.radians
        while radians < 0:
            radians += 2.0 * math.pi
        degrees = math.degrees(radians) % 360.0
        self.radians = math.radians(degrees)
        return self

    def difference_between(self, angle):
        """
        Returns a new Angle instance that is the smallest difference between
        this angle and the one specified in the arguments.

        :param angle: the Angle to compare against
        :return: the signed smallest difference, in [-180, 180) degrees
        :rtype: Angle
        """
        a = self.clone().constrain_to_revolution()
        b = angle.clone().constrain_to_revolution()
        result = a - b
        return Angle(
            degrees=((result.degrees + 180.0) % 360.0) - 180.0,
            uncertainty=result.uncertainty
        )

    def __pow__(self, power, modulo=None):
        v = self.value ** power
        return self.__class__(
            radians=v.raw,
            uncertainty=v.raw_uncertainty
        )

    def __add__(self, other):
        v = self.value + other.value
        return self.__class__(
            radians=v.raw,
            uncertainty=v.raw_uncertainty
        )

    def __radd__(self, other):
        # Addition commutes, so the reflected form reuses __add__.
        return self.__add__(other)

    def __sub__(self, other):
        v = self.value - other.value
        return self.__class__(
            radians=v.raw,
            uncertainty=v.raw_uncertainty
        )

    def __rsub__(self, other):
        # BUG FIX: reflected subtraction must compute other - self; the
        # previous implementation returned self - other.
        v = other.value - self.value
        return self.__class__(
            radians=v.raw,
            uncertainty=v.raw_uncertainty
        )

    def __mul__(self, other):
        v = self.value * other.value
        return self.__class__(
            radians=v.raw,
            uncertainty=v.raw_uncertainty
        )

    def __rmul__(self, other):
        # Multiplication commutes.
        return self.__mul__(other)

    def __truediv__(self, other):
        v = self.value / other.value
        return self.__class__(
            radians=v.raw,
            uncertainty=v.raw_uncertainty
        )

    def __rtruediv__(self, other):
        # BUG FIX: reflected division must compute other / self; the
        # previous implementation returned self / other.
        v = other.value / self.value
        return self.__class__(
            radians=v.raw,
            uncertainty=v.raw_uncertainty
        )

    def __div__(self, other):
        # Python 2 alias.
        return self.__truediv__(other)

    def __rdiv__(self, other):
        # Python 2 alias.
        return self.__rtruediv__(other)

    def __repr__(self):
        return self.__str__()

    def __unicode__(self):
        return '<{} {}>'.format(self.__class__.__name__, self.value.label)
def __str__(self):
return '{}'.format(self.__unicode__()) | 0.918809 | 0.428801 |
from collections import defaultdict
import math
from typing import Mapping, List, Union
import numpy as np
import tensorflow as tf
from neuromorphy.train.data_info import DataInfo
from neuromorphy.train.corpus_iterator import CorpusIterator
class BatchGenerator:
class Generator:
def __init__(self, generator, max_count):
self._generator = generator
self._cur_count = 0
self._max_count = max_count
def get_next(self):
assert self._cur_count < self._max_count
self._cur_count += 1
return next(self._generator)
def has_next(self):
return self._cur_count < self._max_count
def left_count(self):
return self._max_count - self._cur_count
def reset(self):
assert self._cur_count == self._max_count
self._cur_count = 0
@property
def batchs_count(self):
return self._max_count
def __init__(self, data_info: DataInfo, corpus_iterator: CorpusIterator, batch_size: int=1024):
self._data_info = data_info
self._batch_size = batch_size
buckets = self._build_buckets(corpus_iterator)
self._generators = \
[self._build_data_generator(bucket_data, sent_len) for sent_len, bucket_data in buckets.items()]
def _build_buckets(self, corpus_iterator: CorpusIterator) -> Mapping[int, List[np.ndarray]]:
buckets = defaultdict(list)
with corpus_iterator:
for sentence in corpus_iterator:
bucket_size = self._get_bucket_size(len(sentence))
data = [self._data_info.get_word_index(tok.token) for tok in sentence]
labels = [self._data_info.get_label_index(tok.grammar_value) for tok in sentence]
lemma_labels = [self._data_info.get_lemmatize_rule_index(tok.token, tok.lemma) for tok in sentence]
data = [data, labels, lemma_labels]
if bucket_size in buckets:
for i, feat_data in enumerate(data):
buckets[bucket_size][i].append(feat_data)
else:
buckets[bucket_size] = [[feat_data] for feat_data in data]
return {sent_len: [self._concatenate(data, sent_len) for data in bucket]
for sent_len, bucket in buckets.items()}
@staticmethod
def _get_bucket_size(sent_len: int) -> int:
bucket_upper_limit = 4
while bucket_upper_limit < sent_len:
bucket_upper_limit *= 2
return bucket_upper_limit
@staticmethod
def _concatenate(data: List[Union[List, np.ndarray]], sent_len: int) -> np.ndarray:
matrix_shape = (sent_len, len(data))
if isinstance(data[0], np.ndarray):
matrix_shape += data[0].shape[1:]
matrix = np.zeros(matrix_shape)
for i, sent in enumerate(data):
matrix[:len(sent), i] = sent
return matrix
    def _build_data_generator(self, data: List[np.ndarray], sent_len: int) -> Generator:
        """Create an epoch-aware batch generator over one bucket.

        :param data: [data, labels, lemma_labels] matrices of shape
            (sent_len, num_sentences) as produced by _build_buckets.
        :param sent_len: padded sentence length of this bucket.
        """
        indices = np.arange(data[0].shape[1])
        # Keep the token count per batch roughly constant across buckets.
        batch_size = max(self._batch_size // sent_len, 1)
        batchs_count = int(math.ceil(len(indices) / batch_size))
        def _batch_generator():
            while True:
                # Reshuffle sentence order at the start of every epoch.
                np.random.shuffle(indices)
                for i in range(batchs_count):
                    batch_begin = i * batch_size
                    batch_end = min((i + 1) * batch_size, len(indices))
                    batch_indices = indices[batch_begin: batch_end]
                    batch_data = data[0][:, batch_indices]
                    batch_labels = data[1][:, batch_indices]
                    batch_lemma_labels = data[2][:, batch_indices]
                    # First row whose label is 0 gives each sentence's length;
                    # argmax == 0 with no such row means the sentence fills the
                    # whole bucket.  Assumes label index 0 is reserved for
                    # padding -- TODO confirm against DataInfo.
                    seq_lengths = (batch_labels == 0).argmax(axis=0)
                    seq_lengths[seq_lengths == 0] = batch_labels.shape[0]
                    max_seq_len = seq_lengths.max()
                    # Trim rows beyond the longest sentence in this batch.
                    batch_data = batch_data[:max_seq_len]
                    batch_labels, batch_lemma_labels = batch_labels[:max_seq_len], batch_lemma_labels[:max_seq_len]
                    yield batch_data, batch_labels, batch_lemma_labels
        return self.Generator(_batch_generator(), batchs_count)
    def __iter__(self):
        # The BatchGenerator is itself an (endless) iterator over batches.
        return self
    def __next__(self):
        # Yield one batch from a randomly chosen bucket.  Once every bucket is
        # exhausted (one full epoch), reset them all and continue, so iteration
        # never raises StopIteration.
        while True:
            while self._has_generators():
                generator = self._sample_generator()
                assert generator.has_next()
                return generator.get_next()
            self._reset_generators()
    def _has_generators(self) -> bool:
        # True while at least one bucket still has batches left this epoch.
        return any(gen.has_next() for gen in self._generators)
def _sample_generator(self) -> Generator:
dist = np.array([gen.left_count() for gen in self._generators], dtype=np.float)
dist /= dist.sum()
return np.random.choice(self._generators, p=dist)
    def _reset_generators(self):
        # Start a new epoch in every bucket.
        for gen in self._generators:
            gen.reset()
    @property
    def batchs_count(self) -> int:
        # Total batches per epoch across all buckets.  (Name kept as-is --
        # "batchs" -- because external callers may rely on it.)
        return sum(gen.batchs_count for gen in self._generators)
def make_dataset(self) -> tf.data.Dataset:
return tf.data.Dataset() \
.from_generator(lambda: self,
output_types=(tf.int64, tf.int64, tf.int64),
output_shapes=(tf.TensorShape([None, None]),
tf.TensorShape([None, None]),
tf.TensorShape([None, None]))) | neuromorphy/train/batch_generator.py |
from collections import defaultdict
import math
from typing import Mapping, List, Union
import numpy as np
import tensorflow as tf
from neuromorphy.train.data_info import DataInfo
from neuromorphy.train.corpus_iterator import CorpusIterator
class BatchGenerator:
    """Yields zero-padded batches from a corpus bucketed by sentence length.

    Sentences are grouped into power-of-two length buckets.  Iterating the
    BatchGenerator draws batches from randomly chosen buckets (weighted by how
    many batches each still has this epoch) and never terminates: once every
    bucket is exhausted, all of them are reset and a new epoch begins.

    Fixes relative to the previous revision:
      * ``np.float`` (deprecated in NumPy 1.20, removed in 1.24) replaced with
        the builtin ``float``.
      * ``tf.data.Dataset.from_generator`` is now called as the classmethod it
        is -- ``tf.data.Dataset()`` is abstract and raised a TypeError.
    """

    class Generator:
        """Wraps an endless batch generator with per-epoch accounting."""

        def __init__(self, generator, max_count):
            self._generator = generator
            self._cur_count = 0          # batches already yielded this epoch
            self._max_count = max_count  # batches per epoch

        def get_next(self):
            # Must not be called once the epoch is exhausted.
            assert self._cur_count < self._max_count
            self._cur_count += 1
            return next(self._generator)

        def has_next(self):
            return self._cur_count < self._max_count

        def left_count(self):
            # Number of batches remaining in the current epoch.
            return self._max_count - self._cur_count

        def reset(self):
            # Begin a new epoch; only legal after full consumption.
            assert self._cur_count == self._max_count
            self._cur_count = 0

        @property
        def batchs_count(self):
            return self._max_count

    def __init__(self, data_info: DataInfo, corpus_iterator: CorpusIterator, batch_size: int=1024):
        """Bucket the corpus and prepare one batch generator per bucket.

        :param data_info: vocabulary / label / lemmatize-rule index mappings.
        :param corpus_iterator: iterable over tokenized, annotated sentences.
        :param batch_size: approximate tokens per batch; each bucket uses
            ``batch_size // sentence_length`` sentences per batch.
        """
        self._data_info = data_info
        self._batch_size = batch_size
        buckets = self._build_buckets(corpus_iterator)
        self._generators = [self._build_data_generator(bucket_data, sent_len)
                            for sent_len, bucket_data in buckets.items()]

    def _build_buckets(self, corpus_iterator: CorpusIterator) -> Mapping[int, List[np.ndarray]]:
        """Group sentences into power-of-two length buckets.

        :return: {bucket_len: [data, labels, lemma_labels]} where every entry
            is a zero-padded (bucket_len, num_sentences) matrix.
        """
        buckets = defaultdict(list)
        with corpus_iterator:
            for sentence in corpus_iterator:
                bucket_size = self._get_bucket_size(len(sentence))
                # Per-token feature indices for this sentence.
                data = [self._data_info.get_word_index(tok.token) for tok in sentence]
                labels = [self._data_info.get_label_index(tok.grammar_value) for tok in sentence]
                lemma_labels = [self._data_info.get_lemmatize_rule_index(tok.token, tok.lemma)
                                for tok in sentence]
                data = [data, labels, lemma_labels]
                if bucket_size in buckets:
                    for i, feat_data in enumerate(data):
                        buckets[bucket_size][i].append(feat_data)
                else:
                    buckets[bucket_size] = [[feat_data] for feat_data in data]
        # Pad every bucket to its bucket length and stack into matrices.
        return {sent_len: [self._concatenate(data, sent_len) for data in bucket]
                for sent_len, bucket in buckets.items()}

    @staticmethod
    def _get_bucket_size(sent_len: int) -> int:
        """Return the smallest power-of-two bucket length (>= 4) fitting sent_len."""
        bucket_upper_limit = 4
        while bucket_upper_limit < sent_len:
            bucket_upper_limit *= 2
        return bucket_upper_limit

    @staticmethod
    def _concatenate(data: List[Union[List, np.ndarray]], sent_len: int) -> np.ndarray:
        """Stack variable-length sequences into a zero-padded (sent_len, batch) matrix."""
        matrix_shape = (sent_len, len(data))
        if isinstance(data[0], np.ndarray):
            # Preserve any extra trailing dimensions of ndarray inputs.
            matrix_shape += data[0].shape[1:]
        matrix = np.zeros(matrix_shape)
        for i, sent in enumerate(data):
            matrix[:len(sent), i] = sent
        return matrix

    def _build_data_generator(self, data: List[np.ndarray], sent_len: int) -> Generator:
        """Create an epoch-aware batch generator over one bucket."""
        indices = np.arange(data[0].shape[1])
        # Keep the token count per batch roughly constant across buckets.
        batch_size = max(self._batch_size // sent_len, 1)
        batchs_count = int(math.ceil(len(indices) / batch_size))
        def _batch_generator():
            while True:
                # Reshuffle sentence order at the start of every epoch.
                np.random.shuffle(indices)
                for i in range(batchs_count):
                    batch_begin = i * batch_size
                    batch_end = min((i + 1) * batch_size, len(indices))
                    batch_indices = indices[batch_begin: batch_end]
                    batch_data = data[0][:, batch_indices]
                    batch_labels = data[1][:, batch_indices]
                    batch_lemma_labels = data[2][:, batch_indices]
                    # First row whose label is 0 gives each sentence's length;
                    # no such row means the sentence fills the whole bucket.
                    # Assumes label index 0 is padding -- TODO confirm.
                    seq_lengths = (batch_labels == 0).argmax(axis=0)
                    seq_lengths[seq_lengths == 0] = batch_labels.shape[0]
                    max_seq_len = seq_lengths.max()
                    # Trim rows beyond the longest sentence in this batch.
                    batch_data = batch_data[:max_seq_len]
                    batch_labels, batch_lemma_labels = batch_labels[:max_seq_len], batch_lemma_labels[:max_seq_len]
                    yield batch_data, batch_labels, batch_lemma_labels
        return self.Generator(_batch_generator(), batchs_count)

    def __iter__(self):
        # The BatchGenerator is itself an (endless) iterator over batches.
        return self

    def __next__(self):
        # Yield one batch from a randomly chosen bucket; reset all buckets and
        # continue when an epoch ends, so StopIteration is never raised.
        while True:
            while self._has_generators():
                generator = self._sample_generator()
                assert generator.has_next()
                return generator.get_next()
            self._reset_generators()

    def _has_generators(self) -> bool:
        # True while at least one bucket still has batches left this epoch.
        return any(gen.has_next() for gen in self._generators)

    def _sample_generator(self) -> Generator:
        """Pick a bucket generator at random, weighted by remaining batches."""
        # Fix: np.float was removed in NumPy 1.24; use the builtin float dtype.
        dist = np.array([gen.left_count() for gen in self._generators], dtype=float)
        dist /= dist.sum()
        return np.random.choice(self._generators, p=dist)

    def _reset_generators(self):
        # Start a new epoch in every bucket.
        for gen in self._generators:
            gen.reset()

    @property
    def batchs_count(self) -> int:
        # Total batches per epoch across all buckets (name kept for callers).
        return sum(gen.batchs_count for gen in self._generators)

    def make_dataset(self) -> tf.data.Dataset:
        """Wrap this generator in a tf.data.Dataset of (data, labels, lemma_labels).

        Fix: ``from_generator`` is a classmethod -- ``tf.data.Dataset()``
        cannot be instantiated.
        """
        return tf.data.Dataset.from_generator(
            lambda: self,
            output_types=(tf.int64, tf.int64, tf.int64),
            output_shapes=(tf.TensorShape([None, None]),
                           tf.TensorShape([None, None]),
                           tf.TensorShape([None, None])))
from __future__ import absolute_import
import os
import re
import textwrap
from sybil import Region
from testfixtures import diff
# Start of a ReST ".. topic::" directive; group(1) captures the title, which
# is used as the target file path.
FILEBLOCK_START = re.compile(r'^\.\.\s*topic::?\s*(.+)\b', re.MULTILINE)
# A block ends at end-of-document or just before the next unindented line.
FILEBLOCK_END = re.compile(r'(\n\Z|\n(?=\S))')
# The ":class: read-file" / ":class: write-file" option line.
CLASS = re.compile(r'\s+:class:\s*(read|write)-file')
class FileBlock(object):
    """A parsed file directive: target ``path``, file ``content``, and the
    ``action`` to perform ('read' or 'write')."""
    def __init__(self, path, content, action):
        self.path = path
        self.content = content
        self.action = action
class FileParser(object):
    """
    A `Sybil <http://sybil.readthedocs.io>`__ parser that
    parses certain ReST sections to read and write files in the
    configured :class:`TempDirectory`.
    :param name: This is the name of the :class:`TempDirectory` to use
    in the Sybil test namespace.
    """
    def __init__(self, name):
        self.name = name
    def __call__(self, document):
        # Find every ".. topic::" region; its title is the target file path.
        for start_match, end_match, source in document.find_region_sources(
            FILEBLOCK_START, FILEBLOCK_END
        ):
            lines = source.splitlines()
            # lines[1] must carry the ":class: read-file"/":class: write-file"
            # option, otherwise this topic is not ours.
            # NOTE(review): assumes the region has at least 4 lines -- confirm.
            class_ = CLASS.match(lines[1])
            if not class_:
                continue
            # Body starts after the directive header; skip an optional "::"
            # literal-block marker line.
            index = 3
            if lines[index].strip() == '::':
                index += 1
            source = textwrap.dedent('\n'.join(lines[index:])).lstrip()
            # Normalise: the stored content always ends with a newline.
            if source[-1] != '\n':
                source += '\n'
            parsed = FileBlock(
                path=start_match.group(1),
                content=source,
                action=class_.group(1)
            )
            yield Region(
                start_match.start(),
                end_match.end(),
                parsed,
                self.evaluate
            )
    def evaluate(self, example):
        # Perform the parsed block's action against the TempDirectory held in
        # the Sybil namespace; returns a diff message on a read mismatch.
        block = example.parsed
        # NOTE(review): ``dir`` shadows the builtin of the same name.
        dir = example.namespace[self.name]
        if block.action == 'read':
            # Compare on-disk contents against the documented content,
            # normalising platform line endings.
            actual = dir.read(block.path, 'ascii').replace(os.linesep, '\n')
            if actual != block.content:
                return diff(
                    block.content,
                    actual,
                    'File %r, line %i:' % (example.path, example.line),
                    'Reading from "%s":' % dir.getpath(block.path)
                )
        if block.action == 'write':
            dir.write(block.path, block.content, 'ascii') | testfixtures/sybil.py | from __future__ import absolute_import
import os
import re
import textwrap
from sybil import Region
from testfixtures import diff
# Start of a ReST ".. topic::" directive; group(1) captures the title, which
# is used as the target file path.
FILEBLOCK_START = re.compile(r'^\.\.\s*topic::?\s*(.+)\b', re.MULTILINE)
# A block ends at end-of-document or just before the next unindented line.
FILEBLOCK_END = re.compile(r'(\n\Z|\n(?=\S))')
# The ":class: read-file" / ":class: write-file" option line.
CLASS = re.compile(r'\s+:class:\s*(read|write)-file')
class FileBlock(object):
    """A parsed file directive: target ``path``, file ``content``, and the
    ``action`` to perform ('read' or 'write')."""
    def __init__(self, path, content, action):
        self.path = path
        self.content = content
        self.action = action
class FileParser(object):
    """
    A `Sybil <http://sybil.readthedocs.io>`__ parser that
    parses certain ReST sections to read and write files in the
    configured :class:`TempDirectory`.
    :param name: This is the name of the :class:`TempDirectory` to use
    in the Sybil test namespace.
    """
    def __init__(self, name):
        self.name = name
    def __call__(self, document):
        # Find every ".. topic::" region; its title is the target file path.
        for start_match, end_match, source in document.find_region_sources(
            FILEBLOCK_START, FILEBLOCK_END
        ):
            lines = source.splitlines()
            # lines[1] must carry the ":class: read-file"/":class: write-file"
            # option, otherwise this topic is not ours.
            # NOTE(review): assumes the region has at least 4 lines -- confirm.
            class_ = CLASS.match(lines[1])
            if not class_:
                continue
            # Body starts after the directive header; skip an optional "::"
            # literal-block marker line.
            index = 3
            if lines[index].strip() == '::':
                index += 1
            source = textwrap.dedent('\n'.join(lines[index:])).lstrip()
            # Normalise: the stored content always ends with a newline.
            if source[-1] != '\n':
                source += '\n'
            parsed = FileBlock(
                path=start_match.group(1),
                content=source,
                action=class_.group(1)
            )
            yield Region(
                start_match.start(),
                end_match.end(),
                parsed,
                self.evaluate
            )
    def evaluate(self, example):
        # Perform the parsed block's action against the TempDirectory held in
        # the Sybil namespace; returns a diff message on a read mismatch.
        block = example.parsed
        # NOTE(review): ``dir`` shadows the builtin of the same name.
        dir = example.namespace[self.name]
        if block.action == 'read':
            # Compare on-disk contents against the documented content,
            # normalising platform line endings.
            actual = dir.read(block.path, 'ascii').replace(os.linesep, '\n')
            if actual != block.content:
                return diff(
                    block.content,
                    actual,
                    'File %r, line %i:' % (example.path, example.line),
                    'Reading from "%s":' % dir.getpath(block.path)
                )
        if block.action == 'write':
            dir.write(block.path, block.content, 'ascii') | 0.560493 | 0.128116 |
import sys
sys.path.append('../src/ptprobe')
import logging
import threading
import time
import argparse
import board
from sinks import InfluxDBSampleSink
import json
# Entry point: stream PTProbe sensor samples from a serial port into
# InfluxDB Cloud, optionally bounded by a sample count and/or a timeout.
if __name__ == "__main__":
    format = "%(asctime)s: %(message)s"  # NOTE(review): shadows builtin format()
    logging.basicConfig(format=format, level=logging.INFO, datefmt="%H:%M:%S")
    parser = argparse.ArgumentParser(description='Read PTProbe sensors over serial')
    parser.add_argument('-m','--max-count', type=int, default=0,
        help='Maximum number of samples to record. Default 0 (no maximum)')
    parser.add_argument('-t','--timeout', type=int, default=0,
        help='Collection time for sampling (s). Default is 0 (no timeout). The nominal sample rate is 5Hz.')
    parser.add_argument('-p', '--port', default='/dev/ttyACM0',
        help='Serial port name. Default is /dev/ttyACM0.')
    parser.add_argument('filename', help='JSON file for InfluxDB Cloud config')
    args = parser.parse_args()
    logging.info("Starting demo")
    logging.info(args)
    # Load the InfluxDB Cloud credentials/config (token, org, bucket).
    logging.info("Loading InfluxDB config")
    influxdb_cfg = None
    with open(args.filename) as jf:
        influxdb_cfg = json.load(jf)
        logging.info(" + org: {}".format(influxdb_cfg['org']))
        logging.info(" + bucket: {}".format(influxdb_cfg['bucket']))
    logging.info("Creating sink")
    db = InfluxDBSampleSink(
        token=influxdb_cfg['token'],
        org=influxdb_cfg['org'],
        bucket=influxdb_cfg['bucket'])
    pt = board.Controller(port=args.port, sinks=[db])
    board_id = pt.board_id()
    logging.info("Board ID: {}".format(board_id))
    db.set_board_id(board_id)
    db.open()
    # Collect samples on a worker thread so the main thread can heartbeat.
    logging.info("Main: creating thread")
    x = threading.Thread(target=pt.collect_samples, args=(args.max_count,))
    logging.info("Main: starting thread")
    x.start()
    # Timer is created unconditionally but only started when a timeout was
    # requested; cancel() below is safe on an unstarted timer.
    t = threading.Timer(args.timeout, pt.stop_collection)
    if args.timeout > 0:
        t.start()
    # Print a heartbeat once per second while collection is running.
    heartbeat = 0
    while x.is_alive():
        heartbeat += 1
        logging.info("..{}".format(heartbeat))
        time.sleep(1.)
    # here either the timer expired and called halt or we processed
    # max_steps messages and exited
    logging.info("Main: cancel timer")
    t.cancel()
    logging.info("Main: calling join")
    x.join()
    logging.info("Main: closing sink")
    db.close()
    logging.info("Main: done") | client/examples/read_to_influxdb.py | import sys
sys.path.append('../src/ptprobe')
import logging
import threading
import time
import argparse
import board
from sinks import InfluxDBSampleSink
import json
# Entry point: stream PTProbe sensor samples from a serial port into
# InfluxDB Cloud, optionally bounded by a sample count and/or a timeout.
if __name__ == "__main__":
    format = "%(asctime)s: %(message)s"  # NOTE(review): shadows builtin format()
    logging.basicConfig(format=format, level=logging.INFO, datefmt="%H:%M:%S")
    parser = argparse.ArgumentParser(description='Read PTProbe sensors over serial')
    parser.add_argument('-m','--max-count', type=int, default=0,
        help='Maximum number of samples to record. Default 0 (no maximum)')
    parser.add_argument('-t','--timeout', type=int, default=0,
        help='Collection time for sampling (s). Default is 0 (no timeout). The nominal sample rate is 5Hz.')
    parser.add_argument('-p', '--port', default='/dev/ttyACM0',
        help='Serial port name. Default is /dev/ttyACM0.')
    parser.add_argument('filename', help='JSON file for InfluxDB Cloud config')
    args = parser.parse_args()
    logging.info("Starting demo")
    logging.info(args)
    # Load the InfluxDB Cloud credentials/config (token, org, bucket).
    logging.info("Loading InfluxDB config")
    influxdb_cfg = None
    with open(args.filename) as jf:
        influxdb_cfg = json.load(jf)
        logging.info(" + org: {}".format(influxdb_cfg['org']))
        logging.info(" + bucket: {}".format(influxdb_cfg['bucket']))
    logging.info("Creating sink")
    db = InfluxDBSampleSink(
        token=influxdb_cfg['token'],
        org=influxdb_cfg['org'],
        bucket=influxdb_cfg['bucket'])
    pt = board.Controller(port=args.port, sinks=[db])
    board_id = pt.board_id()
    logging.info("Board ID: {}".format(board_id))
    db.set_board_id(board_id)
    db.open()
    # Collect samples on a worker thread so the main thread can heartbeat.
    logging.info("Main: creating thread")
    x = threading.Thread(target=pt.collect_samples, args=(args.max_count,))
    logging.info("Main: starting thread")
    x.start()
    # Timer is created unconditionally but only started when a timeout was
    # requested; cancel() below is safe on an unstarted timer.
    t = threading.Timer(args.timeout, pt.stop_collection)
    if args.timeout > 0:
        t.start()
    # Print a heartbeat once per second while collection is running.
    heartbeat = 0
    while x.is_alive():
        heartbeat += 1
        logging.info("..{}".format(heartbeat))
        time.sleep(1.)
    # here either the timer expired and called halt or we processed
    # max_steps messages and exited
    logging.info("Main: cancel timer")
    t.cancel()
    logging.info("Main: calling join")
    x.join()
    logging.info("Main: closing sink")
    db.close()
    logging.info("Main: done") | 0.263031 | 0.061509 |
from htm.bindings.algorithms import TemporalMemory
from htm.bindings.sdr import SDR
from htm.encoders.rdse import RDSE, RDSE_Parameters
import argparse
import itertools
import numpy as np
import random
import time
default_parameters = {
'num_cells': 2000,
'local_sparsity': .02,
'apical_denrites': {
'activationThreshold': 20,
'minThreshold': 14,
'maxNewSynapseCount': 32,
'connectedPermanence': 0.5,
'initialPermanence': 0.21,
'permanenceIncrement': 0.05,
'permanenceDecrement': 0.025,
'predictedSegmentDecrement': 0.002,
'maxSynapsesPerSegment': 40,
'maxSegmentsPerCell': 40,
},
}
conscious_threshold = 20/100
BACKGROUND = " ."
class World:
    """A toy 2-D grid world whose single-character objects take random walks."""

    def __init__(self, dims, objects):
        """Create a ``dims``-shaped board and scatter ``objects`` (an iterable
        of characters) onto distinct random cells."""
        self.dims = tuple(dims)
        self.coordinates = list(itertools.product(*(range(x) for x in self.dims)))
        # Put the objects at random locations (all distinct).
        locations = random.sample(self.coordinates, len(objects))
        self.objects = {p: locations.pop() for p in objects}

    def state(self):
        """ Returns 2D grid of the objects and the BACKGROUND """
        data = np.full(self.dims, BACKGROUND, dtype=object)
        for character, location in self.objects.items():
            data[location] = character
        return data

    def draw(self):
        """Render the board as a newline-separated string (no trailing newline)."""
        data = self.state()
        string = ""
        for row in range(self.dims[0]):
            for col in range(self.dims[1]):
                string += data[row, col]
            string += "\n"
        return string[:-1]

    def draw_colored(self, colors):
        """Render the board with cells marked "red" in ``colors`` highlighted.

        NOTE(review): cells whose color is truthy but not "red" are dropped
        from the output entirely -- confirm this is intended.
        """
        data = self.state()
        string = ""
        for row in range(self.dims[0]):
            for col in range(self.dims[1]):
                character = data[row, col]
                color = colors[row, col]
                if not bool(color):
                    string += character
                elif color == "red":
                    # ANSI bold + red background around the character.
                    string += '\033[1m\033[41m' + character + '\033[0m'
            string += "\n"
        return string[:-1]

    def advance(self):
        """Move every object to a uniformly random adjacent (or current) free
        cell, staying on the board."""
        for character, (row, col) in self.objects.items():
            # Snapshot the grid once per character: it only changes after this
            # character actually moves.  (Previously the full grid was rebuilt
            # for every one of the nine candidate offsets.)
            grid = self.state()
            destinations = []
            for (offset_row, offset_col) in [
                (-1, -1), (-1, 0), (-1, +1),
                ( 0, -1), ( 0, 0), ( 0, +1),
                (+1, -1), (+1, 0), (+1, +1),
            ]:
                next_row = row + offset_row
                next_col = col + offset_col
                if next_row not in range(self.dims[0]): continue
                if next_col not in range(self.dims[1]): continue
                if grid[next_row, next_col] not in (character, BACKGROUND): continue
                destinations.append((next_row, next_col))
            self.objects[character] = random.choice(destinations)
class Model:
def __init__(self, world, parameters=default_parameters):
self.world = world
self.area_size = parameters['num_cells']
self.num_areas = len(self.world.coordinates)
# Make an RDSE for every location.
self.enc = np.zeros(self.world.dims, dtype=object)
enc_parameters = RDSE_Parameters()
enc_parameters.size = self.area_size
enc_parameters.sparsity = parameters['local_sparsity']
enc_parameters.category = True
for coords in self.world.coordinates:
self.enc[coords] = RDSE(enc_parameters)
# Make empty buffers for the working data.
self.local = np.zeros(self.world.dims, dtype=object)
self.gnw = np.zeros(self.world.dims, dtype=object)
for coords in self.world.coordinates:
self.local[coords] = SDR((self.area_size,))
self.gnw[coords] = SDR((self.area_size,))
# Make an instance of the model at every location.
self.apical_denrites = np.zeros(self.world.dims, dtype=object)
self.gnw_size = self.num_areas * self.area_size
for coords in self.world.coordinates:
self.apical_denrites[coords] = TemporalMemory(
[self.area_size], # column_dimensions
cellsPerColumn = 1,
externalPredictiveInputs = self.gnw_size,
seed = 0,
**parameters['apical_denrites'])
def reset_attention(self):
for coords in self.world.coordinates:
self.gnw[coords] = SDR((self.area_size,))
def advance(self, learn=True):
self.world.advance()
world_data = self.world.state()
# Compute the local activity by encoding the sensory data into an SDR.
self.local = np.zeros(self.world.dims, dtype=object)
for idx, coords in enumerate(self.world.coordinates):
character = world_data[coords]
enc = self.enc[coords]
if character == BACKGROUND:
self.local[coords] = SDR((self.area_size,))
else:
self.local[coords] = enc.encode(ord(character))
# Compute the apical dendrites.
prev_gnw = SDR((self.gnw_size,)).concatenate(list(self.gnw.flat))
self.gnw = np.zeros(self.world.dims, dtype=object)
for coords in self.world.coordinates:
self.apical_denrites[coords].reset()
self.apical_denrites[coords].activateDendrites(True, prev_gnw, prev_gnw)
apical_activity = self.apical_denrites[coords].getPredictiveCells().reshape((self.area_size,))
self.gnw[coords] = SDR((self.area_size,)).intersection(self.local[coords], apical_activity)
self.apical_denrites[coords].activateCells(self.local[coords], True)
def attention_map(self):
percent = np.empty(self.world.dims)
for coords in self.world.coordinates:
gnw_sparsity = self.gnw[coords].getSparsity()
local_sparsity = self.local[coords].getSparsity()
if local_sparsity == 0:
percent[coords] = 0.;
else:
percent[coords] = gnw_sparsity / local_sparsity
return percent
def draw_heatmap(self):
attention = np.zeros(self.world.dims, dtype=object)
attention[self.attention_map() >= conscious_threshold] = "red"
return self.world.draw_colored(attention)
def promote_region(self, coordinates=None, verbose=False):
if coordinates is None:
coordinates = random.choice(self.world.coordinates)
self.gnw[coordinates] = self.local[coordinates]
if verbose: print("Promote (%d, %d) to attention."%coordinates)
def promote_object(self, character=None, verbose=False):
if character is None:
character, location = random.choice(list(self.world.objects.items()))
else:
location = self.world.objects[character]
self.promote_region(location)
if verbose: print("Promote %s to attention."%character)
return character
def run(self, iterations, train_not_test, character=None, verbose=True):
attention_spans = [];
self.reset_attention()
for t in range(iterations):
message = ""
if not any(x >= conscious_threshold for x in self.attention_map().flat):
current_episode_length = 0
obj = self.promote_object(character)
# self.promote_region(verbose=verbose)
message += "Promoted %s to \nconscious attention.\n"%obj
self.advance()
if verbose:
heatmap = self.draw_heatmap()
attn = self.attention_map()
if train_not_test:
if any(x >= conscious_threshold for x in self.attention_map().flat):
current_episode_length += 1
else:
attention_spans.append(current_episode_length)
current_episode_length = None
else:
if attn[self.world.objects[obj]] >= conscious_threshold and (
sum(attn.flat >= conscious_threshold) == 1):
current_episode_length += 1
else:
message += "Attention failed.\n"
attention_spans.append(current_episode_length)
current_episode_length = None
self.reset_attention()
if verbose:
if not train_not_test: print("\033c", end='')
print("=="*self.world.dims[1])
print(heatmap)
print("=="*self.world.dims[1])
while message.count("\n") < 3: message += "\n"
print(message, end='')
print("=="*self.world.dims[1])
print("\n")
if train_not_test: pass
elif message.strip(): time.sleep(1)
else: time.sleep(1/7)
if current_episode_length is not None: attention_spans.append(current_episode_length)
avg_attention_span = np.mean(attention_spans)
return avg_attention_span
def main(parameters, argv=None, verbose=False):
    """Train on a random 6x10 world with four objects, then return the mean
    attention span measured over a 2000-step test run.

    :param parameters: hyperparameter dict.  NOTE(review): currently unused --
        Model(w) is built with its own defaults; confirm whether it should be
        passed through.
    :param argv: optional argument list for argparse (no options are defined).
    :param verbose: print training progress and run a live visual demo.
    """
    parser = argparse.ArgumentParser()
    args = parser.parse_args(argv)  # NOTE(review): args is unused
    w = World([6,10], "🐱🐶🏀🦍")
    m = Model(w)
    session_length = 20
    # Training budget scales with board area and object count.
    num_actions = w.dims[0]*w.dims[1]*len(w.objects) * 10
    for epoch in range(num_actions):
        m.run(session_length, train_not_test=True, verbose=False)
        if verbose and epoch % 2 == 0:
            print("\033cTraining %d%%"%int(100 * epoch / num_actions))
    if verbose: m.run(100, train_not_test=False, verbose=verbose)
    # NOTE(review): Model.run only assigns ``attn`` under ``if verbose:`` but
    # reads it in test mode, so this non-verbose test call looks like it would
    # raise NameError -- verify.
    score = m.run(2000, train_not_test=False, verbose=False)
    if verbose: print(str(m.apical_denrites[0,0].connections))
    if verbose: print("Average attention span:", score)
    return score
if __name__ == "__main__": main(default_parameters, verbose=True) | main.py | from htm.bindings.algorithms import TemporalMemory
from htm.bindings.sdr import SDR
from htm.encoders.rdse import RDSE, RDSE_Parameters
import argparse
import itertools
import numpy as np
import random
import time
# Hyperparameters: one TemporalMemory per grid cell acts as that area's
# apical-dendrite model.  (The 'apical_denrites' spelling is read verbatim by
# Model.__init__ -- do not "fix" it in isolation.)
default_parameters = {
    'num_cells': 2000,       # SDR size of each cortical area
    'local_sparsity': .02,   # active fraction of cells in a local SDR
    'apical_denrites': {
        'activationThreshold': 20,
        'minThreshold': 14,
        'maxNewSynapseCount': 32,
        'connectedPermanence': 0.5,
        'initialPermanence': 0.21,
        'permanenceIncrement': 0.05,
        'permanenceDecrement': 0.025,
        'predictedSegmentDecrement': 0.002,
        'maxSynapsesPerSegment': 40,
        'maxSegmentsPerCell': 40,
    },
}
# An area counts as consciously attended when at least this fraction of its
# local activity is reproduced in the global workspace.
conscious_threshold = 20/100
# Two-character filler for empty cells (the objects are double-width emoji).
BACKGROUND = " ."
class World:
    """A toy 2-D grid world whose single-character objects take random walks."""

    def __init__(self, dims, objects):
        """Create a ``dims``-shaped board and scatter ``objects`` (an iterable
        of characters) onto distinct random cells."""
        self.dims = tuple(dims)
        self.coordinates = list(itertools.product(*(range(x) for x in self.dims)))
        # Put the objects at random locations (all distinct).
        locations = random.sample(self.coordinates, len(objects))
        self.objects = {p: locations.pop() for p in objects}

    def state(self):
        """ Returns 2D grid of the objects and the BACKGROUND """
        data = np.full(self.dims, BACKGROUND, dtype=object)
        for character, location in self.objects.items():
            data[location] = character
        return data

    def draw(self):
        """Render the board as a newline-separated string (no trailing newline)."""
        data = self.state()
        string = ""
        for row in range(self.dims[0]):
            for col in range(self.dims[1]):
                string += data[row, col]
            string += "\n"
        return string[:-1]

    def draw_colored(self, colors):
        """Render the board with cells marked "red" in ``colors`` highlighted.

        NOTE(review): cells whose color is truthy but not "red" are dropped
        from the output entirely -- confirm this is intended.
        """
        data = self.state()
        string = ""
        for row in range(self.dims[0]):
            for col in range(self.dims[1]):
                character = data[row, col]
                color = colors[row, col]
                if not bool(color):
                    string += character
                elif color == "red":
                    # ANSI bold + red background around the character.
                    string += '\033[1m\033[41m' + character + '\033[0m'
            string += "\n"
        return string[:-1]

    def advance(self):
        """Move every object to a uniformly random adjacent (or current) free
        cell, staying on the board."""
        for character, (row, col) in self.objects.items():
            # Snapshot the grid once per character: it only changes after this
            # character actually moves.  (Previously the full grid was rebuilt
            # for every one of the nine candidate offsets.)
            grid = self.state()
            destinations = []
            for (offset_row, offset_col) in [
                (-1, -1), (-1, 0), (-1, +1),
                ( 0, -1), ( 0, 0), ( 0, +1),
                (+1, -1), (+1, 0), (+1, +1),
            ]:
                next_row = row + offset_row
                next_col = col + offset_col
                if next_row not in range(self.dims[0]): continue
                if next_col not in range(self.dims[1]): continue
                if grid[next_row, next_col] not in (character, BACKGROUND): continue
                destinations.append((next_row, next_col))
            self.objects[character] = random.choice(destinations)
class Model:
    """A grid of cortical "areas", one per world cell, coupled through a
    global neuronal workspace (GNW): each area's TemporalMemory receives the
    previous GNW state as external (apical) predictive input."""

    def __init__(self, world, parameters=default_parameters):
        # NOTE(review): ``parameters`` defaults to a shared module-level dict;
        # it is only read here, but callers should not mutate it.
        self.world = world
        self.area_size = parameters['num_cells']
        self.num_areas = len(self.world.coordinates)
        # Make an RDSE for every location.
        self.enc = np.zeros(self.world.dims, dtype=object)
        enc_parameters = RDSE_Parameters()
        enc_parameters.size = self.area_size
        enc_parameters.sparsity = parameters['local_sparsity']
        enc_parameters.category = True
        for coords in self.world.coordinates:
            self.enc[coords] = RDSE(enc_parameters)
        # Make empty buffers for the working data.
        self.local = np.zeros(self.world.dims, dtype=object)
        self.gnw = np.zeros(self.world.dims, dtype=object)
        for coords in self.world.coordinates:
            self.local[coords] = SDR((self.area_size,))
            self.gnw[coords] = SDR((self.area_size,))
        # Make an instance of the model at every location.
        self.apical_denrites = np.zeros(self.world.dims, dtype=object)
        self.gnw_size = self.num_areas * self.area_size
        for coords in self.world.coordinates:
            self.apical_denrites[coords] = TemporalMemory(
                [self.area_size], # column_dimensions
                cellsPerColumn = 1,
                externalPredictiveInputs = self.gnw_size,
                seed = 0,
                **parameters['apical_denrites'])
    def reset_attention(self):
        # Clear the global workspace in every area.
        for coords in self.world.coordinates:
            self.gnw[coords] = SDR((self.area_size,))
    def advance(self, learn=True):
        # Step the world, then update local activity, GNW state and dendrites.
        # NOTE(review): the ``learn`` flag is currently ignored -- learning is
        # hard-coded to True in the calls below.
        self.world.advance()
        world_data = self.world.state()
        # Compute the local activity by encoding the sensory data into an SDR.
        self.local = np.zeros(self.world.dims, dtype=object)
        for idx, coords in enumerate(self.world.coordinates):
            character = world_data[coords]
            enc = self.enc[coords]
            if character == BACKGROUND:
                self.local[coords] = SDR((self.area_size,))
            else:
                self.local[coords] = enc.encode(ord(character))
        # Compute the apical dendrites.
        prev_gnw = SDR((self.gnw_size,)).concatenate(list(self.gnw.flat))
        self.gnw = np.zeros(self.world.dims, dtype=object)
        for coords in self.world.coordinates:
            self.apical_denrites[coords].reset()
            self.apical_denrites[coords].activateDendrites(True, prev_gnw, prev_gnw)
            apical_activity = self.apical_denrites[coords].getPredictiveCells().reshape((self.area_size,))
            # New GNW state: local activity confirmed by apical prediction.
            self.gnw[coords] = SDR((self.area_size,)).intersection(self.local[coords], apical_activity)
            self.apical_denrites[coords].activateCells(self.local[coords], True)
    def attention_map(self):
        # Per-cell fraction of local activity reproduced in the GNW
        # (0 when the cell has no local activity).
        percent = np.empty(self.world.dims)
        for coords in self.world.coordinates:
            gnw_sparsity = self.gnw[coords].getSparsity()
            local_sparsity = self.local[coords].getSparsity()
            if local_sparsity == 0:
                percent[coords] = 0.;
            else:
                percent[coords] = gnw_sparsity / local_sparsity
        return percent
    def draw_heatmap(self):
        # Highlight consciously-attended cells in red.
        attention = np.zeros(self.world.dims, dtype=object)
        attention[self.attention_map() >= conscious_threshold] = "red"
        return self.world.draw_colored(attention)
    def promote_region(self, coordinates=None, verbose=False):
        # Copy one cell's local activity into the GNW (random cell if None).
        if coordinates is None:
            coordinates = random.choice(self.world.coordinates)
        self.gnw[coordinates] = self.local[coordinates]
        if verbose: print("Promote (%d, %d) to attention."%coordinates)
    def promote_object(self, character=None, verbose=False):
        # Promote the cell holding ``character`` (a random object if None).
        if character is None:
            character, location = random.choice(list(self.world.objects.items()))
        else:
            location = self.world.objects[character]
        self.promote_region(location)
        if verbose: print("Promote %s to attention."%character)
        return character
    def run(self, iterations, train_not_test, character=None, verbose=True):
        # Run the world/model loop, measuring how long attention is held.
        # Training mode: an episode lasts while ANY cell stays above
        # conscious_threshold.  Test mode: it lasts while ONLY the promoted
        # object's cell does.  Returns the mean episode length.
        # NOTE(review): ``attn`` and ``heatmap`` are only assigned under
        # ``if verbose:`` but ``attn`` is read in the test branch below, so
        # run(..., train_not_test=False, verbose=False) raises NameError.
        attention_spans = [];
        self.reset_attention()
        for t in range(iterations):
            message = ""
            # If nothing is attended, start a new episode by promoting.
            if not any(x >= conscious_threshold for x in self.attention_map().flat):
                current_episode_length = 0
                obj = self.promote_object(character)
                # self.promote_region(verbose=verbose)
                message += "Promoted %s to \nconscious attention.\n"%obj
            self.advance()
            if verbose:
                heatmap = self.draw_heatmap()
                attn = self.attention_map()
            if train_not_test:
                if any(x >= conscious_threshold for x in self.attention_map().flat):
                    current_episode_length += 1
                else:
                    attention_spans.append(current_episode_length)
                    current_episode_length = None
            else:
                if attn[self.world.objects[obj]] >= conscious_threshold and (
                    sum(attn.flat >= conscious_threshold) == 1):
                    current_episode_length += 1
                else:
                    message += "Attention failed.\n"
                    attention_spans.append(current_episode_length)
                    current_episode_length = None
                    self.reset_attention()
            if verbose:
                if not train_not_test: print("\033c", end='')
                print("=="*self.world.dims[1])
                print(heatmap)
                print("=="*self.world.dims[1])
                while message.count("\n") < 3: message += "\n"
                print(message, end='')
                print("=="*self.world.dims[1])
                print("\n")
                if train_not_test: pass
                elif message.strip(): time.sleep(1)
                else: time.sleep(1/7)
        if current_episode_length is not None: attention_spans.append(current_episode_length)
        avg_attention_span = np.mean(attention_spans)
        return avg_attention_span
def main(parameters, argv=None, verbose=False):
    """Train on a random 6x10 world with four objects, then return the mean
    attention span measured over a 2000-step test run.

    :param parameters: hyperparameter dict.  NOTE(review): currently unused --
        Model(w) is built with its own defaults; confirm whether it should be
        passed through.
    :param argv: optional argument list for argparse (no options are defined).
    :param verbose: print training progress and run a live visual demo.
    """
    parser = argparse.ArgumentParser()
    args = parser.parse_args(argv)  # NOTE(review): args is unused
    w = World([6,10], "🐱🐶🏀🦍")
    m = Model(w)
    session_length = 20
    # Training budget scales with board area and object count.
    num_actions = w.dims[0]*w.dims[1]*len(w.objects) * 10
    for epoch in range(num_actions):
        m.run(session_length, train_not_test=True, verbose=False)
        if verbose and epoch % 2 == 0:
            print("\033cTraining %d%%"%int(100 * epoch / num_actions))
    if verbose: m.run(100, train_not_test=False, verbose=verbose)
    # NOTE(review): Model.run only assigns ``attn`` under ``if verbose:`` but
    # reads it in test mode, so this non-verbose test call looks like it would
    # raise NameError -- verify.
    score = m.run(2000, train_not_test=False, verbose=False)
    if verbose: print(str(m.apical_denrites[0,0].connections))
    if verbose: print("Average attention span:", score)
    return score
if __name__ == "__main__": main(default_parameters, verbose=True) | 0.720467 | 0.266441 |
def lex_sexp(sexp_string: str):
    """
    Tokenizes an s-expression string not containing comments.
    Yields "(", ")" and atom tokens in order, skipping whitespace.
    Example: lex_sexp transforms the string " \n \t (a ( bc d) 1)" into the
    sequence ("(", "a", "(", "bc", "d", ")", "1", ")").
    :param sexp_string: An s-expression string that does not contain comments.
    :return: An iterator iterating over the tokens contained in sexp_string.
    """
    length = len(sexp_string)
    pos = 0
    while pos < length:
        ch = sexp_string[pos]
        if ch.isspace():
            pos += 1
        elif ch in "()":
            yield ch
            pos += 1
        else:
            # Consume an atom: everything up to whitespace or a parenthesis.
            end = pos + 1
            while end < length and not sexp_string[end].isspace() and sexp_string[end] not in "()":
                end += 1
            yield sexp_string[pos:end]
            pos = end
def parse_sexp(sexp_iter):
    """
    Transforms a sequence of s-expression tokens (given in string form) into a corresponding tree of strings.
    The given sequence is interpreted as the elements of an enclosing list expression, i.e.
    with a prepended "(" token and an appended ")" token.
    Example: parse_sexp transforms the sequence ("(", "a", "(", "bc", "d", ")", "1", ")", "5")
    into the structure [["a" ["bc" "d"] "1"], "5"].
    :param sexp_iter: An iterator iterating over the tokens contained in in a s-expression.
    :return: The tree-structure representation of sexp_iter, with s-expression lists being represented
    using Python lists.
    :raises ValueError when sexp_string is malformed.
    """
    results = []
    def __recursively_parse_sexp():
        # Parse tokens up to (and consuming) the matching ")".
        result = []
        found_expression_end = False
        for token_ in sexp_iter:
            if token_ == ")":
                found_expression_end = True
                break
            elif token_ == "(":
                # Nested list: recurse; the shared iterator keeps our position.
                result.append(__recursively_parse_sexp())
            else:
                result.append(token_)
        if not found_expression_end:
            raise ValueError("Unterminated symbolic expression")
        return result
    # Top level: treat the stream as the contents of an implicit outer list.
    # NOTE(review): a stray top-level ")" is not rejected -- it is appended to
    # the results as if it were an atom; confirm this is intended.
    for token in sexp_iter:
        if token == "(":
            results.append(__recursively_parse_sexp())
        else:
            results.append(token)
    return results | cscl_examples/smt_qfbv_solver/sexp_parser.py | def lex_sexp(sexp_string: str):
"""
Tokenizes an s-expression string not containing comments.
Example: lex_sexp transforms the string " \n \t (a ( bc d) 1)" into the sequence
("(", "a", "(", "bc", "d", ")", "1", ")").
:param sexp_string: An s-expression string that does not contain comments.
:return: An iterator iterating over the tokens contained in sexp_string.
"""
cursor = 0
while cursor != len(sexp_string):
if sexp_string[cursor].isspace():
cursor += 1
continue
if sexp_string[cursor] == '(':
yield "("
cursor += 1
elif sexp_string[cursor] == ')':
yield ")"
cursor += 1
else:
cursor_ahead = cursor+1
while cursor_ahead != len(sexp_string) \
and (not sexp_string[cursor_ahead].isspace()) \
and sexp_string[cursor_ahead] != '(' \
and sexp_string[cursor_ahead] != ')':
cursor_ahead += 1
yield sexp_string[cursor:cursor_ahead]
cursor = cursor_ahead
def parse_sexp(sexp_iter):
    """
    Transforms a sequence of s-expression tokens (given in string form) into a corresponding tree of strings.
    The given sequence is interpreted as the elements of an enclosing list expression, i.e.
    with a prepended "(" token and an appended ")" token.
    Example: parse_sexp transforms the sequence ("(", "a", "(", "bc", "d", ")", "1", ")", "5")
    into the structure [["a", ["bc", "d"], "1"], "5"].
    :param sexp_iter: An iterator iterating over the tokens contained in an s-expression.
    :return: The tree-structure representation of sexp_iter, with s-expression lists being represented
             using Python lists.
    :raises ValueError: if the token sequence is malformed (an unterminated "(" or a stray ")").
    """
    results = []

    def __recursively_parse_sexp():
        # Parse one parenthesized list (opening "(" already consumed),
        # including its closing ")" token.
        result = []
        found_expression_end = False
        for token_ in sexp_iter:
            if token_ == ")":
                found_expression_end = True
                break
            elif token_ == "(":
                result.append(__recursively_parse_sexp())
            else:
                result.append(token_)
        if not found_expression_end:
            # Iterator exhausted before the matching ")" was seen.
            raise ValueError("Unterminated symbolic expression")
        return result

    for token in sexp_iter:
        if token == "(":
            results.append(__recursively_parse_sexp())
        elif token == ")":
            # Fix: a ")" without a matching "(" is malformed input.  It was
            # previously appended as an ordinary atom, contradicting the
            # documented ValueError contract for malformed expressions.
            raise ValueError("Unmatched closing parenthesis in symbolic expression")
        else:
            results.append(token)
    return results
import sys
import glob
import os
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import scipy
import imageio
import cv2
import pathlib
import skimage
import PIL
from PIL import Image
from interactive_crop import interactive_crop
def _rotate_crop_rescale(image, ang, x_range, y_range):
    """Rotate *image* by *ang* degrees, crop it to the given row/column
    ranges, and return (full, downscaled) arrays.

    The downscaled copy rescales each of the 3 channels by 0.1, matching
    the original per-channel pipeline.  skimage.transform.rotate converts
    the input to floats in [0, 1], so both outputs are float images.
    """
    rotated = skimage.transform.rotate(image, ang)
    cropped = (rotated[x_range])[:, y_range].astype(np.float32)
    # Rescale the first channel to learn the downscaled shape, then fill
    # the remaining channels into a fresh 3-channel buffer.
    first = skimage.transform.rescale(cropped[:, :, 0], 0.1)
    small = np.zeros([first.shape[0], first.shape[1], 3])
    small[:, :, 0] = first
    small[:, :, 1] = skimage.transform.rescale(cropped[:, :, 1], 0.1)
    small[:, :, 2] = skimage.transform.rescale(cropped[:, :, 2], 0.1)
    return cropped, small


def _to_uint8(image):
    """Scale a float image in [0, 1] to 8-bit for cv2.imwrite."""
    return (image * 255).astype(np.uint8)


def crop_datastack(exp_folder, alignment_channel):
    """Interactively choose a rotation+crop on the downscaled conv stack,
    then apply it to every full-size image set and save cropped copies
    under ``<exp_folder>\\cropped\\``.

    Parameters
    ----------
    exp_folder : str
        Experiment root folder containing a ``rigid_align`` subfolder.
    alignment_channel : int
        Wavelength of the alignment channel; must be 561 or 488.

    Returns
    -------
    bool
        True once all images have been written.
    """
    # Only these two wavelengths have corresponding conv_<channel> folders.
    assert alignment_channel in [561, 488]

    path = exp_folder + "\\rigid_align\\"
    storm_path = path + "\\storm_merged\\"
    conv_path = path + "\\conv_merged\\"
    conv_path_ds = path + "\\conv_merged_ds\\"
    align_path = path + "\\for_align\\"
    wga_path = path + "\\conv_{}\\".format(str(alignment_channel))

    tif_ext = r"*.tif"
    png_ext = r"*.png"

    def _list_images(folder):
        # Accept both .tif and .png files within a folder.
        return glob.glob(folder + tif_ext) + glob.glob(folder + png_ext)

    wga_files = _list_images(wga_path)
    storm_files = _list_images(storm_path)
    conv_files = _list_images(conv_path)
    conv_files_ds = _list_images(conv_path_ds)
    align_files = _list_images(align_path)

    num_images = len(storm_files)

    print("normalizing conv-1")
    # Stack the downscaled conv images: h x w x 3 x n_images, then take the
    # max projection over the stack for the interactive crop preview.
    C1 = np.stack([imageio.imread(conv_files_ds[k]) for k in range(num_images)], -1)
    C2 = C1.max(axis=3)

    # Fixed rotation angle (degrees) applied before cropping.
    ang = -8
    C3 = skimage.transform.rotate(C2, ang)

    # Let the user pick the crop region on the rotated projection.
    crop_reg = interactive_crop(C3)
    print('Cropping Coordinates: {}, Angle: {}'.format(crop_reg, ang))

    # Full-size images are 10x larger than the downscaled projection, so
    # scale the crop coordinates up.  NOTE(review): rows use crop indices
    # 1/3 and columns 0/2 — preserved from the original implementation;
    # confirm against interactive_crop's return order.
    crop_storm = [int(np.ceil(c * 10)) for c in crop_reg]
    crop_storm_x_range = range(crop_storm[1], crop_storm[3])
    crop_storm_y_range = range(crop_storm[0], crop_storm[2])

    print('Making directories!')
    out_root = exp_folder + "\\cropped\\"
    if not os.path.exists(out_root):
        os.mkdir(out_root)
        for sub in ("storm_merged", "storm_merged_ds",
                    "conv_merged", "conv_merged_ds",
                    "conv_{}".format(alignment_channel),
                    "conv_{}_ds".format(alignment_channel),
                    "for_align", "for_align_ds"):
            os.mkdir(out_root + sub + "\\")

    print('Applying angle and crop to full size images and then saving out!')
    # Each job: (input file list, full-size out folder, downscaled out folder).
    jobs = [
        (storm_files, "storm_merged", "storm_merged_ds"),
        (conv_files, "conv_merged", "conv_merged_ds"),
        (align_files, "for_align", "for_align_ds"),
        (wga_files, "conv_{}".format(alignment_channel),
         "conv_{}_ds".format(alignment_channel)),
    ]
    for k in range(num_images):
        print(k)
        for files, full_dir, ds_dir in jobs:
            full, small = _rotate_crop_rescale(
                cv2.imread(files[k]), ang,
                crop_storm_x_range, crop_storm_y_range)
            name = str(k).zfill(3) + '.tif'
            cv2.imwrite(exp_folder + "/cropped/" + full_dir + "/" + name,
                        _to_uint8(full))
            cv2.imwrite(exp_folder + "/cropped/" + ds_dir + "/" + name,
                        _to_uint8(small))

    print('Done with cropping!')
    return True
import glob
import os
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import scipy
import imageio
import cv2
import pathlib
import skimage
import PIL
from PIL import Image
from interactive_crop import interactive_crop
def _rotate_crop_rescale(image, ang, x_range, y_range):
    """Rotate *image* by *ang* degrees, crop it to the given row/column
    ranges, and return (full, downscaled) arrays.

    The downscaled copy rescales each of the 3 channels by 0.1, matching
    the original per-channel pipeline.  skimage.transform.rotate converts
    the input to floats in [0, 1], so both outputs are float images.
    """
    rotated = skimage.transform.rotate(image, ang)
    cropped = (rotated[x_range])[:, y_range].astype(np.float32)
    # Rescale the first channel to learn the downscaled shape, then fill
    # the remaining channels into a fresh 3-channel buffer.
    first = skimage.transform.rescale(cropped[:, :, 0], 0.1)
    small = np.zeros([first.shape[0], first.shape[1], 3])
    small[:, :, 0] = first
    small[:, :, 1] = skimage.transform.rescale(cropped[:, :, 1], 0.1)
    small[:, :, 2] = skimage.transform.rescale(cropped[:, :, 2], 0.1)
    return cropped, small


def _to_uint8(image):
    """Scale a float image in [0, 1] to 8-bit for cv2.imwrite."""
    return (image * 255).astype(np.uint8)


def crop_datastack(exp_folder, alignment_channel):
    """Interactively choose a rotation+crop on the downscaled conv stack,
    then apply it to every full-size image set and save cropped copies
    under ``<exp_folder>\\cropped\\``.

    Parameters
    ----------
    exp_folder : str
        Experiment root folder containing a ``rigid_align`` subfolder.
    alignment_channel : int
        Wavelength of the alignment channel; must be 561 or 488.

    Returns
    -------
    bool
        True once all images have been written.
    """
    # Only these two wavelengths have corresponding conv_<channel> folders.
    assert alignment_channel in [561, 488]

    path = exp_folder + "\\rigid_align\\"
    storm_path = path + "\\storm_merged\\"
    conv_path = path + "\\conv_merged\\"
    conv_path_ds = path + "\\conv_merged_ds\\"
    align_path = path + "\\for_align\\"
    wga_path = path + "\\conv_{}\\".format(str(alignment_channel))

    tif_ext = r"*.tif"
    png_ext = r"*.png"

    def _list_images(folder):
        # Accept both .tif and .png files within a folder.
        return glob.glob(folder + tif_ext) + glob.glob(folder + png_ext)

    wga_files = _list_images(wga_path)
    storm_files = _list_images(storm_path)
    conv_files = _list_images(conv_path)
    conv_files_ds = _list_images(conv_path_ds)
    align_files = _list_images(align_path)

    num_images = len(storm_files)

    print("normalizing conv-1")
    # Stack the downscaled conv images: h x w x 3 x n_images, then take the
    # max projection over the stack for the interactive crop preview.
    C1 = np.stack([imageio.imread(conv_files_ds[k]) for k in range(num_images)], -1)
    C2 = C1.max(axis=3)

    # Fixed rotation angle (degrees) applied before cropping.
    ang = -8
    C3 = skimage.transform.rotate(C2, ang)

    # Let the user pick the crop region on the rotated projection.
    crop_reg = interactive_crop(C3)
    print('Cropping Coordinates: {}, Angle: {}'.format(crop_reg, ang))

    # Full-size images are 10x larger than the downscaled projection, so
    # scale the crop coordinates up.  NOTE(review): rows use crop indices
    # 1/3 and columns 0/2 — preserved from the original implementation;
    # confirm against interactive_crop's return order.
    crop_storm = [int(np.ceil(c * 10)) for c in crop_reg]
    crop_storm_x_range = range(crop_storm[1], crop_storm[3])
    crop_storm_y_range = range(crop_storm[0], crop_storm[2])

    print('Making directories!')
    out_root = exp_folder + "\\cropped\\"
    if not os.path.exists(out_root):
        os.mkdir(out_root)
        for sub in ("storm_merged", "storm_merged_ds",
                    "conv_merged", "conv_merged_ds",
                    "conv_{}".format(alignment_channel),
                    "conv_{}_ds".format(alignment_channel),
                    "for_align", "for_align_ds"):
            os.mkdir(out_root + sub + "\\")

    print('Applying angle and crop to full size images and then saving out!')
    # Each job: (input file list, full-size out folder, downscaled out folder).
    jobs = [
        (storm_files, "storm_merged", "storm_merged_ds"),
        (conv_files, "conv_merged", "conv_merged_ds"),
        (align_files, "for_align", "for_align_ds"),
        (wga_files, "conv_{}".format(alignment_channel),
         "conv_{}_ds".format(alignment_channel)),
    ]
    for k in range(num_images):
        print(k)
        for files, full_dir, ds_dir in jobs:
            full, small = _rotate_crop_rescale(
                cv2.imread(files[k]), ang,
                crop_storm_x_range, crop_storm_y_range)
            name = str(k).zfill(3) + '.tif'
            cv2.imwrite(exp_folder + "/cropped/" + full_dir + "/" + name,
                        _to_uint8(full))
            cv2.imwrite(exp_folder + "/cropped/" + ds_dir + "/" + name,
                        _to_uint8(small))

    print('Done with cropping!')
    return True
import sys, inspect
from core.config.enumerator import EnumeratationConfig
from core.recon.subenum.reverser.revip import *
from core.recon.subenum.sublist3r.sublist3r import *
class IDomainEnumerator:
    """Base class for subdomain enumerators.

    Builds the list of enumeration "sources" (enumerator classes) to run,
    based on the user-supplied configuration.  Subclasses are expected to
    implement isEnumerator, GetAllEnumerators and Start.
    """
    def __init__(self, config: EnumeratationConfig) -> None:
        # Configuration providing the target domain and the source filters.
        self.config = config
        self.domain = self.config.GetTarget()
        # Mapping of source name -> enumerator class (subclass-provided).
        self.__enumerators = self.GetAllEnumerators()
        self.__sources = list()
        self.handleSources()
    def isEnumerator(self, obj):
        """Return truthy if *obj* is an enumerator class (subclass hook)."""
        pass
    def GetAllEnumerators(self):
        """Return a name -> enumerator-class mapping (subclass hook)."""
        pass
    def SetSources(self, enumerators):
        """Replace the active sources with the valid entries of *enumerators*.

        :raises TypeError: if *enumerators* is not a list.
        """
        if isinstance(enumerators, list):
            # Clear previously selected sources.
            self.__sources.clear()
            # Validate each candidate before adding it to the list.
            for enumerator in enumerators:
                if self.isEnumerator(enumerator):
                    self.__sources.append(enumerator)
        else:
            raise TypeError("Enumerators must be in a list")
    def GetSources(self):
        """Return the currently selected enumerator sources."""
        return self.__sources
    def GetAllSources(self):
        """Collect every enumerator class visible in the caller's module.

        NOTE(review): sys._getframe(1) inspects the *immediate caller's*
        frame, so the result depends on call depth — do not wrap or
        indirect calls to this method without re-checking that assumption.
        """
        enumerators = []
        callers_module = sys._getframe(1).f_globals['__name__']
        classes = inspect.getmembers(sys.modules[callers_module], inspect.isclass)
        for _, obj in classes:
            if self.isEnumerator(obj):
                enumerators.append(obj)
        return enumerators
    def handleSources(self):
        """Select sources from the config: explicit includes, '-name'
        excludes, or every discoverable enumerator when none are given."""
        sources = self.config.GetSources()
        exclude_case = False
        excluded_src = []
        if sources:
            for src in sources:
                # A leading '-' marks a source to exclude rather than include.
                if src.startswith('-'):
                    exclude_case = True
                    excluded_src.append(src[1:])
        if exclude_case:
            # Run everything except the excluded enumerator classes.
            # NOTE(review): when excludes and plain includes are mixed, the
            # includes are ignored — confirm this is intended behavior.
            self.SetSources([src for src in self.GetAllSources() if src not in [self.__enumerators[key] for key in self.__enumerators.keys() if key in excluded_src]])
        elif sources:
            # Only run the explicitly named sources that actually exist.
            self.SetSources([self.__enumerators[src] for src in sources if src in self.__enumerators.keys()])
        else:
            # No filter configured: run every discoverable enumerator.
            self.SetSources(self.GetAllSources())
    def Start(self):
        """Kick off enumeration (subclass hook)."""
        pass
from core.config.enumerator import EnumeratationConfig
from core.recon.subenum.reverser.revip import *
from core.recon.subenum.sublist3r.sublist3r import *
class IDomainEnumerator:
    """Base class for subdomain enumerators.

    Builds the list of enumeration "sources" (enumerator classes) to run,
    based on the user-supplied configuration.  Subclasses are expected to
    implement isEnumerator, GetAllEnumerators and Start.
    """
    def __init__(self, config: EnumeratationConfig) -> None:
        # Configuration providing the target domain and the source filters.
        self.config = config
        self.domain = self.config.GetTarget()
        # Mapping of source name -> enumerator class (subclass-provided).
        self.__enumerators = self.GetAllEnumerators()
        self.__sources = list()
        self.handleSources()
    def isEnumerator(self, obj):
        """Return truthy if *obj* is an enumerator class (subclass hook)."""
        pass
    def GetAllEnumerators(self):
        """Return a name -> enumerator-class mapping (subclass hook)."""
        pass
    def SetSources(self, enumerators):
        """Replace the active sources with the valid entries of *enumerators*.

        :raises TypeError: if *enumerators* is not a list.
        """
        if isinstance(enumerators, list):
            # Clear previously selected sources.
            self.__sources.clear()
            # Validate each candidate before adding it to the list.
            for enumerator in enumerators:
                if self.isEnumerator(enumerator):
                    self.__sources.append(enumerator)
        else:
            raise TypeError("Enumerators must be in a list")
    def GetSources(self):
        """Return the currently selected enumerator sources."""
        return self.__sources
    def GetAllSources(self):
        """Collect every enumerator class visible in the caller's module.

        NOTE(review): sys._getframe(1) inspects the *immediate caller's*
        frame, so the result depends on call depth — do not wrap or
        indirect calls to this method without re-checking that assumption.
        """
        enumerators = []
        callers_module = sys._getframe(1).f_globals['__name__']
        classes = inspect.getmembers(sys.modules[callers_module], inspect.isclass)
        for _, obj in classes:
            if self.isEnumerator(obj):
                enumerators.append(obj)
        return enumerators
    def handleSources(self):
        """Select sources from the config: explicit includes, '-name'
        excludes, or every discoverable enumerator when none are given."""
        sources = self.config.GetSources()
        exclude_case = False
        excluded_src = []
        if sources:
            for src in sources:
                # A leading '-' marks a source to exclude rather than include.
                if src.startswith('-'):
                    exclude_case = True
                    excluded_src.append(src[1:])
        if exclude_case:
            # Run everything except the excluded enumerator classes.
            # NOTE(review): when excludes and plain includes are mixed, the
            # includes are ignored — confirm this is intended behavior.
            self.SetSources([src for src in self.GetAllSources() if src not in [self.__enumerators[key] for key in self.__enumerators.keys() if key in excluded_src]])
        elif sources:
            # Only run the explicitly named sources that actually exist.
            self.SetSources([self.__enumerators[src] for src in sources if src in self.__enumerators.keys()])
        else:
            # No filter configured: run every discoverable enumerator.
            self.SetSources(self.GetAllSources())
    def Start(self):
        """Kick off enumeration (subclass hook)."""
        pass
from __future__ import annotations
import json
from typing import List
from .exceptions import MaterialExists, MaterialNotFound
class MaterialMeta(type):
    """Metaclass keeping a registry of every Material instance.

    The registry lives in the class attribute ``__materials__`` (a set),
    injected into the class namespace by ``__prepare__``.
    """

    @property
    def all(cls) -> List["Material"]:
        """Return list of all materials registered."""
        return list(cls.__dict__["__materials__"])

    @classmethod
    def __prepare__(metacls, name, bases, **kwargs):
        """Seed the class namespace with an empty material registry.

        Fix: the original signature ``__prepare__(cls, name)`` only worked
        by accident — Python invokes ``__prepare__(name, bases, **kwargs)``,
        so the parameters were silently misbound and any metaclass keyword
        argument would have raised TypeError.
        """
        return {"__materials__": set()}

    def __getitem__(cls, name: str):
        """Return a material given a name.

        :raises MaterialNotFound: if no registered material matches.
        """
        for material in cls.__dict__["__materials__"]:
            if name == material.name:
                return material
        raise MaterialNotFound(f"Material {name} not found.")
class Material(metaclass=MaterialMeta):
    """Implement a material.

    Instances register themselves in a class-level set on creation, so each
    material name is unique process-wide.

    Parameters
    ----------
    name : str
        The name of the material.
    eps_r : float
        Relative permissivity.
    mu_r : float
        Relative permeability.
    sigma_e : float
        Electric condutivity.
    sigma_m : float
        Magnetic condutivity.
    color : str
        RGB color.
    """

    def __init__(
        self,
        name: str,
        eps_r: float = 1,
        mu_r: float = 1,
        sigma_e: float = 0,
        sigma_m: float = 0,
        color: str = "#000000",
    ):
        """Initialize a material object and register it."""
        self.name = name
        self.eps_r = eps_r
        self.mu_r = mu_r
        # Tiny floor keeps downstream divisions by conductivity safe.
        self.sigma_e = sigma_e + 1e-20
        self.sigma_m = sigma_m + 1e-20
        self.color = color
        self._register(self)

    def _register(self, material: Material) -> None:
        """Register material.

        :raises MaterialExists: if a material with the same name exists.
        """
        materials_set = self.__class__.__dict__["__materials__"]
        if material not in materials_set:
            materials_set.add(material)
        else:
            raise MaterialExists(f"Material {material.name} already exists.")

    def _unregister(self, material):
        """Unregister material (no-op if it is not registered)."""
        materials_set = self.__class__.__dict__["__materials__"]
        materials_set.discard(material)

    def _delete(self) -> None:
        """Delete material."""
        self._unregister(self)

    def __del__(self):
        """Implement destructor."""
        # Fix: during interpreter shutdown class attributes may already be
        # torn down; deregistration here must be best-effort, never raise.
        try:
            self._delete()
        except Exception:
            pass

    @classmethod
    def load(cls, file_name: str):
        """Load materials from a .json file.

        Parameters
        ----------
        file_name : str
            Filename in the root directory.
        """
        with open(file_name, "r") as f:
            data = json.load(f)
        for props in data:
            cls(**props)

    @classmethod
    def from_name(cls, name: str) -> Material:
        """Initialize material from a name.

        Parameters
        ----------
        name : str
            Name of material.

        Returns
        -------
        Material

        Raises
        -------
        MaterialNotFound
            If material is not found.

        Notes
        -------
        Material[name] is equivalent to Material.from_name(name).
        """
        for material in cls.__dict__["__materials__"]:
            if name == material.name:
                return material
        raise MaterialNotFound(f"Material {name} not found.")

    def __hash__(self) -> int:
        """Hash by name, consistent with __eq__."""
        return hash(self.name)

    def __eq__(self, other) -> bool:
        """Compare by name; defer to the other operand for foreign types."""
        # Fix: previously accessed other.name unconditionally, raising
        # AttributeError for non-Material operands instead of returning
        # NotImplemented as the data model requires.
        if not isinstance(other, Material):
            return NotImplemented
        return self.name == other.name

    def __repr__(self) -> str:
        """Representation of the material."""
        return f"Material({self.name})"
import json
from typing import List
from .exceptions import MaterialExists, MaterialNotFound
class MaterialMeta(type):
    """Metaclass keeping a registry of every Material instance.

    The registry lives in the class attribute ``__materials__`` (a set),
    injected into the class namespace by ``__prepare__``.
    """

    @property
    def all(cls) -> List["Material"]:
        """Return list of all materials registered."""
        return list(cls.__dict__["__materials__"])

    @classmethod
    def __prepare__(metacls, name, bases, **kwargs):
        """Seed the class namespace with an empty material registry.

        Fix: the original signature ``__prepare__(cls, name)`` only worked
        by accident — Python invokes ``__prepare__(name, bases, **kwargs)``,
        so the parameters were silently misbound and any metaclass keyword
        argument would have raised TypeError.
        """
        return {"__materials__": set()}

    def __getitem__(cls, name: str):
        """Return a material given a name.

        :raises MaterialNotFound: if no registered material matches.
        """
        for material in cls.__dict__["__materials__"]:
            if name == material.name:
                return material
        raise MaterialNotFound(f"Material {name} not found.")
class Material(metaclass=MaterialMeta):
    """Implement a material.

    Instances register themselves in a class-level set on creation, so each
    material name is unique process-wide.

    Parameters
    ----------
    name : str
        The name of the material.
    eps_r : float
        Relative permissivity.
    mu_r : float
        Relative permeability.
    sigma_e : float
        Electric condutivity.
    sigma_m : float
        Magnetic condutivity.
    color : str
        RGB color.
    """

    def __init__(
        self,
        name: str,
        eps_r: float = 1,
        mu_r: float = 1,
        sigma_e: float = 0,
        sigma_m: float = 0,
        color: str = "#000000",
    ):
        """Initialize a material object and register it."""
        self.name = name
        self.eps_r = eps_r
        self.mu_r = mu_r
        # Tiny floor keeps downstream divisions by conductivity safe.
        self.sigma_e = sigma_e + 1e-20
        self.sigma_m = sigma_m + 1e-20
        self.color = color
        self._register(self)

    def _register(self, material: Material) -> None:
        """Register material.

        :raises MaterialExists: if a material with the same name exists.
        """
        materials_set = self.__class__.__dict__["__materials__"]
        if material not in materials_set:
            materials_set.add(material)
        else:
            raise MaterialExists(f"Material {material.name} already exists.")

    def _unregister(self, material):
        """Unregister material (no-op if it is not registered)."""
        materials_set = self.__class__.__dict__["__materials__"]
        materials_set.discard(material)

    def _delete(self) -> None:
        """Delete material."""
        self._unregister(self)

    def __del__(self):
        """Implement destructor."""
        # Fix: during interpreter shutdown class attributes may already be
        # torn down; deregistration here must be best-effort, never raise.
        try:
            self._delete()
        except Exception:
            pass

    @classmethod
    def load(cls, file_name: str):
        """Load materials from a .json file.

        Parameters
        ----------
        file_name : str
            Filename in the root directory.
        """
        with open(file_name, "r") as f:
            data = json.load(f)
        for props in data:
            cls(**props)

    @classmethod
    def from_name(cls, name: str) -> Material:
        """Initialize material from a name.

        Parameters
        ----------
        name : str
            Name of material.

        Returns
        -------
        Material

        Raises
        -------
        MaterialNotFound
            If material is not found.

        Notes
        -------
        Material[name] is equivalent to Material.from_name(name).
        """
        for material in cls.__dict__["__materials__"]:
            if name == material.name:
                return material
        raise MaterialNotFound(f"Material {name} not found.")

    def __hash__(self) -> int:
        """Hash by name, consistent with __eq__."""
        return hash(self.name)

    def __eq__(self, other) -> bool:
        """Compare by name; defer to the other operand for foreign types."""
        # Fix: previously accessed other.name unconditionally, raising
        # AttributeError for non-Material operands instead of returning
        # NotImplemented as the data model requires.
        if not isinstance(other, Material):
            return NotImplemented
        return self.name == other.name

    def __repr__(self) -> str:
        """Representation of the material."""
        return f"Material({self.name})"
import socket
from odoo.addons.test_mail.data.test_mail_data import MAIL_TEMPLATE
from odoo.addons.test_mail.tests.test_mail_gateway import TestMailgateway
from odoo.tools import mute_logger
from email.utils import formataddr
class TestFetchmailNotifyErrorToSender(TestMailgateway):
    """Check that processing a failing inbound mail sends an error notice
    back to the original sender via the configured notice template."""
    def setUp(self):
        super(TestFetchmailNotifyErrorToSender, self).setUp()
        # Fetchmail server wired to this module's error-notice mail template.
        self.fetchmail_server = self.env['fetchmail.server'].create({
            'name': 'Test Fetchmail Server',
            'type': 'imap',
            'error_notice_template_id': self.env.ref('%s.%s' % (
                'fetchmail_notify_error_to_sender',
                'email_template_error_notice',
            )).id
        })
    def format_and_process_with_context(
        self, template, to_email='<EMAIL>, <EMAIL>',
        subject='Frogs', extra='',
        email_from='<NAME> <<EMAIL>>',
        cc_email='',
        msg_id='<<EMAIL>8923581.41972151344608186760.<EMAIL>>',
        model=None, target_model='mail.test.simple', target_field='name',
        ctx=None,
    ):
        """Render *template* with the given header values and feed it to the
        mail gateway under context *ctx*; return the records created in
        *target_model* whose *target_field* equals *subject*.
        """
        # Ensure the record does not pre-exist, so the search below only
        # matches what message_process creates.
        self.assertFalse(self.env[target_model].search([
            (target_field, '=', subject),
        ]))
        mail = template.format(
            to=to_email,
            subject=subject,
            cc=cc_email,
            extra=extra,
            email_from=email_from,
            msg_id=msg_id,
        )
        self.env['mail.thread'].with_context(ctx or {}).message_process(
            model,
            mail,
        )
        return self.env[target_model].search([(target_field, '=', subject)])
    @mute_logger('odoo.addons.mail.models.mail_thread', 'odoo.models')
    def test_message_process(self):
        """A gateway failure must produce exactly one notice mail addressed
        to the original sender."""
        email_from = formataddr((self.partner_1.name, self.partner_1.email))
        count_return_mails_before = self.env['mail.mail'].search_count([
            ('email_to', '=', email_from),
        ])
        # Processing an unroutable message is expected to raise ValueError.
        with self.assertRaises(ValueError):
            self.format_and_process_with_context(
                MAIL_TEMPLATE,
                email_from=email_from,
                to_email='<EMAIL>',
                subject='spam',
                extra='In-Reply-To: <12321321-openerp-%d-mail.<EMAIL>@%s'
                '>' % (self.test_record.id,
                socket.gethostname(),
                ),
                ctx={
                    'fetchmail_server_id': self.fetchmail_server.id,
                }
            )
        count_return_mails_after = self.env['mail.mail'].search_count([
            ('email_to', '=', email_from),
        ])
        # Exactly one error notice should have been sent back to the sender.
        self.assertEqual(
            count_return_mails_after,
            count_return_mails_before + 1,
        )
import socket
from odoo.addons.test_mail.data.test_mail_data import MAIL_TEMPLATE
from odoo.addons.test_mail.tests.test_mail_gateway import TestMailgateway
from odoo.tools import mute_logger
from email.utils import formataddr
class TestFetchmailNotifyErrorToSender(TestMailgateway):
    """Check that processing a failing inbound mail sends an error notice
    back to the original sender via the configured notice template."""
    def setUp(self):
        super(TestFetchmailNotifyErrorToSender, self).setUp()
        # Fetchmail server wired to this module's error-notice mail template.
        self.fetchmail_server = self.env['fetchmail.server'].create({
            'name': 'Test Fetchmail Server',
            'type': 'imap',
            'error_notice_template_id': self.env.ref('%s.%s' % (
                'fetchmail_notify_error_to_sender',
                'email_template_error_notice',
            )).id
        })
    def format_and_process_with_context(
        self, template, to_email='<EMAIL>, <EMAIL>',
        subject='Frogs', extra='',
        email_from='<NAME> <<EMAIL>>',
        cc_email='',
        msg_id='<<EMAIL>8923581.41972151344608186760.<EMAIL>>',
        model=None, target_model='mail.test.simple', target_field='name',
        ctx=None,
    ):
        """Render *template* with the given header values and feed it to the
        mail gateway under context *ctx*; return the records created in
        *target_model* whose *target_field* equals *subject*.
        """
        # Ensure the record does not pre-exist, so the search below only
        # matches what message_process creates.
        self.assertFalse(self.env[target_model].search([
            (target_field, '=', subject),
        ]))
        mail = template.format(
            to=to_email,
            subject=subject,
            cc=cc_email,
            extra=extra,
            email_from=email_from,
            msg_id=msg_id,
        )
        self.env['mail.thread'].with_context(ctx or {}).message_process(
            model,
            mail,
        )
        return self.env[target_model].search([(target_field, '=', subject)])
    @mute_logger('odoo.addons.mail.models.mail_thread', 'odoo.models')
    def test_message_process(self):
        """A gateway failure must produce exactly one notice mail addressed
        to the original sender."""
        email_from = formataddr((self.partner_1.name, self.partner_1.email))
        count_return_mails_before = self.env['mail.mail'].search_count([
            ('email_to', '=', email_from),
        ])
        # Processing an unroutable message is expected to raise ValueError.
        with self.assertRaises(ValueError):
            self.format_and_process_with_context(
                MAIL_TEMPLATE,
                email_from=email_from,
                to_email='<EMAIL>',
                subject='spam',
                extra='In-Reply-To: <12321321-openerp-%d-mail.<EMAIL>@%s'
                      '>' % (self.test_record.id,
                             socket.gethostname(),
                             ),
                ctx={
                    'fetchmail_server_id': self.fetchmail_server.id,
                }
            )
        count_return_mails_after = self.env['mail.mail'].search_count([
            ('email_to', '=', email_from),
        ])
        # Exactly one error notice should have been sent back to the sender.
        self.assertEqual(
            count_return_mails_after,
            count_return_mails_before + 1,
        )
from analyzer import StockAnalyzer
from stock import Stock
from bs4 import BeautifulSoup
import pandas as panda
import yfinance as yf
import requests
import re
import datetime
import os
import openpyxl
import argparse
class StockScanner:
    def __init__(self):
        """Initialize empty result buckets for scanned trades."""
        # Stocks scraped from profit ("winner") cards.
        self.winner_stock_list = list()
        # Stocks scraped from loss ("loser") cards.
        self.loser_stock_list = list()
def generate_data(self, scan_back_to_date, username):
print("Starting a new scan to generate stock data...")
# The first page will be '1', but need to start it at '0' for the loop
page_number = 0
while True:
page_number += 1
# Used to compare to the scan_back_to_date to determine how far back to scan
most_recent_scan_date = ''
# Scrape the profit.ly page for the user and store it
web_page = "https://profit.ly/user/" + username + "/trades?page=" + str(page_number) + "&size=10"
page = requests.get(web_page)
# Create the main soup
soup = BeautifulSoup(page.content, "html.parser")
# This grabs the center feed of the page that contains every 'card'
card_feed = soup.find("div", class_="col-md-8 col-lg-7 no-gutters")
has_scanned_winners = False
# Loop through each user's page twice. Once to grab the profit cards and
# once to grab all the loss cards
for _ in range(2):
# Grab either the profit cards or the loss cards
if has_scanned_winners == False:
all_card_headers = card_feed.find_all("div", class_=re.compile("card-header bg-profitGreen"))
print("==== Scanning all profit cards... ====")
else:
all_card_headers = card_feed.find_all("div", class_=re.compile("card-header bg-danger"))
print("==== Scanning all loss cards.. ====")
# Grab specific info from individual cards
for card_header in all_card_headers:
card_feed_info = card_header.find_next("div", class_="feed-info")
trade_date = card_feed_info.find_next('a', href=True, class_="text-muted").get_text()
trade_formatted_date = self.create_datetime(trade_date)
most_recent_scan_date = trade_formatted_date
# If the trade date of the card is before the scan_back_to_date, break
# because we should already have this info
if trade_formatted_date < scan_back_to_date:
break
stock_ticker = card_header.find_next('a', class_="trade-ticker").get_text()
trade_type = card_header.find_next('span', class_="trade-type").get_text()
if has_scanned_winners == False:
trade_profit = card_header.find_next('a', class_="text-white trade-up mr-1").get_text().strip()
else:
trade_profit = card_header.find_next('a', class_="text-white trade-down mr-1").get_text().strip()
# Follow the link for even more specific trade info such as entry, exit, and position size
trade_url = card_feed_info.find_next('a', href=True, class_="text-muted").get('href')
trade_page = requests.get("https://profit.ly" + trade_url)
specific_soup = BeautifulSoup(trade_page.content, "html.parser")
specific_trade_feed = specific_soup.find("div", class_="container feeds mb-0 border-bottom-0 pb-0")
trade_entry_and_exit = specific_trade_feed.find_all("td", align="right")
trade_entry = trade_entry_and_exit[0].get_text()
trade_exit = trade_entry_and_exit[1].get_text()
trade_position_size_table = specific_trade_feed.find_all("li", class_="list-group-item d-flex justify-content-between align-items-center")
trade_position_size = trade_position_size_table[1].span.get_text()
trade_percentage_profit = trade_position_size_table[2].span.get_text().strip()
stock = Stock(stock_ticker, trade_type, trade_profit, trade_formatted_date, trade_entry, trade_exit, trade_position_size, trade_percentage_profit)
if has_scanned_winners == False:
self.winner_stock_list.append(stock)
else:
self.loser_stock_list.append(stock)
has_scanned_winners = True
if most_recent_scan_date < scan_back_to_date:
break
def create_datetime(self, date_string):
# Split the date_string by whitespace and get the month and day
# Strip the right side comma from the day
month = date_string.split()[0]
day = date_string.split()[1].rstrip(',')
# Get the hour AND minute based on splitting on whitespace
time = date_string.split()[2]
# The hour will be in a twelve hour format to start
# Split the hour and minute on the colon
twelve_hour = time.split(':')[0]
minute = time.split(':')[1]
# Clock period refers to AM or PM
clock_period = date_string.split()[3]
twenty_four_hour = ''
# Convert the hour from twelve hour format to a twenty four hour format
if (clock_period == 'AM') and (twelve_hour == '12'):
twenty_four_hour = '00'
elif (clock_period == 'AM') and (twelve_hour == '10' or twelve_hour == '11'):
twenty_four_hour = twelve_hour
elif (clock_period == 'AM'):
twenty_four_hour = '0' + twelve_hour
elif (clock_period == 'PM') and (twelve_hour == '12'):
twenty_four_hour = twelve_hour
else:
twenty_four_hour = str(int(twelve_hour) + 12)
# Create a dictionary to switch the 3 letter month abbreviation to a number
month_switcher = {
'Jan': '01',
'Feb': '02',
'Mar': '03',
'Apr': '04',
'May': '05',
'Jun': '06',
'Jul': '07',
'Aug': '08',
'Sep': '09',
'Oct': '10',
'Nov': '11',
'Dec': '12'
}
# Change the month format by calling the dictionary above
formatted_month = month_switcher.get(month, "Invalid Month")
# The year is not reported in the scraped time, so will assume the year is the current year
current_year = datetime.datetime.now().year
formatted_datetime = datetime.datetime(int(current_year), int(formatted_month), int(day), int(twenty_four_hour), int(minute))
print(f"\n*** Scanning trade from: {formatted_datetime} ***")
return formatted_datetime
# Use pandas to create an excel workbook with two sheets, winners and losers, and create
# all the column headers
def create_stock_workbook(self, username):
stock_dataframe = panda.DataFrame(columns=['Date', 'Ticker', 'Trade Type', 'Profit', 'Entry', 'Exit', 'Position Size', 'Percent Profit',
'SPACE', 'L:H', 'Volume', 'SPACE', 'L:H (-) 1 Day', 'L:H (-) 2 Days', 'Volume (-) 1 Day',
'Volume (-) 2 Days', '% Change From Open', '% Change (-) 1 Day', '% Change (-) 2 Days',
'% Change Volume (-) 1 Day', '% Change Volume (-) 2 Days', 'SPACE',
'Float Shares', 'Shares Outstanding', 'Implied Shares Outstanding'])
writer = panda.ExcelWriter('stocks_' + username + '.xlsx', engine='xlsxwriter')
stock_dataframe.to_excel(writer, sheet_name='winners', index=False)
stock_dataframe.to_excel(writer, sheet_name='losers', index=False)
writer.save()
print("Empty stock workbook with headers created...")
# Add the trade data and the stock analysis data to the excel workbook
def populate_stock_workbook(self, winner_stock_list, loser_stock_list, username):
print("Attempting to populate data...")
stock_workbook = openpyxl.load_workbook('./stocks_' + username + '.xlsx')
winner_sheet = stock_workbook.get_sheet_by_name('winners')
loser_sheet = stock_workbook.get_sheet_by_name('losers')
stock_workbook.active = winner_sheet
for stock in winner_stock_list:
print(f"Adding data for winning ticker: {stock.ticker}")
winner_sheet.append([stock.date, stock.ticker, stock.trade_type, stock.profit, stock.entry, stock.exit, stock.position_size, stock.percent_profit,\
"N/A", str(stock.todays_low) + ':' + str(stock.todays_high), stock.todays_volume, "N/A", str(stock.yesterdays_low) + ':' + str(stock.yesterdays_high),\
str(stock.minus_two_days_low) + ':' + str(stock.minus_two_days_high), stock.yesterdays_volume, stock.minus_two_days_volume,\
stock.price_percent_change_today, stock.price_percent_change_since_yesterday, stock.price_percent_change_since_two_days,\
stock.volume_percent_change_since_yesterday, stock.volume_percent_change_since_two_days, "N/A",\
stock.float, stock.shares_outstanding, stock.implied_shares_outstanding])
stock_workbook.active = loser_sheet
for stock in loser_stock_list:
print(f"Adding data for losing ticker: {stock.ticker}")
loser_sheet.append([stock.date, stock.ticker, stock.trade_type, stock.profit, stock.entry, stock.exit, stock.position_size, stock.percent_profit,\
"N/A", str(stock.todays_low) + ':' + str(stock.todays_high), stock.todays_volume, "N/A", str(stock.yesterdays_low) + ':' + str(stock.yesterdays_high),\
str(stock.minus_two_days_low) + ':' + str(stock.minus_two_days_high), stock.yesterdays_volume, stock.minus_two_days_volume,\
stock.price_percent_change_today, stock.price_percent_change_since_yesterday, stock.price_percent_change_since_two_days,\
stock.volume_percent_change_since_yesterday, stock.volume_percent_change_since_two_days, "N/A",\
stock.float, stock.shares_outstanding, stock.implied_shares_outstanding])
stock_workbook.save('stocks_' + username + '.xlsx')
# Analyze each trades stock data from Yahoo Finance
def perform_stock_analysis(self, stock_list):
for stock in stock_list:
stock_analyzer = StockAnalyzer(stock)
stock_analyzer.analyze()
# Called when an excel workbook does not already exist for a user.
# Calls the main functions throughout the program to create the excel workbook, scrape and generate
# the data, perform analysis, and populate the data into the excel workbook
def create_new_stock_data(stock_scan, username):
print("A workbook does not exist.. Creating a new stock workbook")
stock_scan.create_stock_workbook(username)
date = request_scan_back_date()
stock_scan.generate_data(date, username)
stock_scan.perform_stock_analysis(stock_scan.winner_stock_list)
stock_scan.perform_stock_analysis(stock_scan.loser_stock_list)
stock_scan.populate_stock_workbook(stock_scan.winner_stock_list, stock_scan.loser_stock_list, username)
# Called when an excel workbook already exists and contains users data.
# Pulls the latest data entry's date and will then perform the main functions of the program
# such as scraping, analysis, and data population
def update_existing_stock_data(stock_scan, username):
print("A workbook already exists... Reading the latest entry date")
most_recent_stock_date = ''
data_frame = panda.read_excel('./stocks_' + username + '.xlsx', sheet_name='winners')
try:
recent_winner_date = data_frame['Date'][0]
except IndexError:
print("Scanning the winner sheet produced an IndexError, most likely there are no entries")
recent_winner_date = datetime.datetime(1970, 1, 1)
data_frame = panda.read_excel('./stocks_' + username + '.xlsx', sheet_name='losers')
try:
recent_loser_date = data_frame['Date'][0]
except IndexError:
print("Scanning the loser sheet produced an IndexError, most likely there are no entries")
recent_loser_date = datetime.datetime(1970, 1, 1)
if recent_winner_date > recent_loser_date:
most_recent_stock_date = recent_winner_date
else:
most_recent_stock_date = recent_loser_date
print(f"Most recent stock entry date: {most_recent_stock_date}")
stock_scan.generate_data(most_recent_stock_date, username)
stock_scan.perform_stock_analysis(stock_scan.winner_stock_list)
stock_scan.perform_stock_analysis(stock_scan.loser_stock_list)
stock_scan.populate_stock_workbook(stock_scan.winner_stock_list, stock_scan.loser_stock_list, username)
# Used to ask to user for a date to scan trades back to
def request_scan_back_date():
print("*** Requesting a date to scan stocks back to.. ***")
month = input("Enter the numerical value for a month: ")
day = input("Enter a numerical value for a day: ")
year = input("Enter a numerical value for a year: ")
hour = input("Enter a 24-hour clock hour: ")
minute = input("Enter a minute: ")
date = datetime.datetime(int(year), int(month), int(day), int(hour), int(minute))
return date
def _arg_parse():
parser = argparse.ArgumentParser(description="Scan the provided username's profit.ly trades, add the data to an excel workbook, and analyze the stock's data and history.")
parser.add_argument("--username",
help="The username of the profit.ly account you want to scan")
return parser.parse_args()
def main(args):
username = args.username
stock_scan = StockScanner()
print("Checking if a stock workbook already exists for " + username + "...")
if not os.path.exists("./stocks_" + username + ".xlsx"):
create_new_stock_data(stock_scan, username)
else:
update_existing_stock_data(stock_scan, username)
if __name__ == "__main__":
args = _arg_parse()
main(args) | src/main.py | from analyzer import StockAnalyzer
from stock import Stock
from bs4 import BeautifulSoup
import pandas as panda
import yfinance as yf
import requests
import re
import datetime
import os
import openpyxl
import argparse
class StockScanner:
def __init__(self):
self.winner_stock_list = list()
self.loser_stock_list = list()
def generate_data(self, scan_back_to_date, username):
print("Starting a new scan to generate stock data...")
# The first page will be '1', but need to start it at '0' for the loop
page_number = 0
while True:
page_number += 1
# Used to compare to the scan_back_to_date to determine how far back to scan
most_recent_scan_date = ''
# Scrape the profit.ly page for the user and store it
web_page = "https://profit.ly/user/" + username + "/trades?page=" + str(page_number) + "&size=10"
page = requests.get(web_page)
# Create the main soup
soup = BeautifulSoup(page.content, "html.parser")
# This grabs the center feed of the page that contains every 'card'
card_feed = soup.find("div", class_="col-md-8 col-lg-7 no-gutters")
has_scanned_winners = False
# Loop through each user's page twice. Once to grab the profit cards and
# once to grab all the loss cards
for _ in range(2):
# Grab either the profit cards or the loss cards
if has_scanned_winners == False:
all_card_headers = card_feed.find_all("div", class_=re.compile("card-header bg-profitGreen"))
print("==== Scanning all profit cards... ====")
else:
all_card_headers = card_feed.find_all("div", class_=re.compile("card-header bg-danger"))
print("==== Scanning all loss cards.. ====")
# Grab specific info from individual cards
for card_header in all_card_headers:
card_feed_info = card_header.find_next("div", class_="feed-info")
trade_date = card_feed_info.find_next('a', href=True, class_="text-muted").get_text()
trade_formatted_date = self.create_datetime(trade_date)
most_recent_scan_date = trade_formatted_date
# If the trade date of the card is before the scan_back_to_date, break
# because we should already have this info
if trade_formatted_date < scan_back_to_date:
break
stock_ticker = card_header.find_next('a', class_="trade-ticker").get_text()
trade_type = card_header.find_next('span', class_="trade-type").get_text()
if has_scanned_winners == False:
trade_profit = card_header.find_next('a', class_="text-white trade-up mr-1").get_text().strip()
else:
trade_profit = card_header.find_next('a', class_="text-white trade-down mr-1").get_text().strip()
# Follow the link for even more specific trade info such as entry, exit, and position size
trade_url = card_feed_info.find_next('a', href=True, class_="text-muted").get('href')
trade_page = requests.get("https://profit.ly" + trade_url)
specific_soup = BeautifulSoup(trade_page.content, "html.parser")
specific_trade_feed = specific_soup.find("div", class_="container feeds mb-0 border-bottom-0 pb-0")
trade_entry_and_exit = specific_trade_feed.find_all("td", align="right")
trade_entry = trade_entry_and_exit[0].get_text()
trade_exit = trade_entry_and_exit[1].get_text()
trade_position_size_table = specific_trade_feed.find_all("li", class_="list-group-item d-flex justify-content-between align-items-center")
trade_position_size = trade_position_size_table[1].span.get_text()
trade_percentage_profit = trade_position_size_table[2].span.get_text().strip()
stock = Stock(stock_ticker, trade_type, trade_profit, trade_formatted_date, trade_entry, trade_exit, trade_position_size, trade_percentage_profit)
if has_scanned_winners == False:
self.winner_stock_list.append(stock)
else:
self.loser_stock_list.append(stock)
has_scanned_winners = True
if most_recent_scan_date < scan_back_to_date:
break
def create_datetime(self, date_string):
# Split the date_string by whitespace and get the month and day
# Strip the right side comma from the day
month = date_string.split()[0]
day = date_string.split()[1].rstrip(',')
# Get the hour AND minute based on splitting on whitespace
time = date_string.split()[2]
# The hour will be in a twelve hour format to start
# Split the hour and minute on the colon
twelve_hour = time.split(':')[0]
minute = time.split(':')[1]
# Clock period refers to AM or PM
clock_period = date_string.split()[3]
twenty_four_hour = ''
# Convert the hour from twelve hour format to a twenty four hour format
if (clock_period == 'AM') and (twelve_hour == '12'):
twenty_four_hour = '00'
elif (clock_period == 'AM') and (twelve_hour == '10' or twelve_hour == '11'):
twenty_four_hour = twelve_hour
elif (clock_period == 'AM'):
twenty_four_hour = '0' + twelve_hour
elif (clock_period == 'PM') and (twelve_hour == '12'):
twenty_four_hour = twelve_hour
else:
twenty_four_hour = str(int(twelve_hour) + 12)
# Create a dictionary to switch the 3 letter month abbreviation to a number
month_switcher = {
'Jan': '01',
'Feb': '02',
'Mar': '03',
'Apr': '04',
'May': '05',
'Jun': '06',
'Jul': '07',
'Aug': '08',
'Sep': '09',
'Oct': '10',
'Nov': '11',
'Dec': '12'
}
# Change the month format by calling the dictionary above
formatted_month = month_switcher.get(month, "Invalid Month")
# The year is not reported in the scraped time, so will assume the year is the current year
current_year = datetime.datetime.now().year
formatted_datetime = datetime.datetime(int(current_year), int(formatted_month), int(day), int(twenty_four_hour), int(minute))
print(f"\n*** Scanning trade from: {formatted_datetime} ***")
return formatted_datetime
# Use pandas to create an excel workbook with two sheets, winners and losers, and create
# all the column headers
def create_stock_workbook(self, username):
stock_dataframe = panda.DataFrame(columns=['Date', 'Ticker', 'Trade Type', 'Profit', 'Entry', 'Exit', 'Position Size', 'Percent Profit',
'SPACE', 'L:H', 'Volume', 'SPACE', 'L:H (-) 1 Day', 'L:H (-) 2 Days', 'Volume (-) 1 Day',
'Volume (-) 2 Days', '% Change From Open', '% Change (-) 1 Day', '% Change (-) 2 Days',
'% Change Volume (-) 1 Day', '% Change Volume (-) 2 Days', 'SPACE',
'Float Shares', 'Shares Outstanding', 'Implied Shares Outstanding'])
writer = panda.ExcelWriter('stocks_' + username + '.xlsx', engine='xlsxwriter')
stock_dataframe.to_excel(writer, sheet_name='winners', index=False)
stock_dataframe.to_excel(writer, sheet_name='losers', index=False)
writer.save()
print("Empty stock workbook with headers created...")
# Add the trade data and the stock analysis data to the excel workbook
def populate_stock_workbook(self, winner_stock_list, loser_stock_list, username):
print("Attempting to populate data...")
stock_workbook = openpyxl.load_workbook('./stocks_' + username + '.xlsx')
winner_sheet = stock_workbook.get_sheet_by_name('winners')
loser_sheet = stock_workbook.get_sheet_by_name('losers')
stock_workbook.active = winner_sheet
for stock in winner_stock_list:
print(f"Adding data for winning ticker: {stock.ticker}")
winner_sheet.append([stock.date, stock.ticker, stock.trade_type, stock.profit, stock.entry, stock.exit, stock.position_size, stock.percent_profit,\
"N/A", str(stock.todays_low) + ':' + str(stock.todays_high), stock.todays_volume, "N/A", str(stock.yesterdays_low) + ':' + str(stock.yesterdays_high),\
str(stock.minus_two_days_low) + ':' + str(stock.minus_two_days_high), stock.yesterdays_volume, stock.minus_two_days_volume,\
stock.price_percent_change_today, stock.price_percent_change_since_yesterday, stock.price_percent_change_since_two_days,\
stock.volume_percent_change_since_yesterday, stock.volume_percent_change_since_two_days, "N/A",\
stock.float, stock.shares_outstanding, stock.implied_shares_outstanding])
stock_workbook.active = loser_sheet
for stock in loser_stock_list:
print(f"Adding data for losing ticker: {stock.ticker}")
loser_sheet.append([stock.date, stock.ticker, stock.trade_type, stock.profit, stock.entry, stock.exit, stock.position_size, stock.percent_profit,\
"N/A", str(stock.todays_low) + ':' + str(stock.todays_high), stock.todays_volume, "N/A", str(stock.yesterdays_low) + ':' + str(stock.yesterdays_high),\
str(stock.minus_two_days_low) + ':' + str(stock.minus_two_days_high), stock.yesterdays_volume, stock.minus_two_days_volume,\
stock.price_percent_change_today, stock.price_percent_change_since_yesterday, stock.price_percent_change_since_two_days,\
stock.volume_percent_change_since_yesterday, stock.volume_percent_change_since_two_days, "N/A",\
stock.float, stock.shares_outstanding, stock.implied_shares_outstanding])
stock_workbook.save('stocks_' + username + '.xlsx')
# Analyze each trades stock data from Yahoo Finance
def perform_stock_analysis(self, stock_list):
for stock in stock_list:
stock_analyzer = StockAnalyzer(stock)
stock_analyzer.analyze()
# Called when an excel workbook does not already exist for a user.
# Calls the main functions throughout the program to create the excel workbook, scrape and generate
# the data, perform analysis, and populate the data into the excel workbook
def create_new_stock_data(stock_scan, username):
print("A workbook does not exist.. Creating a new stock workbook")
stock_scan.create_stock_workbook(username)
date = request_scan_back_date()
stock_scan.generate_data(date, username)
stock_scan.perform_stock_analysis(stock_scan.winner_stock_list)
stock_scan.perform_stock_analysis(stock_scan.loser_stock_list)
stock_scan.populate_stock_workbook(stock_scan.winner_stock_list, stock_scan.loser_stock_list, username)
# Called when an excel workbook already exists and contains users data.
# Pulls the latest data entry's date and will then perform the main functions of the program
# such as scraping, analysis, and data population
def update_existing_stock_data(stock_scan, username):
print("A workbook already exists... Reading the latest entry date")
most_recent_stock_date = ''
data_frame = panda.read_excel('./stocks_' + username + '.xlsx', sheet_name='winners')
try:
recent_winner_date = data_frame['Date'][0]
except IndexError:
print("Scanning the winner sheet produced an IndexError, most likely there are no entries")
recent_winner_date = datetime.datetime(1970, 1, 1)
data_frame = panda.read_excel('./stocks_' + username + '.xlsx', sheet_name='losers')
try:
recent_loser_date = data_frame['Date'][0]
except IndexError:
print("Scanning the loser sheet produced an IndexError, most likely there are no entries")
recent_loser_date = datetime.datetime(1970, 1, 1)
if recent_winner_date > recent_loser_date:
most_recent_stock_date = recent_winner_date
else:
most_recent_stock_date = recent_loser_date
print(f"Most recent stock entry date: {most_recent_stock_date}")
stock_scan.generate_data(most_recent_stock_date, username)
stock_scan.perform_stock_analysis(stock_scan.winner_stock_list)
stock_scan.perform_stock_analysis(stock_scan.loser_stock_list)
stock_scan.populate_stock_workbook(stock_scan.winner_stock_list, stock_scan.loser_stock_list, username)
# Used to ask to user for a date to scan trades back to
def request_scan_back_date():
print("*** Requesting a date to scan stocks back to.. ***")
month = input("Enter the numerical value for a month: ")
day = input("Enter a numerical value for a day: ")
year = input("Enter a numerical value for a year: ")
hour = input("Enter a 24-hour clock hour: ")
minute = input("Enter a minute: ")
date = datetime.datetime(int(year), int(month), int(day), int(hour), int(minute))
return date
def _arg_parse():
parser = argparse.ArgumentParser(description="Scan the provided username's profit.ly trades, add the data to an excel workbook, and analyze the stock's data and history.")
parser.add_argument("--username",
help="The username of the profit.ly account you want to scan")
return parser.parse_args()
def main(args):
username = args.username
stock_scan = StockScanner()
print("Checking if a stock workbook already exists for " + username + "...")
if not os.path.exists("./stocks_" + username + ".xlsx"):
create_new_stock_data(stock_scan, username)
else:
update_existing_stock_data(stock_scan, username)
if __name__ == "__main__":
args = _arg_parse()
main(args) | 0.307878 | 0.151153 |
import gym
from gym.utils import seeding
import numpy as np
import logging
logger = logging.getLogger(__name__)
class Example_v0(gym.Env):
LF_MIN = 1
RT_MAX = 10
MOVE_LF = 0
MOVE_RT = 1
MAX_STEPS = 10
REWARD_AWAY = -2
REWARD_STEP = -1
REWARD_GOAL = MAX_STEPS
metadata = {
"render.modes": ["human"]
}
def seed(self, seed=None):
logger.warning(f"Setting random seed to: {seed}")
self.np_random, seed = seeding.np_random(seed)
return [seed]
def __init__ (self, config=None):
self.action_space = gym.spaces.Discrete(2)
self.observation_space = gym.spaces.Discrete(self.RT_MAX + 1)
# possible positions to chose on `reset()`
self.goal = int((self.LF_MIN + self.RT_MAX - 1) / 2)
self.init_positions = list(range(self.LF_MIN, self.RT_MAX))
self.init_positions.remove(self.goal)
def reset (self):
self.position = self.np_random.choice(self.init_positions)
self.count = 0
self.state = self.position
self.reward = 0
self.done = False
self.info = None
return self.state
def step (self, action):
self.info = {}
if self.done:
# should never reach this point
print("EPISODE DONE!!!")
elif self.count == self.MAX_STEPS:
self.done = True;
else:
assert self.action_space.contains(action)
self.count += 1
# logic to handle an action ...
if action == self.MOVE_LF:
if self.position == self.LF_MIN:
# invalid
self.reward = self.REWARD_AWAY
self.info['explanation'] = 'REWARD_AWAY'
else:
self.position -= 1
if self.position == self.goal:
# on goal now
self.reward = self.REWARD_GOAL
self.info['explanation'] = 'REWARD_GOAL'
self.done = 1
elif self.position < self.goal:
# moving away from goal
self.reward = self.REWARD_AWAY
self.info['explanation'] = 'REWARD_AWAY'
else:
# moving toward goal
self.reward = self.REWARD_STEP
self.info['explanation'] = 'REWARD_STEP'
elif action == self.MOVE_RT:
if self.position == self.RT_MAX:
# invalid
self.reward = self.REWARD_AWAY
self.info['explanation'] = 'REWARD_AWAY'
else:
self.position += 1
if self.position == self.goal:
# on goal now
self.reward = self.REWARD_GOAL
self.info['explanation'] = 'REWARD_GOAL'
self.done = 1
elif self.position > self.goal:
# moving away from goal
self.reward = self.REWARD_AWAY
self.info['explanation'] = 'REWARD_AWAY'
else:
# moving toward goal
self.reward = self.REWARD_STEP
self.info['explanation'] = 'REWARD_STEP'
self.state = self.position
self.info["dist"] = self.goal - self.position
try:
assert self.observation_space.contains(self.state)
except AssertionError:
print("INVALID STATE", self.state)
return [self.state, self.reward, self.done, self.info]
def render (self, mode="human"):
s = "position: {:2d} reward: {:2d} info: {}"
print(s.format(self.state, self.reward, self.info))
def close (self):
pass | environments/gym_env_example.py | import gym
from gym.utils import seeding
import numpy as np
import logging
logger = logging.getLogger(__name__)
class Example_v0(gym.Env):
LF_MIN = 1
RT_MAX = 10
MOVE_LF = 0
MOVE_RT = 1
MAX_STEPS = 10
REWARD_AWAY = -2
REWARD_STEP = -1
REWARD_GOAL = MAX_STEPS
metadata = {
"render.modes": ["human"]
}
def seed(self, seed=None):
logger.warning(f"Setting random seed to: {seed}")
self.np_random, seed = seeding.np_random(seed)
return [seed]
def __init__ (self, config=None):
self.action_space = gym.spaces.Discrete(2)
self.observation_space = gym.spaces.Discrete(self.RT_MAX + 1)
# possible positions to chose on `reset()`
self.goal = int((self.LF_MIN + self.RT_MAX - 1) / 2)
self.init_positions = list(range(self.LF_MIN, self.RT_MAX))
self.init_positions.remove(self.goal)
def reset (self):
self.position = self.np_random.choice(self.init_positions)
self.count = 0
self.state = self.position
self.reward = 0
self.done = False
self.info = None
return self.state
def step (self, action):
self.info = {}
if self.done:
# should never reach this point
print("EPISODE DONE!!!")
elif self.count == self.MAX_STEPS:
self.done = True;
else:
assert self.action_space.contains(action)
self.count += 1
# logic to handle an action ...
if action == self.MOVE_LF:
if self.position == self.LF_MIN:
# invalid
self.reward = self.REWARD_AWAY
self.info['explanation'] = 'REWARD_AWAY'
else:
self.position -= 1
if self.position == self.goal:
# on goal now
self.reward = self.REWARD_GOAL
self.info['explanation'] = 'REWARD_GOAL'
self.done = 1
elif self.position < self.goal:
# moving away from goal
self.reward = self.REWARD_AWAY
self.info['explanation'] = 'REWARD_AWAY'
else:
# moving toward goal
self.reward = self.REWARD_STEP
self.info['explanation'] = 'REWARD_STEP'
elif action == self.MOVE_RT:
if self.position == self.RT_MAX:
# invalid
self.reward = self.REWARD_AWAY
self.info['explanation'] = 'REWARD_AWAY'
else:
self.position += 1
if self.position == self.goal:
# on goal now
self.reward = self.REWARD_GOAL
self.info['explanation'] = 'REWARD_GOAL'
self.done = 1
elif self.position > self.goal:
# moving away from goal
self.reward = self.REWARD_AWAY
self.info['explanation'] = 'REWARD_AWAY'
else:
# moving toward goal
self.reward = self.REWARD_STEP
self.info['explanation'] = 'REWARD_STEP'
self.state = self.position
self.info["dist"] = self.goal - self.position
try:
assert self.observation_space.contains(self.state)
except AssertionError:
print("INVALID STATE", self.state)
return [self.state, self.reward, self.done, self.info]
def render (self, mode="human"):
s = "position: {:2d} reward: {:2d} info: {}"
print(s.format(self.state, self.reward, self.info))
def close (self):
pass | 0.585457 | 0.167253 |
from collections import namedtuple
'''
This module contains namedtuples that store the necessary parameters for the
real-case tests. Some tuples like as db_params could have also be used as
fixtures. However, to avoid the confusion of using some of parameters as
fixtures and some as namedtuple, all the necessary params are stored in
namedtuple rather than fixture.
'''
def get_aws_params():
AWSParams = namedtuple("AWSParams", [
"aws_key_id", "aws_access_key", "aws_role",
"aws_region", "aws_s3_uri", "aws_conn_name"])
return AWSParams(
aws_key_id="",
aws_access_key="",
aws_role="",
aws_region="eu-central-1",
aws_s3_uri="https://sagemaker-extension-bucket.s3.amazonaws.com",
aws_conn_name="aws_connection",
)
def get_db_params():
DBParams = namedtuple("DBParams", [
"host", "port", "user", "password"])
return DBParams(
host="127.0.0.1",
port="9563",
user="sys",
password="<PASSWORD>"
)
def get_regression_setup_params():
RegressionSetupParams = namedtuple("RegressionSetupParams", [
"schema_name", "table_name", "target_col", "data", "aws_output_path",
"job_name", "endpoint_name", "batch_size"])
return RegressionSetupParams(
schema_name="test_in_db_schema",
table_name="test_reg_table",
target_col="output_col",
data=[f"({i * 1.1}, {i * 1.2}, {i * 10})" for i in range(1, 1000)],
aws_output_path="test_reg_path",
job_name="regtestjob8",
endpoint_name="regtestjobendpoint",
batch_size=10
)
def get_classification_setup_params():
ClassificationSetupParams = namedtuple("ClassificationSetupParams", [
"schema_name", "table_name", "target_col", "data", "aws_output_path",
"job_name", "endpoint_name", "batch_size"])
return ClassificationSetupParams(
schema_name="test_in_db_schema",
table_name="test_cls_table",
target_col="output_col",
data=[f"({i * 1.1}, {i * 1.2}, {i % 2})" for i in range(1, 1000)],
aws_output_path="test_cls_path",
job_name="clstestjob8",
endpoint_name="clstestjobendpoint",
batch_size=10
)
aws_params = get_aws_params()
db_params = get_db_params()
reg_setup_params = get_regression_setup_params()
cls_setup_params = get_classification_setup_params() | tests/integration_tests/utils/parameters.py | from collections import namedtuple
'''
This module contains namedtuples that store the necessary parameters for the
real-case tests. Some tuples like as db_params could have also be used as
fixtures. However, to avoid the confusion of using some of parameters as
fixtures and some as namedtuple, all the necessary params are stored in
namedtuple rather than fixture.
'''
def get_aws_params():
AWSParams = namedtuple("AWSParams", [
"aws_key_id", "aws_access_key", "aws_role",
"aws_region", "aws_s3_uri", "aws_conn_name"])
return AWSParams(
aws_key_id="",
aws_access_key="",
aws_role="",
aws_region="eu-central-1",
aws_s3_uri="https://sagemaker-extension-bucket.s3.amazonaws.com",
aws_conn_name="aws_connection",
)
def get_db_params():
DBParams = namedtuple("DBParams", [
"host", "port", "user", "password"])
return DBParams(
host="127.0.0.1",
port="9563",
user="sys",
password="<PASSWORD>"
)
def get_regression_setup_params():
    """Return the fixed parameter set used by the regression test case.

    The synthetic rows are SQL value tuples "(x1, x2, y)" with a linear
    target (i * 10) over 999 samples.
    """
    fields = (
        "schema_name", "table_name", "target_col", "data", "aws_output_path",
        "job_name", "endpoint_name", "batch_size")
    RegressionSetupParams = namedtuple("RegressionSetupParams", fields)
    sample_rows = ["({}, {}, {})".format(i * 1.1, i * 1.2, i * 10)
                   for i in range(1, 1000)]
    return RegressionSetupParams(
        "test_in_db_schema",
        "test_reg_table",
        "output_col",
        sample_rows,
        "test_reg_path",
        "regtestjob8",
        "regtestjobendpoint",
        10,
    )
def get_classification_setup_params():
    """Return the fixed parameter set used by the classification test case."""
    ClassificationSetupParams = namedtuple("ClassificationSetupParams", (
        "schema_name", "table_name", "target_col", "data", "aws_output_path",
        "job_name", "endpoint_name", "batch_size"))
    # 999 synthetic rows "(x1, x2, label)" with a binary label.
    rows = ["({}, {}, {})".format(idx * 1.1, idx * 1.2, idx % 2)
            for idx in range(1, 1000)]
    return ClassificationSetupParams(
        schema_name="test_in_db_schema",
        table_name="test_cls_table",
        target_col="output_col",
        data=rows,
        aws_output_path="test_cls_path",
        job_name="clstestjob8",
        endpoint_name="clstestjobendpoint",
        batch_size=10,
    )
aws_params = get_aws_params()
db_params = get_db_params()
reg_setup_params = get_regression_setup_params()
cls_setup_params = get_classification_setup_params() | 0.714528 | 0.335514 |
import time
import socket
import struct
import threading
import numpy as np
class BaseReadData(object):
    """TCP client that streams EEG data packets from an acquisition server.

    Each packet is a 12-byte big-endian header (4-byte tag, uint16 code,
    uint16 request, uint32 payload size) followed by little-endian int32
    samples for ``chanum + 1`` channels; the extra last row carries the
    event/trigger channel.

    Fixes applied: ``np.float`` (removed in NumPy 1.20+/1.24) replaced by
    ``np.float64``; deprecated ``Thread.setDaemon()`` replaced by the
    ``daemon`` attribute; ``receive_data`` no longer issues a redundant
    zero-byte ``recv`` once the requested count is reached.
    """

    def __init__(self, ip_address='172.16.31.10', sample_rate=1000, buffer_time=30, end_flag_trial=33):
        # ``end_flag_trial`` is currently unused; kept for API compatibility.
        self.collecting = False  # polled by the background collector thread
        self.data = []  # per-packet arrays appended by collect()
        self.CHANNELS = [
            'FP1', 'FPZ', 'FP2', 'AF3', 'AF4', 'F7', 'F5', 'F3',
            'F1', 'FZ', 'F2', 'F4', 'F6', 'F8', 'FT7', 'FC5',
            'FC3', 'FC1', 'FCZ', 'FC2', 'FC4', 'FC6', 'FT8', 'T7',
            'C5', 'C3', 'C1', 'CZ', 'C2', 'C4', 'C6', 'T8',
            'M1', 'TP7', 'CP5', 'CP3', 'CP1', 'CPZ', 'CP2', 'CP4',
            'CP6', 'TP8', 'M2', 'P7', 'P5', 'P3', 'P1', 'PZ',
            'P2', 'P4', 'P6', 'P8', 'PO7', 'PO5', 'PO3', 'POZ',
            'PO4', 'PO6', 'PO8', 'CB1', 'O1', 'OZ', 'O2', 'CB2',
            'HEO', 'VEO', 'EKG', 'EMG'
        ]  # all channel labels; M1 is entry 33 and M2 entry 43 (1-based)
        self.chanum = 66  # number of channels actually streamed
        # self.chanum = len(self.CHANNELS)
        print(self.chanum)
        self.sample_rate = sample_rate  # raw sampling rate (Hz)
        self.buffer_time = buffer_time  # buffer length (seconds)
        self.buffer_point = int(
            np.round(self.sample_rate * self.buffer_time))  # buffer length in samples
        # NOTE(review): data_buffer is allocated but never used by this class.
        self.data_buffer = np.zeros(
            (self.chanum + 1, self.buffer_point))
        self.per_packet_time = 0.04  # seconds of signal per packet
        self.packet_time_point = int(
            np.round(self.sample_rate * self.per_packet_time))  # samples per channel per packet
        # int32 samples, 4 bytes each, over chanum + 1 channels
        self.per_packet_bytes = (self.chanum + 1) * self.packet_time_point * 4
        self.ip_address = ip_address
        self.port = 4000  # server port
        self.client = None  # socket, created in connect()
        # little-endian int32 format covering one whole packet payload
        self._unpack_data_fmt = '<' + \
            str((self.chanum + 1) * self.packet_time_point) + 'i'

    def connect(self):
        """Open the TCP connection and tune the socket buffers."""
        self.client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        send_buf_size = self.per_packet_bytes
        recv_buf_size = self.per_packet_bytes * 9
        self.client.connect((self.ip_address, self.port))
        # Small, latency-sensitive packets: disable Nagle's algorithm.
        self.client.setsockopt(socket.SOL_TCP, socket.TCP_NODELAY, 1)
        self.client.setsockopt(
            socket.SOL_SOCKET, socket.SO_SNDBUF, send_buf_size)
        self.client.setsockopt(
            socket.SOL_SOCKET, socket.SO_RCVBUF, recv_buf_size)
        print('Connect Success')

    def start_acq(self):
        """Ask the server to start streaming and launch the collector thread."""
        self.client.send(struct.pack('12B', 67, 84, 82, 76,
                                     0, 2, 0, 1, 0, 0, 0, 0))  # 'CTRL': request data delivery
        header_packet = self.receive_data(24)
        print(header_packet)
        print('开始从缓冲区读入数据。')
        self.client.send(struct.pack('12B', 67, 84, 82, 76,
                                     0, 3, 0, 3, 0, 0, 0, 0))  # 'CTRL': start acquisition
        self.data = []
        self.collecting = True
        t = threading.Thread(target=self.collect)
        t.daemon = True  # setDaemon() is deprecated since Python 3.10
        t.start()

    def collect(self):
        """Background loop: append packets to ``self.data`` until stopped."""
        print('Collecting Start.')
        while self.collecting:
            try:
                self.data.append(self.get_data())
            except ConnectionAbortedError:
                print(
                    'Connection to the device is closed. This can be normal if collecting is done.')
                break
        print('Collecting Done.')

    def get_all(self):
        """Return all collected data as one (chanum + 1, n_samples) array."""
        return np.concatenate(self.data, axis=1)

    def receive_data(self, n_bytes):
        """Receive exactly ``n_bytes`` from the socket.

        Returns fewer bytes only if the peer closes the connection early.
        """
        b_data = b''
        b_count = 0
        while b_count < n_bytes:
            tmp_bytes = self.client.recv(n_bytes - b_count)
            if not tmp_bytes:  # peer closed the connection
                break
            b_count += len(tmp_bytes)
            b_data += tmp_bytes
        return b_data

    def stop_acq(self):
        """Stop acquisition and close the connection."""
        self.collecting = False  # lets the collector thread exit its loop
        self.client.send(struct.pack('12B', 67, 84, 82, 76,
                                     0, 3, 0, 4, 0, 0, 0, 0))  # stop acquisition
        time.sleep(0.001)
        self.client.send(struct.pack('12B', 67, 84, 82, 76,
                                     0, 2, 0, 2, 0, 0, 0, 0))  # stop data delivery
        self.client.send(struct.pack('12B', 67, 84, 82, 76,
                                     0, 1, 0, 2, 0, 0, 0, 0))  # close the server-side session
        self.client.close()
        print('结束从缓冲区读入数据。')

    def _unpack_header(self, header_packet):
        """Decode the 12-byte big-endian packet header.

        Returns (tag bytes, code, request, payload size).
        """
        chan_name = struct.unpack('>4s', header_packet[:4])
        w_code = struct.unpack('>H', header_packet[4:6])
        w_request = struct.unpack('>H', header_packet[6:8])
        packet_size = struct.unpack('>I', header_packet[8:])
        return (chan_name[0], w_code[0], w_request[0], packet_size[0])

    def _unpack_data(self, data_packet):
        """Decode one payload into a (chanum + 1, samples) int array."""
        data_trans = np.asarray(struct.unpack(
            self._unpack_data_fmt, data_packet)).reshape((-1, self.chanum + 1)).T
        return data_trans

    def get_data(self):
        """Read one packet; return scaled float64 data, shape (chanum+1, samples)."""
        tmp_header = self.receive_data(12)
        details_header = self._unpack_header(tmp_header)
        if details_header[-1] != self.per_packet_bytes:
            print(
                f'Warning, received data has {details_header[-1]} bytes, and required data should have {self.per_packet_bytes} bytes. The EEG channels setting may be incorrect')
        bytes_data = self.receive_data(self.per_packet_bytes)
        new_data_trans = self._unpack_data(bytes_data)
        # np.float was removed in NumPy 1.20+/1.24; np.float64 is the equivalent dtype.
        new_data_temp = np.empty(new_data_trans.shape, dtype=np.float64)
        new_data_temp[:-1, :] = new_data_trans[:-1, :] * 0.0298  # raw units -> uV
        new_data_temp[-1, :] = np.zeros(new_data_trans.shape[1])  # event row zeroed
        self.new_data = new_data_temp
        return new_data_temp
if __name__ == '__main__':
    # Manual smoke test: connect, stream for a while, periodically report
    # the accumulated buffer shape, then shut everything down.
    client = BaseReadData()
    client.connect()
    time.sleep(2)
    client.start_acq()
    checks = 0
    while checks < 5:
        time.sleep(2)
        print('-------------------------')
        print(client.get_all().shape)
        checks += 1
    time.sleep(2)
    client.stop_acq()
    time.sleep(1)
    client.client.close()  # stop_acq() already closed the socket; redundant no-op
import socket
import struct
import threading
import numpy as np
class BaseReadData(object):
def __init__(self, ip_address='172.16.31.10', sample_rate=1000, buffer_time=30, end_flag_trial=33):
self.collecting = False
self.data = []
self.CHANNELS = [
'FP1', 'FPZ', 'FP2', 'AF3', 'AF4', 'F7', 'F5', 'F3',
'F1', 'FZ', 'F2', 'F4', 'F6', 'F8', 'FT7', 'FC5',
'FC3', 'FC1', 'FCZ', 'FC2', 'FC4', 'FC6', 'FT8', 'T7',
'C5', 'C3', 'C1', 'CZ', 'C2', 'C4', 'C6', 'T8',
'M1', 'TP7', 'CP5', 'CP3', 'CP1', 'CPZ', 'CP2', 'CP4',
'CP6', 'TP8', 'M2', 'P7', 'P5', 'P3', 'P1', 'PZ',
'P2', 'P4', 'P6', 'P8', 'PO7', 'PO5', 'PO3', 'POZ',
'PO4', 'PO6', 'PO8', 'CB1', 'O1', 'OZ', 'O2', 'CB2',
'HEO', 'VEO', 'EKG', 'EMG'
] # 所有导联 其中 M1序号为33 M2序号为43
self.chanum = 66
# self.chanum = len(self.CHANNELS) # 导联数目
print(self.chanum)
self.sample_rate = sample_rate # 原始采样率
self.buffer_time = buffer_time # 缓存区秒数
self.buffer_point = int(
np.round(self.sample_rate * self.buffer_time)) # 缓存区采样点数
self.data_buffer = np.zeros(
(self.chanum + 1, self.buffer_point)) # 数据缓存区
self.per_packet_time = 0.04 # 每个包时长
self.packet_time_point = int(
np.round(self.sample_rate * self.per_packet_time)) # 每个包每导联的时间点数
self.per_packet_bytes = (self.chanum+1) * \
self.packet_time_point*4 # 每个包的最终的字节数
self.ip_address = ip_address
self.port = 4000 # 客户端端口号
self.client = None # 保存生成的客户端
self._unpack_data_fmt = '<' + \
str((self.chanum + 1) * self.packet_time_point) + 'i' # 解码参数
def connect(self):
self.client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
SEND_BUF_SIZE = self.per_packet_bytes
RECV_BUF_SIZE = self.per_packet_bytes * 9
self.client.connect((self.ip_address, self.port))
self.client.setsockopt(socket.SOL_TCP, socket.TCP_NODELAY, 1)
self.client.setsockopt(
socket.SOL_SOCKET, socket.SO_SNDBUF, SEND_BUF_SIZE)
self.client.setsockopt(
socket.SOL_SOCKET, socket.SO_RCVBUF, RECV_BUF_SIZE)
print('Connect Success')
def start_acq(self):
"""
开始获取数据
"""
self.client.send(struct.pack('12B', 67, 84, 82, 76,
0, 2, 0, 1, 0, 0, 0, 0)) # 开始获取数据
header_packet = self.receive_data(24)
print(header_packet)
print('开始从缓冲区读入数据。')
self.client.send(struct.pack('12B', 67, 84, 82, 76,
0, 3, 0, 3, 0, 0, 0, 0)) # 开始采集
self.data = []
self.collecting = True
t = threading.Thread(target=self.collect)
t.setDaemon(True)
t.start()
def collect(self):
print('Collecting Start.')
while self.collecting:
try:
self.data.append(self.get_data())
except ConnectionAbortedError:
print(
'Connection to the device is closed. This can be normal if collecting is done.')
break
print('Collecting Done.')
def get_all(self):
return np.concatenate(self.data, axis=1)
def receive_data(self, n_bytes):
"""
接收数据
"""
b_data = b''
flag_stop_recv = False
b_count = 0
while not flag_stop_recv:
tmp_bytes = self.client.recv(n_bytes - b_count)
if b_count == n_bytes or not tmp_bytes:
flag_stop_recv = True
b_count += len(tmp_bytes)
b_data += tmp_bytes
return b_data
def stop_acq(self):
"""
结束获取数据
"""
self.collecting = False
self.client.send(struct.pack('12B', 67, 84, 82, 76,
0, 3, 0, 4, 0, 0, 0, 0)) # 结束采集
time.sleep(0.001)
self.client.send(struct.pack('12B', 67, 84, 82, 76,
0, 2, 0, 2, 0, 0, 0, 0)) # 结束获取数据
self.client.send(struct.pack('12B', 67, 84, 82, 76,
0, 1, 0, 2, 0, 0, 0, 0)) # 关闭连接
self.client.close()
print('结束从缓冲区读入数据。')
def _unpack_header(self, header_packet):
"""
解码头部
"""
chan_name = struct.unpack('>4s', header_packet[:4])
w_code = struct.unpack('>H', header_packet[4:6])
w_request = struct.unpack('>H', header_packet[6:8])
packet_size = struct.unpack('>I', header_packet[8:])
return (chan_name[0], w_code[0], w_request[0], packet_size[0])
def _unpack_data(self, data_packet):
"""
解码数据
"""
data_trans = np.asarray(struct.unpack(
self._unpack_data_fmt, data_packet)).reshape((-1, self.chanum + 1)).T
return data_trans
def get_data(self):
"""
获取数据
"""
tmp_header = self.receive_data(12)
details_header = self._unpack_header(tmp_header)
if details_header[-1] == self.per_packet_bytes:
pass
else:
print(
f'Warning, received data has {details_header[-1]} bytes, and required data should have {self.per_packet_bytes} bytes. The EEG channels setting may be incorrect')
bytes_data = self.receive_data(self.per_packet_bytes)
new_data_trans = self._unpack_data(bytes_data)
new_data_temp = np.empty(new_data_trans.shape, dtype=np.float)
new_data_temp[:-1, :] = new_data_trans[:-1, :] * 0.0298 # 单位 uV
new_data_temp[-1, :] = np.zeros(new_data_trans.shape[1])
self.new_data = new_data_temp
return new_data_temp
if __name__ == '__main__':
client = BaseReadData()
client.connect()
time.sleep(2)
client.start_acq()
for _ in range(5):
time.sleep(2)
print('-------------------------')
print(client.get_all().shape)
time.sleep(2)
client.stop_acq()
time.sleep(1)
client.client.close() | 0.219756 | 0.14682 |
import re
import scrapy
from scrapy.linkextractors import LinkExtractor
from scrapy.spiders import CrawlSpider, Rule
from douban_spider.items import DoubanSpiderItem
class DoubanSpider(CrawlSpider):
    """Crawl the Douban Top-250 movie list and scrape each film's detail page."""

    name = 'douban'
    allowed_domains = ['douban.com']
    start_urls = ['https://movie.douban.com/top250']
    rules = (
        # Pagination links (?start=25, ?start=50, ...): follow only, no callback.
        Rule(LinkExtractor(allow=r'.*?start=\d+.*'), follow=True),
        # Movie detail pages (/subject/<id>/): parse, do not follow further.
        Rule(LinkExtractor(allow=r".*/subject/\d+.*"), callback='parse_item', follow=False)
    )

    def parse_item(self, response):
        """Extract one movie's fields from a detail page and yield an item.

        NOTE(review): assumes the page carries a 'top250-no' badge
        ("No.42" style); on a page without it ``rank_No`` is None and
        ``split`` raises — confirm the link rules guarantee Top-250 pages.
        """
        rank_No = response.xpath("//span[@class='top250-no']/text()").get()
        rank = rank_No.split('.')[1]  # "No.42" -> "42"
        title = response.xpath("//h1/span/text()").get()
        year_text = response.xpath("//span[@class='year']/text()").get()
        year = re.sub(r'\(|\)', '', year_text)  # "(1994)" -> "1994"
        # The info block lists director / screenwriters / stars as the
        # first three <span> groups, in that order.
        infos = response.xpath("//div[@id='info']/span")
        director = infos[0].xpath(".//a/text()").get()
        screenwriter_list = infos[1].xpath(".//a/text()").getall()
        screenwriter = ','.join(screenwriter_list)
        stars_list = infos[2].xpath(".//a/text()").getall()
        stars = ','.join(stars_list)
        types_list = response.xpath("//div[@id='info']/span[@property='v:genre']/text()").getall()
        types = ','.join(types_list)
        runtime = response.xpath("//span[@property='v:runtime']/text()").get()
        IMDb = response.xpath("//div[@id='info']/a[@rel='nofollow']/@href").get()
        origin_url = response.url
        pub_time = response.xpath("//span[@property='v:initialReleaseDate']/@content").get()
        # Free-text lines of the info block; after stripping whitespace and
        # slashes, the first two non-empty entries are taken as
        # country and language.
        others = response.xpath("//div[@id='info']/text()").getall()
        country, language, *_ = [x for x in list(map(lambda x: re.sub(r"\s|/", '', x), others)) if x]
        # NOTE(review): country/language are extracted but never stored on
        # the item — confirm whether DoubanSpiderItem should carry them.
        item = DoubanSpiderItem(rank=rank,
                                title=title,
                                year=year,
                                director=director,
                                screenwriter=screenwriter,
                                stars=stars,
                                types=types,
                                runtime=runtime,
                                IMDb=IMDb,
                                origin_url=origin_url,
                                pub_time=pub_time,
                                )
        yield item
import scrapy
from scrapy.linkextractors import LinkExtractor
from scrapy.spiders import CrawlSpider, Rule
from douban_spider.items import DoubanSpiderItem
class DoubanSpider(CrawlSpider):
name = 'douban'
allowed_domains = ['douban.com']
start_urls = ['https://movie.douban.com/top250']
rules = (
Rule(LinkExtractor(allow=r'.*?start=\d+.*'), follow=True),
Rule(LinkExtractor(allow=r".*/subject/\d+.*"), callback='parse_item', follow=False)
)
def parse_item(self, response):
rank_No = response.xpath("//span[@class='top250-no']/text()").get()
rank = rank_No.split('.')[1]
title = response.xpath("//h1/span/text()").get()
year_text = response.xpath("//span[@class='year']/text()").get()
year = re.sub(r'\(|\)', '', year_text)
infos = response.xpath("//div[@id='info']/span")
director = infos[0].xpath(".//a/text()").get()
screenwriter_list = infos[1].xpath(".//a/text()").getall()
screenwriter = ','.join(screenwriter_list)
stars_list = infos[2].xpath(".//a/text()").getall()
stars = ','.join(stars_list)
types_list = response.xpath("//div[@id='info']/span[@property='v:genre']/text()").getall()
types = ','.join(types_list)
runtime = response.xpath("//span[@property='v:runtime']/text()").get()
IMDb = response.xpath("//div[@id='info']/a[@rel='nofollow']/@href").get()
origin_url = response.url
pub_time = response.xpath("//span[@property='v:initialReleaseDate']/@content").get()
others = response.xpath("//div[@id='info']/text()").getall()
country, language, *_ = [x for x in list(map(lambda x: re.sub(r"\s|/", '', x), others)) if x]
item = DoubanSpiderItem(rank=rank,
title=title,
year=year,
director=director,
screenwriter=screenwriter,
stars=stars,
types=types,
runtime=runtime,
IMDb=IMDb,
origin_url=origin_url,
pub_time=pub_time,
)
yield item | 0.233095 | 0.082033 |
import logging
from rest_framework import generics
from rest_framework.response import Response
from rest_framework import status
from django.shortcuts import get_object_or_404
from cajas.api.CsrfExempt import CsrfExemptSessionAuthentication
from cajas.concepts.models.concepts import Concept
from cajas.inventory.models.brand import Brand
from cajas.loans.services.loan_service import LoanManager
from cajas.office.models.officeCountry import OfficeCountry
from cajas.webclient.views.get_ip import get_ip
from cajas.webclient.views.utils import is_secretary
from ....models.movement_daily_square import MovementDailySquare
from ....services.daily_square_service import MovementDailySquareManager
from ...serializers.movement_daily_square_serializer import MovementDailySquareSerializer
from ....models.movement_daily_square_request_item import MovementDailySquareRequestItem
logger = logging.getLogger(__name__)
daily_square_manager = MovementDailySquareManager()
class UpdateDailySquareMovement(generics.RetrieveUpdateDestroyAPIView):
    """Update or delete a MovementDailySquare through the daily-square manager.

    Session authentication with CSRF exemption is used for API clients.
    """

    queryset = MovementDailySquare.objects.all()
    serializer_class = MovementDailySquareSerializer
    authentication_classes = (CsrfExemptSessionAuthentication,)

    def update(self, request, *args, **kwargs):
        """Update movement ``pk`` and run concept-specific side effects.

        Two concepts receive extra handling:
        * "Compra de Inventario Unidad": the movement's request items are
          deleted and rebuilt from the posted ``form[form][<n>][...]`` fields.
        * "Préstamo empleado": a superuser or office secretary creates the
          employee loan and the movement is marked reviewed/approved.

        NOTE(review): every exception is caught and answered with a 204
        whose text talks about a per-user concept cap — confirm that this
        message matches the actual failure modes.
        """
        data = request.POST.copy()
        data['pk'] = self.kwargs['pk']
        # NOTE(review): stores a User instance inside a QueryDict copy —
        # downstream manager code presumably expects this; confirm.
        data['responsible'] = request.user
        data['ip'] = get_ip(request)
        concept = Concept.objects.get(pk=request.POST['concept'])
        office_country = OfficeCountry.objects.select_related('office', 'country', 'box').get(
            pk=request.session['office']
        )
        try:
            daily_square_manager.update_daily_square_movement(data)
            if concept.name == "Compra de Inventario Unidad":
                movement = get_object_or_404(MovementDailySquare, pk=kwargs['pk'])
                # Rebuild the purchased-item rows from scratch.
                MovementDailySquareRequestItem.objects.filter(movement=movement).delete()
                # "elemts" carries the comma-separated indices of the
                # posted form rows.
                values = request.data["elemts"].split(",")
                for value in values:
                    if request.data["form[form][" + value + "][name]"] == '' or \
                            request.data["form[form][" + value + "][price]"] == '':
                        # Incomplete row: keep an empty placeholder item.
                        MovementDailySquareRequestItem.objects.create(
                            movement=movement,
                        )
                    else:
                        if "form[form][" + value + "][is_replacement]" in request.data:
                            MovementDailySquareRequestItem.objects.create(
                                movement=movement,
                                name=request.data["form[form][" + value + "][name]"],
                                brand=get_object_or_404(Brand, pk=request.data["form[form][" + value + "][brand]"]),
                                price=request.data["form[form][" + value + "][price]"],
                                is_replacement=True
                            )
                        else:
                            MovementDailySquareRequestItem.objects.create(
                                movement=movement,
                                name=request.data["form[form][" + value + "][name]"],
                                brand=get_object_or_404(Brand, pk=request.data["form[form][" + value + "][brand]"]),
                                price=request.data["form[form][" + value + "][price]"]
                            )
            elif concept.name == 'Préstamo empleado':
                movement = MovementDailySquare.objects.get(pk=data['pk'])
                loan_manager = LoanManager()
                value = data['value']
                if data['value'] == '':
                    # Fall back to the dedicated loan-amount field.
                    value = data['loan_value']
                # Only superusers and office secretaries may create the loan.
                if request.user.is_superuser or is_secretary(request.user, office_country):
                    data_loan = {
                        'request': request,
                        'value': value,
                        'value_cop': 0,
                        'interest': data['interest'],
                        'time': data['time'],
                        'exchange': data['exchange'],
                        'office': request.session['office'],
                        'loan_type': 'EMP',
                        'lender': data['lender_employee'],
                        'box_from': data['box_from'],
                        'date': data['date'],
                    }
                    if data['box_from'] == 'partner':
                        data_loan['provider'] = data['partner_provider']
                    loan_manager.create_employee_loan(data_loan)
                    movement.review = True
                    movement.status = MovementDailySquare.APPROVED
                    movement.save()
            # NOTE(review): 201 Created is returned for an update — confirm
            # clients depend on this status before changing it.
            return Response(
                'Se ha actualizado el movimiento exitosamente',
                status=status.HTTP_201_CREATED
            )
        except Exception as e:
            logger.exception(str(e))
            print(e)
            return Response(
                'Se ha alcanzado el tope para este usuario para este concepto. No se ha creado el movimiento.',
                status=status.HTTP_204_NO_CONTENT
            )

    def delete(self, request, *args, **kwargs):
        """Delete movement ``pk`` via the manager and confirm."""
        data = request.POST.copy()
        data['pk'] = self.kwargs['pk']
        daily_square_manager.delete_daily_square_movement(data)
        return Response(
            'Se ha eliminado el movimiento exitosamente',
            status=status.HTTP_201_CREATED
        )
from rest_framework import generics
from rest_framework.response import Response
from rest_framework import status
from django.shortcuts import get_object_or_404
from cajas.api.CsrfExempt import CsrfExemptSessionAuthentication
from cajas.concepts.models.concepts import Concept
from cajas.inventory.models.brand import Brand
from cajas.loans.services.loan_service import LoanManager
from cajas.office.models.officeCountry import OfficeCountry
from cajas.webclient.views.get_ip import get_ip
from cajas.webclient.views.utils import is_secretary
from ....models.movement_daily_square import MovementDailySquare
from ....services.daily_square_service import MovementDailySquareManager
from ...serializers.movement_daily_square_serializer import MovementDailySquareSerializer
from ....models.movement_daily_square_request_item import MovementDailySquareRequestItem
logger = logging.getLogger(__name__)
daily_square_manager = MovementDailySquareManager()
class UpdateDailySquareMovement(generics.RetrieveUpdateDestroyAPIView):
queryset = MovementDailySquare.objects.all()
serializer_class = MovementDailySquareSerializer
authentication_classes = (CsrfExemptSessionAuthentication,)
def update(self, request, *args, **kwargs):
data = request.POST.copy()
data['pk'] = self.kwargs['pk']
data['responsible'] = request.user
data['ip'] = get_ip(request)
concept = Concept.objects.get(pk=request.POST['concept'])
office_country = OfficeCountry.objects.select_related('office', 'country', 'box').get(
pk=request.session['office']
)
try:
daily_square_manager.update_daily_square_movement(data)
if concept.name == "Compra de Inventario Unidad":
movement = get_object_or_404(MovementDailySquare, pk=kwargs['pk'])
MovementDailySquareRequestItem.objects.filter(movement=movement).delete()
values = request.data["elemts"].split(",")
for value in values:
if request.data["form[form][" + value + "][name]"] == '' or \
request.data["form[form][" + value + "][price]"] == '':
MovementDailySquareRequestItem.objects.create(
movement=movement,
)
else:
if "form[form][" + value + "][is_replacement]" in request.data:
MovementDailySquareRequestItem.objects.create(
movement=movement,
name=request.data["form[form][" + value + "][name]"],
brand=get_object_or_404(Brand, pk=request.data["form[form][" + value + "][brand]"]),
price=request.data["form[form][" + value + "][price]"],
is_replacement=True
)
else:
MovementDailySquareRequestItem.objects.create(
movement=movement,
name=request.data["form[form][" + value + "][name]"],
brand=get_object_or_404(Brand, pk=request.data["form[form][" + value + "][brand]"]),
price=request.data["form[form][" + value + "][price]"]
)
elif concept.name == 'Préstamo empleado':
movement = MovementDailySquare.objects.get(pk=data['pk'])
loan_manager = LoanManager()
value = data['value']
if data['value'] == '':
value = data['loan_value']
if request.user.is_superuser or is_secretary(request.user, office_country):
data_loan = {
'request': request,
'value': value,
'value_cop': 0,
'interest': data['interest'],
'time': data['time'],
'exchange': data['exchange'],
'office': request.session['office'],
'loan_type': 'EMP',
'lender': data['lender_employee'],
'box_from': data['box_from'],
'date': data['date'],
}
if data['box_from'] == 'partner':
data_loan['provider'] = data['partner_provider']
loan_manager.create_employee_loan(data_loan)
movement.review = True
movement.status = MovementDailySquare.APPROVED
movement.save()
return Response(
'Se ha actualizado el movimiento exitosamente',
status=status.HTTP_201_CREATED
)
except Exception as e:
logger.exception(str(e))
print(e)
return Response(
'Se ha alcanzado el tope para este usuario para este concepto. No se ha creado el movimiento.',
status=status.HTTP_204_NO_CONTENT
)
def delete(self, request, *args, **kwargs):
data = request.POST.copy()
data['pk'] = self.kwargs['pk']
daily_square_manager.delete_daily_square_movement(data)
return Response(
'Se ha eliminado el movimiento exitosamente',
status=status.HTTP_201_CREATED
) | 0.382257 | 0.102529 |
def write_todo(open_file, todo):
    """Write a single markdown bullet line for *todo* to *open_file*."""
    open_file.write(' - {}\n'.format(todo))
def write_todos_for_module(open_file, todo_list):
    """Write every entry of *todo_list* as a markdown bullet line."""
    for entry in todo_list:
        write_todo(open_file, entry)
def write_newline(open_file):
    """Emit a single blank line to *open_file*."""
    open_file.write('\n')
def format_as_details(open_file, todo_list):
    """Write *todo_list* wrapped in a collapsible HTML <details> block.

    Bug fix: the summary count previously read the module-global
    ``num_todos`` set by the writer loop below instead of the length of
    the list actually passed in.
    """
    open_file.write(' <details>\n')
    open_file.write(' <summary> Todos ({})</summary>\n\n'.format(len(todo_list)))
    write_todos_for_module(open_file, todo_list)
    open_file.write('\n </details>\n')
def write_header(open_file, title, level=1):
    """Write a markdown header of the given *level* for *title*."""
    open_file.write('{} {}\n'.format('#' * level, title))
def make_filepath_link(filepath):
    """Return a markdown link to *filepath* one directory up from the README."""
    return '[{0}](../{0})'.format(filepath)
def write_filepath(open_file, filepath):
    """Write a numbered markdown list entry linking to *filepath*.

    NOTE(review): relies on the module-global loop index ``i`` set by the
    writer loop at the bottom of this script — confirm before reusing this
    function from anywhere else.
    """
    filepath_link = make_filepath_link(filepath)
    line = str(i) + '. ' + filepath_link + '\n'
    open_file.write(line)
# Aggregate TODO lines ("<email> <filepath> # todo text") from todos.txt
# into a nested mapping {email: {filepath: [todo, ...]}} and render it as
# a markdown README.
d = {}
with open('todos/todos.txt', 'r') as open_file:
    for line in open_file:
        if 'TODO' not in line:
            continue
        # Normalize "<email> path # text": strip the leading '<', turn the
        # closing '>' into '#', then split into exactly three parts.
        # maxsplit=2 keeps any '#' characters inside the todo text intact.
        email, filepath, todo = line.lstrip('<').replace('>', '#').split('#', 2)
        # Raw strings: '\_' is an invalid escape sequence (SyntaxWarning on
        # Python 3.12+); escape underscores for markdown with r'\_'.
        email = email.strip().replace('_', r'\_')
        filepath = filepath.strip().replace('_', r'\_')
        todo = todo.strip().replace('_', r'\_')
        try:
            filepath_to_todos = d[email]
        except KeyError:
            d[email] = {filepath: [todo]}
        else:
            filepath_to_todos.setdefault(filepath, []).append(todo)

with open('todos/README.md', 'w') as open_file:
    for email in d:
        write_header(open_file, email, level=1)
        # ``i`` must stay global: write_filepath() reads it.
        for i, filepath in enumerate(d[email], start=1):
            write_filepath(open_file, filepath)
            todo_list = d[email][filepath]
            num_todos = len(todo_list)
            # Collapse long lists behind a <details> toggle.
            if num_todos > 3:
                format_as_details(open_file, todo_list)
            else:
                write_todos_for_module(open_file, todo_list)
            write_newline(open_file)
        write_newline(open_file)
    write_newline(open_file)
def write_todo(open_file, todo):
line = ' - ' + todo + '\n'
open_file.write(line)
def write_todos_for_module(open_file, todo_list):
for todo in todo_list:
write_todo(open_file, todo)
def write_newline(open_file):
open_file.write('\n')
def format_as_details(open_file, todo_list):
open_file.write(' <details>\n')
open_file.write(' <summary> Todos ({})</summary>\n\n'.format(num_todos))
write_todos_for_module(open_file, todo_list)
open_file.write('\n </details>\n')
def write_header(open_file, title, level=1):
line = '#' * level + ' ' + title + '\n'
open_file.write(line)
def make_filepath_link(filepath):
return '[' + filepath + '](../' + filepath + ')'
def write_filepath(open_file, filepath):
filepath_link = make_filepath_link(filepath)
line = str(i) + '. ' + filepath_link + '\n'
open_file.write(line)
d = {}
with open('todos/todos.txt', 'r') as open_file:
for line in open_file:
if 'TODO' not in line:
continue
email, filepath, todo = line.lstrip('<').replace('>', '#').split('#')
email = email.strip().replace('_', '\_')
filepath = filepath.strip().replace('_', '\_')
todo = todo.strip().replace('_', '\_')
try:
filepath_to_todos = d[email]
except KeyError:
d[email] = {filepath:[todo]}
else:
filepath_to_todos.setdefault(filepath, []).append(todo)
with open('todos/README.md', 'w') as open_file:
for email in d:
write_header(open_file, email, level=1)
for i, filepath in enumerate(d[email], start=1):
write_filepath(open_file, filepath)
todo_list = d[email][filepath]
num_todos = len(todo_list)
if num_todos > 3:
format_as_details(open_file, todo_list)
else:
write_todos_for_module(open_file, todo_list)
write_newline(open_file)
write_newline(open_file)
write_newline(open_file) | 0.177063 | 0.144752 |
from django.conf import settings
from mighty.functions import make_searchable
from company.backends.search import SearchBackend
from company.choices.fr import LEGALFORM, APE
from io import BytesIO
import base64, pycurl, json, re, logging, time, datetime
logger = logging.getLogger(__name__)
class SearchBackend(SearchBackend):
    """Company search backend for the INSEE Sirene V3 REST API (France).

    Obtains an OAuth2 bearer token with the client credentials from
    ``settings.INSEE_KEY`` / ``settings.INSEE_SECRET`` and queries the
    ``/siret`` endpoint, mapping the French API fields onto plain dicts.
    """

    token_url = 'https://api.insee.fr/token'
    siren_url = 'https://api.insee.fr/entreprises/sirene/V3/siren'
    siret_url = 'https://api.insee.fr/entreprises/sirene/V3/siret'
    since_format = '%Y-%m-%d'  # format used for dateCreationUniteLegale
    iso_format = '%Y-%m-%dT%H:%M:%S'
    # Remaining retries for call_webservice. NOTE(review): decremented on
    # failure and never reset, so the retry budget is shared across every
    # call made through the same instance — confirm this is intended.
    error = 5
    raw_address = "%(address)s, %(locality)s %(postal_code)s"

    def call_webservice(self, url, headers, postfields=None):
        """Run an HTTP request through pycurl and return (json_body, status).

        GET by default; passing *postfields* makes curl issue a POST.
        On failure the call retries until ``self.error`` reaches zero,
        then re-raises the last exception.
        """
        buffer = BytesIO()  # response body accumulates here
        c = pycurl.Curl()  # curl handle
        c.setopt(c.URL, url)  # target URL
        c.setopt(c.WRITEDATA, buffer)  # write the response into the buffer
        c.setopt(c.HTTPHEADER, headers)  # attach authorization/accept headers
        if postfields:
            c.setopt(c.POSTFIELDS, postfields)  # POST body (form-encoded)
        try:
            c.perform()  # execute the request
            response_code = c.getinfo(c.RESPONSE_CODE)  # HTTP status code
            c.close()  # release the handle
            datas = json.loads(buffer.getvalue())
        except Exception as e:
            logger.error(buffer.getvalue())
            logger.error(e)
            self.error -= 1
            if self.error:
                # Retry recursively until the budget is exhausted.
                return self.call_webservice(url, headers, postfields)
            else:
                raise e
        return datas, response_code

    def get_token(self):
        """Fetch an OAuth2 access token, or return False when none is granted."""
        basic = '%s:%s' % (settings.INSEE_KEY, settings.INSEE_SECRET)
        basic = base64.b64encode(basic.encode('utf-8')).decode('utf-8')
        headers = ["Authorization: Basic %s" % basic]
        buffer, response_code = self.call_webservice(self.token_url, headers, "grant_type=client_credentials")
        try:
            return buffer["access_token"]
        except Exception:
            return False

    def get_companies(self, qreq, number=50, offset=0):
        """Query the /siret endpoint with the raw query string *qreq*.

        Returns ``(message, companies, total, pages)``: *message* is False
        on success (otherwise the API's error text), *companies* is a list
        of normalized dicts, *total* the match count reported by the API
        and *pages* a page count for the given page size. On an API quota
        fault (code 900804) the call sleeps until the next minute and
        retries. NOTE(review): other error branches only log and fall
        through, so the method then returns None while callers unpack four
        values — confirm or handle.
        """
        message, companies, total, pages = (False, [], 0, 0)
        access_token = self.get_token()
        headers = ['Accept: application/json', 'Authorization: Bearer %s' % access_token]
        url = "%s?q=%s&nombre=%s&debut=%s&masquerValeursNulles=true" % (self.siret_url, qreq, number, offset)
        buffer, response_code = self.call_webservice(url, headers)
        if 'header' in buffer:
            message = False if buffer['header']['message'] == "OK" else buffer['header']['message']
            total = buffer['header'].get('total', 0)
            # NOTE(review): round() may undercount pages (e.g. 149/100 -> 1);
            # ceiling division would be the usual page count.
            pages = round(total/number) if total else 0
        if str(response_code)[0] in ["2", "3"]:
            # Normalize each "etablissement" record; nested .get() calls
            # supply fallbacks for the optional French API fields.
            for company in buffer.get('etablissements', [buffer['header']]):
                logger.debug(company)
                new_company = {
                    'siret': company.get('siret'),
                    'denomination': company['uniteLegale'].get('denominationUniteLegale', company['uniteLegale'].get('nomUniteLegale')),
                    'legalform': company['uniteLegale']['categorieJuridiqueUniteLegale'],
                    'ape': company['uniteLegale']['activitePrincipaleUniteLegale'].replace('.', ''),
                    'ape_noun': company['uniteLegale']['nomenclatureActivitePrincipaleUniteLegale'],
                    'since': self.since(company['uniteLegale'].get('dateCreationUniteLegale')),
                    'category': company['uniteLegale'].get('categorieEntreprise', ''),
                    'slice_effective': company['uniteLegale'].get('trancheEffectifsUniteLegale', ''),
                    'siege': company.get('etablissementSiege', False),
                    'rna': company['uniteLegale'].get('identifiantAssociationUniteLegale', None),
                    'address': {
                        # Street line: number + way type + way label.
                        'address': ' '.join(filter(None, [
                            company['adresseEtablissement'].get('numeroVoieEtablissement'),
                            company['adresseEtablissement'].get('typeVoieEtablissement'),
                            company['adresseEtablissement'].get('libelleVoieEtablissement')
                        ])),
                        'complement': company['adresseEtablissement'].get('complementAdresseEtablissement', ''),
                        'locality': company['adresseEtablissement'].get('libelleCommuneEtablissement',
                            company['adresseEtablissement'].get('libelleCommuneEtrangerEtablissement', '')),
                        'postal_code': company['adresseEtablissement'].get('codePostalEtablissement',
                            company['adresseEtablissement'].get('codeCommuneEtablissement', '')),
                        # Foreign-country fields default to France.
                        'country': company['adresseEtablissement'].get('libellePaysEtrangerEtablissement', 'france').lower(),
                        'country_code': company['adresseEtablissement'].get('codePaysEtrangerEtablissement', 'fr').lower(),
                        'cedex': company['adresseEtablissement'].get('libelleCedexEtablissement', ''),
                        'cedex_code': company['adresseEtablissement'].get('codeCedexEtablissement', ''),
                        'special': company['adresseEtablissement'].get('distributionSpecialeEtablissement', ''),
                        'index': company['adresseEtablissement'].get('indiceRepetitionEtablissement', ''),
                        'nic': company.get('nic')
                    }
                }
                new_company['raw_address'] = self.raw_address % (new_company['address'])
                new_company['ape_str'] = self.get_ape_str(new_company['ape'])
                new_company['legalform_str'] = self.get_legalform_str(new_company['legalform'])
                new_company['slice_str'] = self.get_slice_str(new_company['slice_effective'])
                companies.append(new_company)
            return message, companies, total, pages
        else:
            if 'fault' in buffer:
                if buffer['fault']['code'] == 900804:
                    # Quota exceeded: wait until the current minute ends,
                    # then retry the same request.
                    sleepfor = 61-datetime.datetime.now().second
                    i = 0
                    while i < sleepfor:
                        logger.info("desc: %s, wait: %s seconds" % (buffer['fault']['description'], sleepfor))
                        time.sleep(1)
                        sleepfor -= 1
                    return self.get_companies(qreq, number, offset)
                else:
                    logger.info(buffer)
            else:
                logger.info("Error encountered but we dont know what")

    def get_company_by_siren(self, siren):
        """Look up the head office ("siège") for a SIREN number."""
        return self.get_companies('siren:%s+AND+etablissementSiege:true' % siren)

    def get_company_by_rna(self, rna):
        """Look up the head office for an association RNA identifier."""
        return self.get_companies('identifiantAssociationUniteLegale:%s+AND+etablissementSiege:true' % rna)

    def get_active_companies(self, number, offset):
        """Page through administratively active companies."""
        return self.get_companies('etatAdministratifUniteLegale:A', number, offset)

    def get_company_by_fulltext(self, fulltext):
        """Search by free text; 'W' + 9 chars is treated as an RNA identifier."""
        if len(fulltext) == 10 and fulltext[0] == 'W':
            return self.get_company_by_rna(fulltext)
        fulltext = re.sub(r"\s+", '-', fulltext)
        return self.get_companies('denominationUniteLegale:%s+AND+etatAdministratifUniteLegale:A' % make_searchable(fulltext))
from mighty.functions import make_searchable
from company.backends.search import SearchBackend
from company.choices.fr import LEGALFORM, APE
from io import BytesIO
import base64, pycurl, json, re, logging, time, datetime
logger = logging.getLogger(__name__)
class SearchBackend(SearchBackend):
    """Company-search backend backed by the INSEE Sirene V3 REST API."""
    # OAuth2 token endpoint and the two Sirene query endpoints.
    token_url = 'https://api.insee.fr/token'
    siren_url = 'https://api.insee.fr/entreprises/sirene/V3/siren'
    siret_url = 'https://api.insee.fr/entreprises/sirene/V3/siret'
    since_format = '%Y-%m-%d'          # date-only format for creation dates
    iso_format = '%Y-%m-%dT%H:%M:%S'   # full ISO timestamp format
    # Retry budget consumed by call_webservice().  NOTE(review): declared at
    # class level and only ever decremented -- the budget is shared across
    # calls and instances; confirm this is intended.
    error = 5
    # %-template rendered from the normalized 'address' sub-dict.
    raw_address = "%(address)s, %(locality)s %(postal_code)s"
def call_webservice(self, url, headers, postfields=None):
    """Perform an HTTP request with pycurl and decode the JSON response.

    Args:
        url: full URL to request.
        headers: list of raw HTTP header strings.
        postfields: optional urlencoded body; when given the request becomes
            a POST.

    Returns:
        Tuple ``(datas, response_code)``: parsed JSON body and HTTP status.

    Raises:
        The last caught exception once the retry budget (``self.error``) is
        exhausted.  NOTE(review): ``self.error`` is never reset after a
        success, so repeated transient failures across different calls
        eventually disable retries for the instance -- confirm intended.
    """
    buffer = BytesIO()                    # response body accumulator
    c = pycurl.Curl()                     # create the curl handle
    c.setopt(c.URL, url)                  # target URL
    c.setopt(c.WRITEDATA, buffer)         # write the body into the buffer
    c.setopt(c.HTTPHEADER, headers)       # auth / accept headers
    if postfields:
        c.setopt(c.POSTFIELDS, postfields)  # switches the request to POST
    try:
        c.perform()                                  # run the transfer
        response_code = c.getinfo(c.RESPONSE_CODE)   # HTTP status code
        c.close()
        datas = json.loads(buffer.getvalue())
    except Exception as e:
        # Log the raw body (often an HTML/error payload that broke json.loads)
        # then retry recursively while the shared budget allows.
        logger.error(buffer.getvalue())
        logger.error(e)
        self.error-=1
        if self.error:
            return self.call_webservice(url, headers, postfields)
        else:
            raise e
    return datas, response_code
def get_token(self):
    """Fetch an OAuth2 bearer token via the client-credentials grant.

    Credentials come from ``settings.INSEE_KEY`` / ``settings.INSEE_SECRET``
    and are sent as an HTTP Basic header.

    Returns:
        The access-token string, or False when the response has no
        ``access_token`` key (e.g. bad credentials).
    """
    basic = '%s:%s' % (settings.INSEE_KEY, settings.INSEE_SECRET)
    basic = base64.b64encode(basic.encode('utf-8')).decode('utf-8')
    headers = ["Authorization: Basic %s" % basic]
    buffer, response_code = self.call_webservice(self.token_url, headers, "grant_type=client_credentials")
    try:
        return buffer["access_token"]
    except Exception:
        return False
def get_companies(self, qreq, number=50, offset=0):
    """Query the Sirene "siret" search endpoint and normalize each hit.

    Args:
        qreq: raw Sirene query expression (e.g. ``'siren:...+AND+...'``).
        number: page size, sent as the ``nombre`` query parameter.
        offset: pagination start, sent as ``debut``.

    Returns:
        ``(message, companies, total, pages)`` on a 2xx/3xx response, where
        ``message`` is False when the API header said "OK" (otherwise the
        header's message text), ``companies`` is a list of normalized dicts,
        ``total`` the match count and ``pages`` the derived page count.
        NOTE(review): on error responses the method only logs (or retries
        after a rate-limit wait) and can fall through returning None --
        callers must tolerate a None result.
    """
    message, companies, total, pages = (False, [], 0, 0)
    access_token = self.get_token()
    headers = ['Accept: application/json', 'Authorization: Bearer %s' % access_token]
    url = "%s?q=%s&nombre=%s&debut=%s&masquerValeursNulles=true" % (self.siret_url, qreq, number, offset)
    buffer, response_code = self.call_webservice(url, headers)
    if'header' in buffer:
        message = False if buffer['header']['message'] == "OK" else buffer['header']['message']
        total = buffer['header'].get('total', 0)
        pages = round(total/number) if total else 0
    if str(response_code)[0] in ["2", "3"]:
        # Success: normalize every "etablissement"; when the key is missing
        # the header itself is iterated as a degenerate single entry.
        for company in buffer.get('etablissements', [buffer['header']]):
            logger.debug(company)
            new_company = {
                'siret': company.get('siret'),
                # Fall back to the personal name when there is no company name.
                'denomination': company['uniteLegale'].get('denominationUniteLegale', company['uniteLegale'].get('nomUniteLegale')),
                'legalform': company['uniteLegale']['categorieJuridiqueUniteLegale'],
                # APE code with the dot removed ("62.01Z" -> "6201Z").
                'ape': company['uniteLegale']['activitePrincipaleUniteLegale'].replace('.', ''),
                'ape_noun': company['uniteLegale']['nomenclatureActivitePrincipaleUniteLegale'],
                'since': self.since(company['uniteLegale'].get('dateCreationUniteLegale')),
                'category': company['uniteLegale'].get('categorieEntreprise', ''),
                'slice_effective': company['uniteLegale'].get('trancheEffectifsUniteLegale', ''),
                'siege':company.get('etablissementSiege', False),
                'rna': company['uniteLegale'].get('identifiantAssociationUniteLegale', None),
                'address': {
                    # Street line assembled from number / street-type / street name.
                    'address': ' '.join(filter(None, [
                        company['adresseEtablissement'].get('numeroVoieEtablissement'),
                        company['adresseEtablissement'].get('typeVoieEtablissement'),
                        company['adresseEtablissement'].get('libelleVoieEtablissement')
                    ])),
                    'complement': company['adresseEtablissement'].get('complementAdresseEtablissement', ''),
                    # Foreign-address fields are used as fallbacks for French ones.
                    'locality': company['adresseEtablissement'].get('libelleCommuneEtablissement',
                        company['adresseEtablissement'].get('libelleCommuneEtrangerEtablissement', '')),
                    'postal_code': company['adresseEtablissement'].get('codePostalEtablissement',
                        company['adresseEtablissement'].get('codeCommuneEtablissement', '')),
                    'country': company['adresseEtablissement'].get('libellePaysEtrangerEtablissement', 'france').lower(),
                    'country_code': company['adresseEtablissement'].get('codePaysEtrangerEtablissement', 'fr').lower(),
                    'cedex': company['adresseEtablissement'].get('libelleCedexEtablissement', ''),
                    'cedex_code': company['adresseEtablissement'].get('codeCedexEtablissement', ''),
                    'special': company['adresseEtablissement'].get('distributionSpecialeEtablissement', ''),
                    'index': company['adresseEtablissement'].get('indiceRepetitionEtablissement', ''),
                    'nic': company.get('nic')
                }
            }
            # Derived human-readable fields.
            new_company['raw_address'] = self.raw_address % (new_company['address'])
            new_company['ape_str'] = self.get_ape_str(new_company['ape'])
            new_company['legalform_str'] = self.get_legalform_str(new_company['legalform'])
            new_company['slice_str'] = self.get_slice_str(new_company['slice_effective'])
            companies.append(new_company)
        return message, companies, total, pages
    else:
        if 'fault' in buffer:
            # NOTE(review): fault code 900804 appears to be the per-minute
            # throttling fault; the loop sleeps (logging each second) until
            # the next minute boundary, then retries -- confirm against the
            # INSEE API gateway documentation.
            if buffer['fault']['code'] == 900804:
                sleepfor = 61-datetime.datetime.now().second
                i = 0
                while i < sleepfor:
                    logger.info("desc: %s, wait: %s seconds" % (buffer['fault']['description'], sleepfor))
                    time.sleep(1)
                    sleepfor-=1
                return self.get_companies(qreq, number, offset)
            else:
                logger.info(buffer)
        else:
            logger.info("Error encountered but we dont know what")
def get_company_by_siren(self, siren):
    """Look up a head office ("siege") establishment by its SIREN number."""
    return self.get_companies('siren:%s+AND+etablissementSiege:true' % siren)
def get_company_by_rna(self, rna):
    """Look up a head office establishment by its RNA (association) id."""
    return self.get_companies('identifiantAssociationUniteLegale:%s+AND+etablissementSiege:true' % rna)
def get_active_companies(self, number, offset):
    """Page through all administratively-active ("A") legal units."""
    return self.get_companies('etatAdministratifUniteLegale:A', number, offset)
def get_company_by_fulltext(self, fulltext):
    """Free-text company search, with a shortcut for RNA identifiers.

    A 10-character query starting with 'W' is treated as an RNA id and
    delegated to get_company_by_rna(); otherwise whitespace runs are
    collapsed to dashes and an active-company name search is issued.
    """
    looks_like_rna = len(fulltext) == 10 and fulltext[0] == 'W'
    if looks_like_rna:
        return self.get_company_by_rna(fulltext)
    dashed = re.sub(r"\s+", '-', fulltext)
    query = 'denominationUniteLegale:%s+AND+etatAdministratifUniteLegale:A' % make_searchable(dashed)
    return self.get_companies(query)
import numpy as np
import json, pickle
out_dir = '../sample_output/pickles/'
def feature_vector(density_symmetry, roughness_max, roughness_symmetry, filename):
    """Concatenate the three feature groups and append the class label.

    The label is the 5th character from the end of *filename* (the character
    just before a 4-character extension such as ``.txt``).  Appending a
    string label coerces the entire vector to a string dtype.
    """
    combined = np.concatenate((density_symmetry, roughness_max, roughness_symmetry), axis=0)
    class_label = filename[-5]
    return np.append(combined, class_label)
def label_vec(all_vector, file):
    """Extract column 12 (the label column) and pickle it via dump().

    Indexing with a one-element list keeps the result 2-D, shape (n, 1).
    """
    matrix = np.asarray(all_vector)
    labels = matrix[:, [12]]
    print(len(labels))
    dump(labels, file)
def dump(all_vector, out_filename):
    """Pickle *all_vector* into ``out_dir``/*out_filename*."""
    target_path = out_dir + out_filename
    with open(target_path, 'wb') as outfile:
        pickle.dump(all_vector, outfile)
def load(filename):
    """Load a pickled feature matrix from ``out_dir`` and clean it.

    The files are written by dump() with pickle.dump, so np.load must be
    told to allow pickles -- since NumPy 1.16.3 np.load raises ValueError
    on pickled payloads unless allow_pickle=True is passed (the original
    call therefore failed on any modern NumPy).

    Returns:
        The cleaned feature matrix (see cleaned()).
    """
    feat_vector = np.load(out_dir + filename, allow_pickle=True)
    return cleaned(feat_vector)
def cleaned(feat_vector):
    """Split rows by their label, impute NaN/inf per class, and recombine.

    A row whose last element is the string '1' is positive; anything else
    is negative.  Each class is imputed independently and negatives are
    placed first in the returned matrix.
    """
    positives, negatives = [], []
    for row in feat_vector:
        (positives if row[-1] == '1' else negatives).append(row)
    # print(len(positives), len(negatives))
    negatives = replace_nan_or_inf(negatives)
    positives = replace_nan_or_inf(positives)
    combined = np.concatenate((negatives, positives), axis=0)
    print(np.shape(combined))
    return combined
def replace_nan_or_inf_mod(arr):
    """Drop rows whose first 12 columns contain any NaN or inf value.

    Despite the shared name prefix, this variant removes offending rows
    rather than imputing them.  Prints the input length and the number of
    dropped rows, preserving the original diagnostics.
    """
    print(len(arr))
    bad_rows = []
    for row_idx in range(len(arr)):
        for col_idx in range(12):
            cell = float(arr[row_idx][col_idx])
            if np.isnan(cell) or np.isinf(cell):
                bad_rows.append(row_idx)
                break
    print(len(bad_rows))
    kept = [arr[row_idx] for row_idx in range(len(arr)) if row_idx not in bad_rows]
    return kept
def replace_nan_or_inf(arr):
fine = []
nanorinf = []
for i in range(0, 12):
for j in range(0, len(arr)):
if np.isnan(float(arr[j][i])) or np.isinf(float(arr[j][i])):
nanorinf.append(j)
else:
fine.append(float(arr[j][i]))
val = np.median(fine)
for ind in nanorinf:
arr[ind][i] = val
fine = []
nanorinf = []
return arr | scripts/save_features.py |
import numpy as np
import json, pickle
out_dir = '../sample_output/pickles/'
def feature_vector(density_symmetry, roughness_max, roughness_symmetry, filename):
# print(density_symmetry, roughness_max, roughness_symmetry)
arr = np.append(density_symmetry, roughness_max, axis = 0)
arr = np.append(arr, roughness_symmetry, axis = 0)
label = filename[-5]
arr = np.append(arr, label)
return arr
def label_vec(all_vector, file):
labels = (np.asarray(all_vector))[:, [12]]
print(len(labels))
dump(labels, file)
def dump(all_vector, out_filename):
with open(out_dir + out_filename, 'wb') as outfile:
pickle.dump(all_vector, outfile)
def load(filename):
feat_vector = np.load(out_dir + filename)
feat_vector = cleaned(feat_vector)
return feat_vector
def cleaned(feat_vector):
pos = []
neg = []
for i in range(len(feat_vector)):
label = feat_vector[i][-1]
if label == '1':
pos.append(feat_vector[i])
else:
neg.append(feat_vector[i])
# print(len(pos), len(neg))
neg = replace_nan_or_inf(neg)
pos = replace_nan_or_inf(pos)
fv = np.concatenate((neg, pos), axis=0)
print(np.shape(fv))
return fv
def replace_nan_or_inf_mod(arr):
# Removes the rows containing NaN values.
print(len(arr))
nanorinf = []
fine = []
for j in range(0, len(arr)):
for i in range(0, 12):
if np.isnan(float(arr[j][i])) or np.isinf(float(arr[j][i])):
nanorinf.append(j)
break
print(len(nanorinf))
for j in range(0, len(arr)):
if j in nanorinf:
pass
else:
fine.append(j)
fine_mat = [[] for i in range(len(fine))]
for ind, ele in enumerate(fine):
fine_mat[ind] = arr[ele]
# print(fine_mat)
return fine_mat
def replace_nan_or_inf(arr):
    """Impute non-finite cells with the per-column median, in place.

    For each of the first 12 columns, every NaN/inf cell is replaced by
    the median of that column's finite values.  Returns *arr*; its rows
    are modified in place.
    """
    for col in range(12):
        finite_values = []
        bad_rows = []
        for row in range(len(arr)):
            cell = float(arr[row][col])
            if np.isnan(cell) or np.isinf(cell):
                bad_rows.append(row)
            else:
                finite_values.append(cell)
        median_value = np.median(finite_values)
        for row in bad_rows:
            arr[row][col] = median_value
    return arr
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Comment',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('popularity', models.PositiveIntegerField(default=0)),
('content', models.TextField(max_length=600)),
('updated_at', models.DateTimeField(auto_now=True)),
('created_at', models.DateTimeField(auto_now_add=True)),
],
options={
'ordering': ['-popularity', '-created_at'],
'get_latest_by': 'created_at',
},
),
migrations.CreateModel(
name='Question',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=300)),
('content', models.TextField()),
('updated_at', models.DateTimeField(auto_now=True)),
('created_at', models.DateTimeField(auto_now_add=True)),
],
options={
'ordering': ['-updated_at'],
},
),
migrations.CreateModel(
name='Tag',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(blank=True, max_length=80)),
],
),
migrations.AddField(
model_name='question',
name='tags',
field=models.ManyToManyField(to='main.Tag'),
),
migrations.AddField(
model_name='question',
name='user',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
),
migrations.AddField(
model_name='comment',
name='question',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='main.Question'),
),
migrations.AddField(
model_name='comment',
name='user',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
),
] | main/migrations/0001_initial.py | from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial schema: Comment, Question and Tag models plus their relations.

    Auto-generated by Django's makemigrations; do not restructure -- field
    definitions must match the models' historical state.
    """

    initial = True

    dependencies = [
        # The user model is swappable, so depend on whatever AUTH_USER_MODEL is.
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        migrations.CreateModel(
            name='Comment',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('popularity', models.PositiveIntegerField(default=0)),
                ('content', models.TextField(max_length=600)),
                ('updated_at', models.DateTimeField(auto_now=True)),
                ('created_at', models.DateTimeField(auto_now_add=True)),
            ],
            options={
                # Most popular first, ties broken by recency.
                'ordering': ['-popularity', '-created_at'],
                'get_latest_by': 'created_at',
            },
        ),
        migrations.CreateModel(
            name='Question',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=300)),
                ('content', models.TextField()),
                ('updated_at', models.DateTimeField(auto_now=True)),
                ('created_at', models.DateTimeField(auto_now_add=True)),
            ],
            options={
                'ordering': ['-updated_at'],
            },
        ),
        migrations.CreateModel(
            name='Tag',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(blank=True, max_length=80)),
            ],
        ),
        # Relation fields are added after all three models exist.
        migrations.AddField(
            model_name='question',
            name='tags',
            field=models.ManyToManyField(to='main.Tag'),
        ),
        migrations.AddField(
            model_name='question',
            name='user',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
        ),
        migrations.AddField(
            model_name='comment',
            name='question',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='main.Question'),
        ),
        migrations.AddField(
            model_name='comment',
            name='user',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
        ),
    ]
from __future__ import print_function
from unicorn import *
from unicorn.arm_const import *
# code to be emulated
ARM_CODE = b"\xe3\xa0\x00\x37\xe0\x42\x10\x03" # mov r0, #0x37; sub r1, r2, r3
THUMB_CODE = b"\xb0\x83" # sub sp, #0xc
# memory address where emulation starts
ADDRESS = 0x10000
# callback for tracing basic blocks
def hook_block(uc, address, size, user_data):
print(">>> Tracing basic block at 0x%x, block size = 0x%x" %(address, size))
# callback for tracing instructions
def hook_code(uc, address, size, user_data):
print(">>> Tracing instruction at 0x%x, instruction size = 0x%x" %(address, size))
# Test ARM
def test_arm():
print("Emulate ARM Big-Endian code")
try:
# Initialize emulator in ARM mode
mu = Uc(UC_ARCH_ARM, UC_MODE_ARM | UC_MODE_BIG_ENDIAN)
# map 2MB memory for this emulation
mu.mem_map(ADDRESS, 2 * 1024 * 1024)
# write machine code to be emulated to memory
mu.mem_write(ADDRESS, ARM_CODE)
# initialize machine registers
mu.reg_write(UC_ARM_REG_R0, 0x1234)
mu.reg_write(UC_ARM_REG_R2, 0x6789)
mu.reg_write(UC_ARM_REG_R3, 0x3333)
mu.reg_write(UC_ARM_REG_APSR, 0xFFFFFFFF) #All application flags turned on
# tracing all basic blocks with customized callback
mu.hook_add(UC_HOOK_BLOCK, hook_block)
# tracing one instruction at ADDRESS with customized callback
mu.hook_add(UC_HOOK_CODE, hook_code, begin=ADDRESS, end=ADDRESS)
# emulate machine code in infinite time
mu.emu_start(ADDRESS, ADDRESS + len(ARM_CODE))
# now print out some registers
print(">>> Emulation done. Below is the CPU context")
r0 = mu.reg_read(UC_ARM_REG_R0)
r1 = mu.reg_read(UC_ARM_REG_R1)
print(">>> R0 = 0x%x" %r0)
print(">>> R1 = 0x%x" %r1)
except UcError as e:
print("ERROR: %s" % e)
def test_thumb():
print("Emulate THUMB code")
try:
# Initialize emulator in thumb mode
mu = Uc(UC_ARCH_ARM, UC_MODE_THUMB | UC_MODE_BIG_ENDIAN)
# map 2MB memory for this emulation
mu.mem_map(ADDRESS, 2 * 1024 * 1024)
# write machine code to be emulated to memory
mu.mem_write(ADDRESS, THUMB_CODE)
# initialize machine registers
mu.reg_write(UC_ARM_REG_SP, 0x1234)
# tracing all basic blocks with customized callback
mu.hook_add(UC_HOOK_BLOCK, hook_block)
# tracing all instructions with customized callback
mu.hook_add(UC_HOOK_CODE, hook_code)
# emulate machine code in infinite time
# Note we start at ADDRESS | 1 to indicate THUMB mode.
mu.emu_start(ADDRESS | 1, ADDRESS + len(THUMB_CODE))
# now print out some registers
print(">>> Emulation done. Below is the CPU context")
sp = mu.reg_read(UC_ARM_REG_SP)
print(">>> SP = 0x%x" %sp)
except UcError as e:
print("ERROR: %s" % e)
if __name__ == '__main__':
test_arm()
print("=" * 26)
test_thumb() | unicorn/bindings/python/sample_armeb.py |
from __future__ import print_function
from unicorn import *
from unicorn.arm_const import *
# Machine code to be emulated (big-endian encodings).
ARM_CODE = b"\xe3\xa0\x00\x37\xe0\x42\x10\x03" # mov r0, #0x37; sub r1, r2, r3
THUMB_CODE = b"\xb0\x83" # sub sp, #0xc
# Memory address where emulation starts (base of the 2MB mapping below).
ADDRESS = 0x10000
# callback for tracing basic blocks
def hook_block(uc, address, size, user_data):
    """Unicorn callback invoked at the start of every basic block."""
    print(">>> Tracing basic block at 0x%x, block size = 0x%x" % (address, size))
# callback for tracing instructions
def hook_code(uc, address, size, user_data):
    """Unicorn callback invoked for every traced instruction."""
    print(">>> Tracing instruction at 0x%x, instruction size = 0x%x" % (address, size))
# Test ARM
def test_arm():
    """Emulate two big-endian ARM instructions and print R0/R1.

    Runs ARM_CODE (mov r0, #0x37; sub r1, r2, r3) at ADDRESS with a
    block-trace hook and a single-instruction code hook installed.
    """
    print("Emulate ARM Big-Endian code")
    try:
        # Initialize emulator in big-endian ARM mode.
        mu = Uc(UC_ARCH_ARM, UC_MODE_ARM | UC_MODE_BIG_ENDIAN)
        # Map 2MB of memory for this emulation.
        mu.mem_map(ADDRESS, 2 * 1024 * 1024)
        # Write the machine code to be emulated into memory.
        mu.mem_write(ADDRESS, ARM_CODE)
        # Initialize machine registers.
        mu.reg_write(UC_ARM_REG_R0, 0x1234)
        mu.reg_write(UC_ARM_REG_R2, 0x6789)
        mu.reg_write(UC_ARM_REG_R3, 0x3333)
        mu.reg_write(UC_ARM_REG_APSR, 0xFFFFFFFF) #All application flags turned on
        # Trace all basic blocks with the customized callback.
        mu.hook_add(UC_HOOK_BLOCK, hook_block)
        # Trace only the first instruction (begin == end == ADDRESS).
        mu.hook_add(UC_HOOK_CODE, hook_code, begin=ADDRESS, end=ADDRESS)
        # Emulate until the end of the code buffer.
        mu.emu_start(ADDRESS, ADDRESS + len(ARM_CODE))
        # Now print out some registers.
        print(">>> Emulation done. Below is the CPU context")
        r0 = mu.reg_read(UC_ARM_REG_R0)
        r1 = mu.reg_read(UC_ARM_REG_R1)
        print(">>> R0 = 0x%x" %r0)
        print(">>> R1 = 0x%x" %r1)
    except UcError as e:
        print("ERROR: %s" % e)
def test_thumb():
    """Emulate one big-endian THUMB instruction (sub sp, #0xc) and print SP."""
    print("Emulate THUMB code")
    try:
        # Initialize emulator in big-endian THUMB mode.
        mu = Uc(UC_ARCH_ARM, UC_MODE_THUMB | UC_MODE_BIG_ENDIAN)
        # Map 2MB of memory for this emulation.
        mu.mem_map(ADDRESS, 2 * 1024 * 1024)
        # Write the machine code to be emulated into memory.
        mu.mem_write(ADDRESS, THUMB_CODE)
        # Initialize machine registers.
        mu.reg_write(UC_ARM_REG_SP, 0x1234)
        # Trace all basic blocks with the customized callback.
        mu.hook_add(UC_HOOK_BLOCK, hook_block)
        # Trace all instructions with the customized callback.
        mu.hook_add(UC_HOOK_CODE, hook_code)
        # Emulate the code.
        # Note we start at ADDRESS | 1: the low bit selects THUMB mode.
        mu.emu_start(ADDRESS | 1, ADDRESS + len(THUMB_CODE))
        # Now print out some registers.
        print(">>> Emulation done. Below is the CPU context")
        sp = mu.reg_read(UC_ARM_REG_SP)
        print(">>> SP = 0x%x" %sp)
    except UcError as e:
        print("ERROR: %s" % e)
if __name__ == '__main__':
    # Run both demos, separated by a divider line.
    test_arm()
    print("=" * 26)
    test_thumb()
import argparse
import os
import re
def report_coverage_summary(refs, ofile_path):
ofh = open(ofile_path, 'w')
ofh.write("ref_id\tuncovered_bases\tref_length\n")
for ref in refs:
last_pos = 0
c_uncovered = 0
mol_length = refs[ref]['length']
for range in refs[ref]['ranges']:
## this will only be true if the current range doesn't overlap the previous one
if ( range[0] > last_pos ):
c_uncovered += range[0] - last_pos
## now reset the last_pos, as long as this range is past it
if (range[1] > last_pos):
last_pos = range[1]
## now do the last position to the end of the molecule
c_uncovered += mol_length - last_pos
c_perc = ((mol_length - c_uncovered) / mol_length) * 100
ofh.write("{0}\t{1}\t{2}\t{3:.2f}\n".format(ref, c_uncovered, mol_length, c_perc))
def main():
'''
Expectations:
- The 4th line should have the column headers
- show-coords is run with at least the -r, -c and -l options
'''
parser = argparse.ArgumentParser( description='Parses a nucmer coords file and reports a summary of the reference genome coverage')
## output file to be written
parser.add_argument('-c', '--coords_file', type=str, required=True, help='Nucmer alignment coords file, run with the -r, -c and -l options' )
parser.add_argument('-o', '--output_file', type=str, required=True, help='Path to an output file to be created' )
parser.add_argument('-p', '--pct_id_cutoff', type=float, required=False, default=0.0, help='Only consider alignment ranges with a percent identity higher than this value (0-100)' )
args = parser.parse_args()
## lookup for the indexes of needed columns (since show-coords options can make these change positions)
column_idx = {}
current_line_number = 0
## each element key is a reference molecule, with subkeys: ranges[], length
references = {}
current_ref_id = None
for line in open(args.coords_file):
line = line.strip()
current_line_number += 1
if ( current_line_number == 4 ):
cols = re.split('\s{2,}', line)
col_num = 0
for col in cols:
if col != '|' and col != '':
column_idx[col] = col_num
col_num += 1
elif current_line_number > 5:
cols = line.split()
this_ref_id = cols[ column_idx['| [TAGS]'] + 1]
fmin = int(cols[ column_idx['[S1]'] ]) - 1
fmax = int(cols[ column_idx['[E1]'] ])
mol_length = int(cols[ column_idx['[LEN R]'] ])
pct_id = float(cols[ column_idx['[% IDY]'] ])
if current_ref_id == None or this_ref_id != current_ref_id:
current_ref_id = this_ref_id
references[current_ref_id] = { 'ranges': [], 'length': mol_length }
if pct_id >= args.pct_id_cutoff:
references[current_ref_id]['ranges'].append( [fmin, fmax] )
report_coverage_summary(references, args.output_file)
if __name__ == '__main__':
main() | sandbox/jorvis/nucmer_genome_coverage_summary.py |
import argparse
import os
import re
def report_coverage_summary(refs, ofile_path):
    """Write a per-reference coverage summary as a tab-separated file.

    Args:
        refs: dict keyed by reference molecule ID; each value is a dict with
            'length' (molecule length in bases) and 'ranges' (list of
            [fmin, fmax] alignment spans, assumed sorted by start coordinate
            as produced by show-coords -r).
        ofile_path: path of the TSV file to create.

    Each data row contains: ref_id, uncovered base count, reference length,
    and percent of the reference covered (two decimals).
    """
    # 'with' guarantees the handle is flushed and closed (the original
    # leaked it); the header previously listed only three columns while the
    # data rows contain four -- the percent-coverage column is now labeled.
    with open(ofile_path, 'w') as ofh:
        ofh.write("ref_id\tuncovered_bases\tref_length\tpct_coverage\n")
        for ref in refs:
            last_pos = 0
            c_uncovered = 0
            mol_length = refs[ref]['length']
            # 'span' instead of 'range' -- the original shadowed the builtin.
            for span in refs[ref]['ranges']:
                # A gap exists only when this span starts past the furthest
                # covered position seen so far.
                if span[0] > last_pos:
                    c_uncovered += span[0] - last_pos
                # Advance the high-water mark; contained spans are ignored.
                if span[1] > last_pos:
                    last_pos = span[1]
            # Account for the tail between the last span and the molecule end.
            c_uncovered += mol_length - last_pos
            c_perc = ((mol_length - c_uncovered) / mol_length) * 100
            ofh.write("{0}\t{1}\t{2}\t{3:.2f}\n".format(ref, c_uncovered, mol_length, c_perc))
def main():
    """Parse a nucmer show-coords file and report reference genome coverage.

    Expectations:
      - Line 4 of the coords file holds the column headers.
      - show-coords was run with at least the -r, -c and -l options.

    Fixes over the original: raw string for the regex (avoids the invalid
    '\\s' escape DeprecationWarning), identity comparison with None, and the
    input file is closed via a context manager.
    """
    parser = argparse.ArgumentParser(description='Parses a nucmer coords file and reports a summary of the reference genome coverage')

    parser.add_argument('-c', '--coords_file', type=str, required=True, help='Nucmer alignment coords file, run with the -r, -c and -l options')
    parser.add_argument('-o', '--output_file', type=str, required=True, help='Path to an output file to be created')
    parser.add_argument('-p', '--pct_id_cutoff', type=float, required=False, default=0.0, help='Only consider alignment ranges with a percent identity higher than this value (0-100)')
    args = parser.parse_args()

    # Column name -> token index; positions vary with the show-coords options.
    column_idx = {}
    current_line_number = 0

    # reference molecule ID -> {'ranges': [[fmin, fmax], ...], 'length': int}
    references = {}
    current_ref_id = None

    with open(args.coords_file) as coords_fh:
        for line in coords_fh:
            line = line.strip()
            current_line_number += 1

            if current_line_number == 4:
                # Header row: split on runs of 2+ spaces.  Every token --
                # including the '|' separators -- advances the index so the
                # positions line up with line.split() on the data rows.
                cols = re.split(r'\s{2,}', line)
                for col_num, col in enumerate(cols):
                    if col != '|' and col != '':
                        column_idx[col] = col_num
            elif current_line_number > 5:
                cols = line.split()
                # The reference ID is the token right after the final '|'.
                this_ref_id = cols[column_idx['| [TAGS]'] + 1]
                fmin = int(cols[column_idx['[S1]']]) - 1   # convert to 0-based
                fmax = int(cols[column_idx['[E1]']])
                mol_length = int(cols[column_idx['[LEN R]']])
                pct_id = float(cols[column_idx['[% IDY]']])

                if current_ref_id is None or this_ref_id != current_ref_id:
                    current_ref_id = this_ref_id
                    references[current_ref_id] = {'ranges': [], 'length': mol_length}

                if pct_id >= args.pct_id_cutoff:
                    references[current_ref_id]['ranges'].append([fmin, fmax])

    report_coverage_summary(references, args.output_file)
if __name__ == '__main__':
    # Script entry point.
    main()
import uuid
from typing import Any, Callable, List, Optional
import structlog
from flask import Flask, jsonify
from flask_injector import FlaskInjector
from flask_migrate import Migrate
from flask_restx import Api
from flask_sqlalchemy import SQLAlchemy
from flask_wtf import CSRFProtect
from sqlalchemy import MetaData
from structlog.stdlib import BoundLogger
from .__version__ import __version__ as API_VERSION
LOGGER: BoundLogger = structlog.stdlib.get_logger()
csrf: CSRFProtect = CSRFProtect()
db: SQLAlchemy = SQLAlchemy(
metadata=MetaData(
naming_convention={
"ix": "ix_%(column_0_label)s",
"uq": "uq_%(table_name)s_%(column_0_name)s",
"ck": "ck_%(table_name)s_%(column_0_name)s",
"fk": "fk_%(table_name)s_%(column_0_name)s_%(referred_table_name)s",
"pk": "pk_%(table_name)s",
}
)
)
migrate: Migrate = Migrate()
def create_app(env: Optional[str] = None, inject_dependencies: bool = True):
"""Creates and configures a fresh instance of the Dioptra REST API.
Args:
env: The configuration environment to use for the application. The allowed
values are `"dev"`, `"prod"` and `"test"`. If `None`, the `"test"`
configuration is used. The default is `None`.
inject_dependencies: Controls whether or not the dependency injection settings
in the ``dependencies.py`` files will be used. If `False`, then dependency
injection is not used and the configuration of the shared services must be
handled after the :py:class:`~flask.Flask` object is created. This is mostly
useful when performing unit tests. The default is `True`.
Returns:
An initialized and configured :py:class:`~flask.Flask` object.
"""
from .config import config_by_name
from .dependencies import bind_dependencies, register_providers
from .errors import register_error_handlers
from .routes import register_routes
if env is None:
env = "test"
app: Flask = Flask(__name__)
app.config.from_object(config_by_name[env])
api: Api = Api(
app,
title="Dioptra REST API",
version=API_VERSION,
)
modules: List[Callable[..., Any]] = [bind_dependencies]
register_routes(api, app)
register_error_handlers(api)
register_providers(modules)
csrf.init_app(app)
db.init_app(app)
with app.app_context():
migrate.init_app(app, db, render_as_batch=True)
@app.route("/health")
def health():
"""An endpoint for monitoring if the REST API is responding to requests."""
log = LOGGER.new(request_id=str(uuid.uuid4())) # noqa: F841
return jsonify("healthy")
if not inject_dependencies:
return app
FlaskInjector(app=app, modules=modules)
return app | src/mitre/securingai/restapi/app.py | import uuid
from typing import Any, Callable, List, Optional
import structlog
from flask import Flask, jsonify
from flask_injector import FlaskInjector
from flask_migrate import Migrate
from flask_restx import Api
from flask_sqlalchemy import SQLAlchemy
from flask_wtf import CSRFProtect
from sqlalchemy import MetaData
from structlog.stdlib import BoundLogger
from .__version__ import __version__ as API_VERSION
# Module-level structlog logger used by route handlers below.
LOGGER: BoundLogger = structlog.stdlib.get_logger()

# Flask extension singletons; each is bound to the app inside create_app().
csrf: CSRFProtect = CSRFProtect()
db: SQLAlchemy = SQLAlchemy(
    metadata=MetaData(
        # Explicit constraint-naming conventions so Alembic migrations get
        # stable, predictable constraint names across database backends.
        naming_convention={
            "ix": "ix_%(column_0_label)s",
            "uq": "uq_%(table_name)s_%(column_0_name)s",
            "ck": "ck_%(table_name)s_%(column_0_name)s",
            "fk": "fk_%(table_name)s_%(column_0_name)s_%(referred_table_name)s",
            "pk": "pk_%(table_name)s",
        }
    )
)
migrate: Migrate = Migrate()
def create_app(env: Optional[str] = None, inject_dependencies: bool = True):
    """Creates and configures a fresh instance of the Dioptra REST API.

    Args:
        env: The configuration environment to use for the application. The allowed
            values are `"dev"`, `"prod"` and `"test"`. If `None`, the `"test"`
            configuration is used. The default is `None`.
        inject_dependencies: Controls whether or not the dependency injection settings
            in the ``dependencies.py`` files will be used. If `False`, then dependency
            injection is not used and the configuration of the shared services must be
            handled after the :py:class:`~flask.Flask` object is created. This is mostly
            useful when performing unit tests. The default is `True`.

    Returns:
        An initialized and configured :py:class:`~flask.Flask` object.
    """
    # Imported lazily to avoid circular imports at module load time.
    from .config import config_by_name
    from .dependencies import bind_dependencies, register_providers
    from .errors import register_error_handlers
    from .routes import register_routes

    if env is None:
        env = "test"

    app: Flask = Flask(__name__)
    app.config.from_object(config_by_name[env])

    api: Api = Api(
        app,
        title="Dioptra REST API",
        version=API_VERSION,
    )
    # Injector modules; register_providers() below may append to this list.
    modules: List[Callable[..., Any]] = [bind_dependencies]

    register_routes(api, app)
    register_error_handlers(api)
    register_providers(modules)

    # Bind the module-level extension singletons to this app instance.
    csrf.init_app(app)
    db.init_app(app)

    with app.app_context():
        # render_as_batch=True makes ALTER TABLE migrations work on SQLite.
        migrate.init_app(app, db, render_as_batch=True)

    @app.route("/health")
    def health():
        """An endpoint for monitoring if the REST API is responding to requests."""
        log = LOGGER.new(request_id=str(uuid.uuid4()))  # noqa: F841
        return jsonify("healthy")

    if not inject_dependencies:
        return app

    # Wire up dependency injection last, once all routes/providers are known.
    FlaskInjector(app=app, modules=modules)

    return app
from __future__ import absolute_import, print_function
import os
import sys
import time
import signal
import locale
import unittest
import pyuv
import gruvi
from gruvi.hub import get_hub
from gruvi.errors import Timeout
from gruvi.process import Process, PIPE, DEVNULL
from gruvi.stream import StreamClient
from support import UnitTest
def python_script(args):
    """Return *args* adjusted so test scripts run via Python on Windows.

    On non-Windows platforms *args* is returned untouched.  On Windows the
    first element is rewritten to point at the script under test/bin and
    the Python interpreter is prepended, because the .exe wrapper does not
    appear to pass non-stdio handles on to the child (or they are not
    inheritable).
    """
    if not sys.platform.startswith('win'):
        return args
    argv = [args] if isinstance(args, str) else args
    argv[0] = 'bin\\{0}'.format(argv[0])
    return [sys.executable] + argv
class TestProcess(UnitTest):
def test_spawn(self):
    """Spawning a child works: pid is an int and exit status is reported."""
    proc = Process()
    proc.spawn('true')
    self.assertIsInstance(proc.pid, int)
    proc.wait()
    self.assertEqual(proc.returncode, 0)
    proc.close()
def test_respawn(self):
    """spawn() may be called again only after close(); statuses stay independent."""
    proc = Process()
    proc.spawn('true')
    proc.wait()
    self.assertEqual(proc.returncode, 0)
    # Respawning on a Process that has not been closed is an error.
    self.assertRaises(RuntimeError, proc.spawn, 'true')
    proc.close()
    proc.spawn('false')
    proc.wait()
    self.assertEqual(proc.returncode, 1)
    proc.close()
def test_spawn_shell(self):
    """Spawning with shell=True works for both success and failure statuses."""
    proc = Process()
    proc.spawn('true', shell=True)
    proc.wait()
    self.assertEqual(proc.returncode, 0)
    proc.close()
    proc.spawn('false', shell=True)
    proc.wait()
    self.assertEqual(proc.returncode, 1)
    proc.close()
def test_spawn_args(self):
    """Arguments reach the child.

    'exitn' is a helper in test/bin; judging by the assertion it appears
    to exit with the sum of its arguments (1+2+3 == 6).
    """
    proc = Process()
    proc.spawn(['exitn', '1', '2', '3'])
    proc.wait()
    self.assertEqual(proc.returncode, 6)
    proc.close()
def test_spawn_shell_args(self):
    """Arguments reach the child when spawned through a shell (see test_spawn_args)."""
    proc = Process()
    proc.spawn('exitn 1 2 3', shell=True)
    proc.wait()
    self.assertEqual(proc.returncode, 6)
    proc.close()
def test_spawn_executable(self):
    """The executable argument overrides argv[0]: 'true' runs, so status is 0."""
    proc = Process()
    proc.spawn(['exit', '1'], executable='true')
    proc.wait()
    self.assertEqual(proc.returncode, 0)
    proc.close()
def test_spawn_shell_executable(self):
    """With shell=True the executable argument replaces the shell itself.

    Using 'echo' as the "shell" makes the child print the shell flag and
    the command line instead of executing it.
    """
    proc = Process()
    proc.spawn(['exit 1'], shell=True, executable='echo', stdout=PIPE)
    output = proc.stdout.readline().split()
    proc.wait()
    self.assertEqual(proc.returncode, 0)
    # os.fork presence distinguishes POSIX ('-c') from Windows ('/c').
    self.assertEqual(output[0], b'-c' if hasattr(os, 'fork') else b'/c')
    self.assertEqual(output[1], b'exit')
    self.assertEqual(output[2], b'1')
    proc.close()
def test_exit(self):
    """The child's exit status is correctly reported via returncode."""
    proc = Process()
    proc.spawn(['exitn', '5'])
    proc.wait()
    self.assertEqual(proc.returncode, 5)
    proc.close()
def test_spawn_cwd(self):
# Ensure that the "cwd" argument to spawn is effective.
proc = Process()
encoding = locale.getpreferredencoding()
curdir = os.getcwd()
tempdir = self.tempdir
self.assertNotEqual(tempdir, curdir)
proc.spawn('pwd', stdout=PIPE)
childdir = proc.stdout.readline().rstrip().decode(encoding)
proc.wait()
self.assertEqual(proc.returncode, 0)
self.assertEqual(childdir, curdir)
proc.close()
proc = Process()
proc.spawn('pwd', stdout=PIPE, cwd=tempdir)
childdir = proc.stdout.readline().rstrip().decode(encoding)
proc.wait()
self.assertEqual(proc.returncode, 0)
self.assertEqual(childdir, tempdir)
proc.close()
def test_spawn_env(self):
# Ensure that the "env" argument to spawn is effective.
proc = Process()
encoding = locale.getpreferredencoding()
env = os.environ.copy()
env['FOO'] = 'Bar'
self.assertNotEqual(os.environ.get('FOO'), env['FOO'])
proc.spawn(['echo', '$FOO'], stdout=PIPE, env=env)
value = proc.stdout.readline().rstrip().decode(encoding)
proc.wait()
self.assertEqual(proc.returncode, 0)
self.assertEqual(value, env['FOO'])
proc.close()
def test_returncode(self):
# Ensure that the returncode attribute gets set when the child exits.
proc = Process()
proc.spawn(['sleep', '0.2'])
tries = 0
while True:
if proc.returncode is not None:
break
tries += 1
gruvi.sleep(0.02)
self.assertEqual(proc.returncode, 0)
self.assertGreater(tries, 5)
proc.close()
def test_wait(self):
# Ensure that wait() waits for the child to exit.
proc = Process()
t0 = time.time()
proc.spawn(['sleep', '0.2'])
proc.wait()
t1 = time.time()
self.assertGreater(t1-t0, 0.2)
self.assertEqual(proc.returncode, 0)
proc.close()
def test_wait_timeout(self):
# Ensure that wait() honors its "timeout" argument.
proc = Process()
proc.spawn(['sleep', '10'])
self.assertRaises(Timeout, proc.wait, 0.1)
proc.terminate()
proc.wait()
proc.close()
def test_stdout(self):
# Ensure that it's possible to capure stdout using stdout=PIPE
proc = Process()
proc.spawn('catn', stdin=PIPE, stdout=PIPE)
proc.stdin.write(b'Foo\n')
proc.stdin.close()
self.assertEqual(proc.stdout.readline(), b'Foo\n')
self.assertEqual(proc.stdout.readline(), b'')
proc.wait()
self.assertEqual(proc.returncode, 0)
proc.close()
def test_stderr(self):
# Ensure that it's possible to capure stderr using stderr=PIPE
proc = Process()
proc.spawn(['catn', '0', '2'], stdin=PIPE, stdout=PIPE, stderr=PIPE)
proc.stdin.write(b'Foo\n')
proc.stdin.close()
self.assertEqual(proc.stderr.readline(), b'Foo\n')
self.assertEqual(proc.stderr.readline(), b'')
self.assertEqual(proc.stdout.readline(), b'')
proc.wait()
self.assertEqual(proc.returncode, 0)
proc.close()
def test_devnull(self):
# Ensure that using stdout=DEVNULL doesn't produce any output.
proc = Process()
proc.spawn('catn', stdin=PIPE, stdout=DEVNULL)
proc.stdin.write(b'Foo\n')
proc.stdin.close()
self.assertIsNone(proc.stdout)
proc.wait()
self.assertEqual(proc.returncode, 0)
proc.close()
def test_stdio_encoding(self):
# Ensure that passing encoding=xxx to the constructor works.
encoding = locale.getpreferredencoding()
proc = Process(encoding=encoding)
proc.spawn('catn', stdin=PIPE, stdout=PIPE)
proc.stdin.write(u'20 \u20ac\n')
proc.stdin.close()
self.assertEqual(proc.stdout.readline(), u'20 \u20ac\n')
self.assertEqual(proc.wait(), 0)
proc.close()
def test_spawn_unicode_args(self):
# Ensure that it's possible to spawn a child with unicode arguments.
proc = Process()
env = os.environ.copy()
# Ensure to have a capable sys.stdout encoding.
env['PYTHONIOENCODING'] = 'utf-8'
proc.spawn(['echo', 'foo', u'\u20ac'], stdout=PIPE, env=env)
# Unsure why a \x00 is present at the end
line = proc.stdout.readline().rstrip()
self.assertEqual(line, u'foo \u20ac'.encode('utf-8'))
proc.wait()
proc.close()
def test_spawn_unicode_env(self):
# Ensure that it's possible to spawn a child with a unicode environment.
proc = Process()
env = os.environ.copy()
env['PYTHONIOENCODING'] = 'utf-8'
env['FOO'] = u'foo \u20ac'
proc.spawn(['echo', '$FOO'], stdout=PIPE, env=env)
line = proc.stdout.readline().rstrip()
self.assertEqual(line, u'foo \u20ac'.encode('utf-8'))
proc.wait()
proc.close()
def test_timeout(self):
# Ensure that the timeout=xxx constructor argument works.
proc = Process(timeout=0.1)
proc.spawn('catn', stdin=PIPE, stdout=PIPE)
self.assertRaises(Timeout, proc.stdout.readline)
proc.stdin.write(b'foo\n')
self.assertEqual(proc.stdout.readline(), b'foo\n')
proc.stdin.close()
self.assertEqual(proc.wait(), 0)
proc.close()
def test_inherit_handle(self):
# Ensure that it's possible to pass a handle to the child.
# Note: The "ipc" flag doubles as a read/write flag.
hub = get_hub()
handle = pyuv.Pipe(hub.loop, True)
proc = Process()
proc.spawn(python_script(['catn', '3']), extra_handles=[handle])
stream = StreamClient()
stream.connect(handle)
stream.write(b'Foo\n')
self.assertEqual(stream.readline(), b'Foo\n')
stream.write_eof()
self.assertEqual(stream.readline(), b'')
stream.close()
proc.wait()
self.assertEqual(proc.returncode, 0)
proc.close()
def test_send_signal(self):
# Ensure we can send a signal to our child using send_signal(0
proc = Process()
proc.spawn(['sleep', '1'])
proc.send_signal(signal.SIGINT)
proc.wait()
self.assertEqual(proc.returncode, -signal.SIGINT)
proc.close()
def test_terminate(self):
# Ensure that terminate() kills our child.
proc = Process()
proc.spawn(['sleep', '1'])
proc.terminate()
proc.wait()
self.assertEqual(proc.returncode, -signal.SIGTERM)
proc.terminate() # should not error
proc.close()
def test_child_exited(self):
# Ensure that the child_exited callback gets called.
proc = Process()
cbargs = []
def on_child_exit(*args):
cbargs.extend(args)
proc.child_exited = on_child_exit
proc.spawn(['sleep', '0'])
proc.wait()
self.assertEqual(proc.returncode, 0)
self.assertEqual(cbargs[0], 0)
self.assertEqual(cbargs[1], 0)
proc.close()
def test_send_data(self):
# Test that sending a lot of data works.
proc = Process()
proc.spawn('catn', stdin=PIPE, stdout=PIPE)
buf = b'x' * 1024
nbytes = 10*1024*1024 # Send 10MB
result = [0, 0]
def writer():
while result[0] < nbytes:
towrite = min(1024, nbytes - result[0])
proc.stdin.write(buf[:towrite])
result[0] += towrite
proc.stdin.flush()
proc.stdin.write_eof()
def reader():
while True:
read = len(proc.stdout.read1(1024))
if read == 0:
break
result[1] += read
gruvi.spawn(writer)
gruvi.spawn(reader).join()
proc.wait()
self.assertEqual(proc.returncode, 0)
self.assertEqual(result[0], nbytes)
self.assertEqual(result[1], nbytes)
proc.close()
def test_communicate(self):
# Test that communicate() works
proc = Process()
proc.spawn('catn', stdin=PIPE, stdout=PIPE)
buf = b'x' * 1024
stdout, stderr = proc.communicate(buf)
self.assertEqual(stdout, buf)
self.assertEqual(len(stderr), 0)
self.assertEqual(proc.returncode, 0)
proc.close()
def test_communicate_stderr(self):
# Test that communicate() works with stderr
proc = Process()
proc.spawn(['catn', '0', '2'], stdin=PIPE, stderr=PIPE)
buf = b'x' * 1024
stdout, stderr = proc.communicate(buf)
self.assertEqual(len(stdout), 0)
self.assertEqual(stderr, buf)
self.assertEqual(proc.returncode, 0)
proc.close()
def test_communicate_timeout(self):
# Test that communicate() honors its "timeout" argument
proc = Process()
proc.spawn(['sleep', '10'], stdin=PIPE, stdout=PIPE)
buf = b'x' * 1024
self.assertRaises(Timeout, proc.communicate, buf, timeout=0.1)
proc.terminate()
proc.wait()
proc.close()
def test_no_child(self):
# Test method behavior when there is no child.
proc = Process()
self.assertIsNone(proc.pid)
self.assertRaises(RuntimeError, proc.send_signal, signal.SIGTERM)
self.assertRaises(RuntimeError, proc.terminate)
self.assertRaises(RuntimeError, proc.wait)
self.assertRaises(RuntimeError, proc.communicate)
proc.close()
if __name__ == '__main__':
unittest.main() | tests/test_process.py |
from __future__ import absolute_import, print_function
import os
import sys
import time
import signal
import locale
import unittest
import pyuv
import gruvi
from gruvi.hub import get_hub
from gruvi.errors import Timeout
from gruvi.process import Process, PIPE, DEVNULL
from gruvi.stream import StreamClient
from support import UnitTest
def python_script(args):
"""On Windows, modify *args* so that a script in test/bin is executed
directly via Python rather than via the .exe wrapper."""
# This is needed to make inheriting of handles other than stdio work. My
# assumption is that the .exe wrapper is not passing on these handles (or
# that they are somehow not inheritable).
if not sys.platform.startswith('win'):
return args
if isinstance(args, str):
args = [args]
args[0] = 'bin\\{0}'.format(args[0])
return [sys.executable] + args
class TestProcess(UnitTest):
def test_spawn(self):
# Ensure that spawning a child works.
proc = Process()
proc.spawn('true')
self.assertIsInstance(proc.pid, int)
proc.wait()
self.assertEqual(proc.returncode, 0)
proc.close()
def test_respawn(self):
# Ensure that calling spawn() again after the child has exited works.
proc = Process()
proc.spawn('true')
proc.wait()
self.assertEqual(proc.returncode, 0)
self.assertRaises(RuntimeError, proc.spawn, 'true')
proc.close()
proc.spawn('false')
proc.wait()
self.assertEqual(proc.returncode, 1)
proc.close()
def test_spawn_shell(self):
# Ensure spawning a child with shell=True works.
proc = Process()
proc.spawn('true', shell=True)
proc.wait()
self.assertEqual(proc.returncode, 0)
proc.close()
proc.spawn('false', shell=True)
proc.wait()
self.assertEqual(proc.returncode, 1)
proc.close()
def test_spawn_args(self):
# Ensure that passing arguments to our child works.
proc = Process()
proc.spawn(['exitn', '1', '2', '3'])
proc.wait()
self.assertEqual(proc.returncode, 6)
proc.close()
def test_spawn_shell_args(self):
# Ensure that passing arguments to our child works with shell=True.
proc = Process()
proc.spawn('exitn 1 2 3', shell=True)
proc.wait()
self.assertEqual(proc.returncode, 6)
proc.close()
def test_spawn_executable(self):
# Ensure that spawn honors the executable argument.
proc = Process()
proc.spawn(['exit', '1'], executable='true')
proc.wait()
self.assertEqual(proc.returncode, 0)
proc.close()
def test_spawn_shell_executable(self):
# Ensure that spawn honors the executable argument with shell=True.
proc = Process()
proc.spawn(['exit 1'], shell=True, executable='echo', stdout=PIPE)
output = proc.stdout.readline().split()
proc.wait()
self.assertEqual(proc.returncode, 0)
self.assertEqual(output[0], b'-c' if hasattr(os, 'fork') else b'/c')
self.assertEqual(output[1], b'exit')
self.assertEqual(output[2], b'1')
proc.close()
def test_exit(self):
# Ensure that the child's exist status is correctly reported.
proc = Process()
proc.spawn(['exitn', '5'])
proc.wait()
self.assertEqual(proc.returncode, 5)
proc.close()
def test_spawn_cwd(self):
# Ensure that the "cwd" argument to spawn is effective.
proc = Process()
encoding = locale.getpreferredencoding()
curdir = os.getcwd()
tempdir = self.tempdir
self.assertNotEqual(tempdir, curdir)
proc.spawn('pwd', stdout=PIPE)
childdir = proc.stdout.readline().rstrip().decode(encoding)
proc.wait()
self.assertEqual(proc.returncode, 0)
self.assertEqual(childdir, curdir)
proc.close()
proc = Process()
proc.spawn('pwd', stdout=PIPE, cwd=tempdir)
childdir = proc.stdout.readline().rstrip().decode(encoding)
proc.wait()
self.assertEqual(proc.returncode, 0)
self.assertEqual(childdir, tempdir)
proc.close()
def test_spawn_env(self):
# Ensure that the "env" argument to spawn is effective.
proc = Process()
encoding = locale.getpreferredencoding()
env = os.environ.copy()
env['FOO'] = 'Bar'
self.assertNotEqual(os.environ.get('FOO'), env['FOO'])
proc.spawn(['echo', '$FOO'], stdout=PIPE, env=env)
value = proc.stdout.readline().rstrip().decode(encoding)
proc.wait()
self.assertEqual(proc.returncode, 0)
self.assertEqual(value, env['FOO'])
proc.close()
def test_returncode(self):
# Ensure that the returncode attribute gets set when the child exits.
proc = Process()
proc.spawn(['sleep', '0.2'])
tries = 0
while True:
if proc.returncode is not None:
break
tries += 1
gruvi.sleep(0.02)
self.assertEqual(proc.returncode, 0)
self.assertGreater(tries, 5)
proc.close()
def test_wait(self):
# Ensure that wait() waits for the child to exit.
proc = Process()
t0 = time.time()
proc.spawn(['sleep', '0.2'])
proc.wait()
t1 = time.time()
self.assertGreater(t1-t0, 0.2)
self.assertEqual(proc.returncode, 0)
proc.close()
def test_wait_timeout(self):
# Ensure that wait() honors its "timeout" argument.
proc = Process()
proc.spawn(['sleep', '10'])
self.assertRaises(Timeout, proc.wait, 0.1)
proc.terminate()
proc.wait()
proc.close()
def test_stdout(self):
# Ensure that it's possible to capure stdout using stdout=PIPE
proc = Process()
proc.spawn('catn', stdin=PIPE, stdout=PIPE)
proc.stdin.write(b'Foo\n')
proc.stdin.close()
self.assertEqual(proc.stdout.readline(), b'Foo\n')
self.assertEqual(proc.stdout.readline(), b'')
proc.wait()
self.assertEqual(proc.returncode, 0)
proc.close()
def test_stderr(self):
# Ensure that it's possible to capure stderr using stderr=PIPE
proc = Process()
proc.spawn(['catn', '0', '2'], stdin=PIPE, stdout=PIPE, stderr=PIPE)
proc.stdin.write(b'Foo\n')
proc.stdin.close()
self.assertEqual(proc.stderr.readline(), b'Foo\n')
self.assertEqual(proc.stderr.readline(), b'')
self.assertEqual(proc.stdout.readline(), b'')
proc.wait()
self.assertEqual(proc.returncode, 0)
proc.close()
def test_devnull(self):
# Ensure that using stdout=DEVNULL doesn't produce any output.
proc = Process()
proc.spawn('catn', stdin=PIPE, stdout=DEVNULL)
proc.stdin.write(b'Foo\n')
proc.stdin.close()
self.assertIsNone(proc.stdout)
proc.wait()
self.assertEqual(proc.returncode, 0)
proc.close()
def test_stdio_encoding(self):
# Ensure that passing encoding=xxx to the constructor works.
encoding = locale.getpreferredencoding()
proc = Process(encoding=encoding)
proc.spawn('catn', stdin=PIPE, stdout=PIPE)
proc.stdin.write(u'20 \u20ac\n')
proc.stdin.close()
self.assertEqual(proc.stdout.readline(), u'20 \u20ac\n')
self.assertEqual(proc.wait(), 0)
proc.close()
def test_spawn_unicode_args(self):
# Ensure that it's possible to spawn a child with unicode arguments.
proc = Process()
env = os.environ.copy()
# Ensure to have a capable sys.stdout encoding.
env['PYTHONIOENCODING'] = 'utf-8'
proc.spawn(['echo', 'foo', u'\u20ac'], stdout=PIPE, env=env)
# Unsure why a \x00 is present at the end
line = proc.stdout.readline().rstrip()
self.assertEqual(line, u'foo \u20ac'.encode('utf-8'))
proc.wait()
proc.close()
def test_spawn_unicode_env(self):
# Ensure that it's possible to spawn a child with a unicode environment.
proc = Process()
env = os.environ.copy()
env['PYTHONIOENCODING'] = 'utf-8'
env['FOO'] = u'foo \u20ac'
proc.spawn(['echo', '$FOO'], stdout=PIPE, env=env)
line = proc.stdout.readline().rstrip()
self.assertEqual(line, u'foo \u20ac'.encode('utf-8'))
proc.wait()
proc.close()
def test_timeout(self):
# Ensure that the timeout=xxx constructor argument works.
proc = Process(timeout=0.1)
proc.spawn('catn', stdin=PIPE, stdout=PIPE)
self.assertRaises(Timeout, proc.stdout.readline)
proc.stdin.write(b'foo\n')
self.assertEqual(proc.stdout.readline(), b'foo\n')
proc.stdin.close()
self.assertEqual(proc.wait(), 0)
proc.close()
def test_inherit_handle(self):
# Ensure that it's possible to pass a handle to the child.
# Note: The "ipc" flag doubles as a read/write flag.
hub = get_hub()
handle = pyuv.Pipe(hub.loop, True)
proc = Process()
proc.spawn(python_script(['catn', '3']), extra_handles=[handle])
stream = StreamClient()
stream.connect(handle)
stream.write(b'Foo\n')
self.assertEqual(stream.readline(), b'Foo\n')
stream.write_eof()
self.assertEqual(stream.readline(), b'')
stream.close()
proc.wait()
self.assertEqual(proc.returncode, 0)
proc.close()
def test_send_signal(self):
# Ensure we can send a signal to our child using send_signal(0
proc = Process()
proc.spawn(['sleep', '1'])
proc.send_signal(signal.SIGINT)
proc.wait()
self.assertEqual(proc.returncode, -signal.SIGINT)
proc.close()
def test_terminate(self):
# Ensure that terminate() kills our child.
proc = Process()
proc.spawn(['sleep', '1'])
proc.terminate()
proc.wait()
self.assertEqual(proc.returncode, -signal.SIGTERM)
proc.terminate() # should not error
proc.close()
def test_child_exited(self):
# Ensure that the child_exited callback gets called.
proc = Process()
cbargs = []
def on_child_exit(*args):
cbargs.extend(args)
proc.child_exited = on_child_exit
proc.spawn(['sleep', '0'])
proc.wait()
self.assertEqual(proc.returncode, 0)
self.assertEqual(cbargs[0], 0)
self.assertEqual(cbargs[1], 0)
proc.close()
def test_send_data(self):
# Test that sending a lot of data works.
proc = Process()
proc.spawn('catn', stdin=PIPE, stdout=PIPE)
buf = b'x' * 1024
nbytes = 10*1024*1024 # Send 10MB
result = [0, 0]
def writer():
while result[0] < nbytes:
towrite = min(1024, nbytes - result[0])
proc.stdin.write(buf[:towrite])
result[0] += towrite
proc.stdin.flush()
proc.stdin.write_eof()
def reader():
while True:
read = len(proc.stdout.read1(1024))
if read == 0:
break
result[1] += read
gruvi.spawn(writer)
gruvi.spawn(reader).join()
proc.wait()
self.assertEqual(proc.returncode, 0)
self.assertEqual(result[0], nbytes)
self.assertEqual(result[1], nbytes)
proc.close()
def test_communicate(self):
# Test that communicate() works
proc = Process()
proc.spawn('catn', stdin=PIPE, stdout=PIPE)
buf = b'x' * 1024
stdout, stderr = proc.communicate(buf)
self.assertEqual(stdout, buf)
self.assertEqual(len(stderr), 0)
self.assertEqual(proc.returncode, 0)
proc.close()
def test_communicate_stderr(self):
# Test that communicate() works with stderr
proc = Process()
proc.spawn(['catn', '0', '2'], stdin=PIPE, stderr=PIPE)
buf = b'x' * 1024
stdout, stderr = proc.communicate(buf)
self.assertEqual(len(stdout), 0)
self.assertEqual(stderr, buf)
self.assertEqual(proc.returncode, 0)
proc.close()
def test_communicate_timeout(self):
# Test that communicate() honors its "timeout" argument
proc = Process()
proc.spawn(['sleep', '10'], stdin=PIPE, stdout=PIPE)
buf = b'x' * 1024
self.assertRaises(Timeout, proc.communicate, buf, timeout=0.1)
proc.terminate()
proc.wait()
proc.close()
def test_no_child(self):
# Test method behavior when there is no child.
proc = Process()
self.assertIsNone(proc.pid)
self.assertRaises(RuntimeError, proc.send_signal, signal.SIGTERM)
self.assertRaises(RuntimeError, proc.terminate)
self.assertRaises(RuntimeError, proc.wait)
self.assertRaises(RuntimeError, proc.communicate)
proc.close()
if __name__ == '__main__':
unittest.main() | 0.52756 | 0.313105 |
# Import libraries
import os
from docopt import docopt
import pandas as pd
import numpy as np
from sklearn.compose import ColumnTransformer
from sklearn.dummy import DummyClassifier
from sklearn.pipeline import Pipeline, make_pipeline
from sklearn.preprocessing import (
OneHotEncoder,
StandardScaler
)
from sklearn.svm import SVC
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier, RandomForestRegressor
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import (
GridSearchCV,
RandomizedSearchCV,
cross_validate,
train_test_split,
)
opt = docopt(__doc__)
def main(path_1, path_2, out_dir):
# Read the data and split the data
train_df = pd.read_csv(path_1)
test_df = pd.read_csv(path_2)
X_train, y_train = train_df.drop(["target"], axis=1), train_df["target"]
X_test, y_test = test_df.drop(["target"], axis=1), test_df["target"]
# Indentify feature types and define the transformations
numeric_features = ["fixed acidity", "volatile acidity", "citric acid", "residual sugar", "chlorides", "free sulfur dioxide", "total sulfur dioxide",
"density", "pH", "sulphates", "alcohol"]
binary_features = ["type"]
numeric_transformer = make_pipeline(StandardScaler())
binary_transformer = make_pipeline(OneHotEncoder(drop='if_binary', dtype=int))
preprocessor = ColumnTransformer(
transformers=[
("num", numeric_transformer, numeric_features),
("binary", binary_transformer, binary_features),
])
# Use DummyClassifier as baseline and store the results
results_dict = {}
dummy = DummyClassifier(strategy = "most_frequent")
dummy_score = cross_validate(dummy, X_train, y_train, return_train_score=True)
store_cross_val_results("DummyClassifier", dummy_score, results_dict)
# Comparing different classifiers and store the results
models = {
"Decision Tree": DecisionTreeClassifier(random_state=123),
"RBF SVM": SVC(random_state=123),
"Logistic Regression": LogisticRegression(max_iter=2000, random_state=123),
"Random Forest": RandomForestClassifier(random_state=123)
}
for model, classifier in models.items():
pip = make_pipeline(preprocessor, classifier)
scores = cross_validate(pip, X_train, y_train, n_jobs=-1, return_train_score=True)
store_cross_val_results(model, scores, results_dict)
model_comparison = pd.DataFrame(results_dict).T.reset_index().rename(columns={"index": "model"})
# Pick the Random Forest Classifier as our model and carry out the hyperparameter optimization using RandomizedSearchCV and save the results
rf_pipeline = make_pipeline(
preprocessor, RandomForestClassifier(random_state=123)
)
param_dist = {
"randomforestclassifier__n_estimators": list(range(50,200)),
"randomforestclassifier__max_depth": list(range(2,20)),
}
random_search = RandomizedSearchCV(rf_pipeline, param_distributions=param_dist, n_jobs=-1, n_iter=30, random_state=123)
random_search.fit(X_train, y_train)
hyperpara_result = pd.DataFrame(random_search.cv_results_).sort_values('rank_test_score').head()[["param_randomforestclassifier__max_depth",
"param_randomforestclassifier__n_estimators",
"mean_test_score",
"std_test_score",
"rank_test_score"]]
# Based on the result of RandomizedSearchCV, using the model with best hyperparamters on test set and save the test score
best_n = random_search.best_params_['randomforestclassifier__n_estimators']
best_depth = random_search.best_params_['randomforestclassifier__max_depth']
rf_pipeline = make_pipeline(
preprocessor, RandomForestClassifier(random_state=123, n_estimators=best_n,
max_depth=best_depth))
rf_pipeline.fit(X_train, y_train)
test_score = rf_pipeline.score(X_test, y_test)
summary = {"Test score": [test_score]}
test_score = pd.DataFrame(data = summary)
# Export the test results as csv files
model_comparison.to_csv(out_dir + "model_comparison.csv", index=False)
hyperpara_result.to_csv(out_dir + "hyperparameter_result.csv", index=False)
test_score.to_csv(out_dir + "test_score.csv", index=False)
# Define the helper function to store the cross-validation results for the model
def store_cross_val_results(model_name, scores, results_dict):
"""
Stores mean scores from cross_validate in results_dict for
the given model model_name.
Parameters
----------
model_name :
scikit-learn classification model
scores : dict
object return by `cross_validate`
results_dict: dict
dictionary to store results
Returns
----------
None
"""
results_dict[model_name] = {
"mean_train_accuracy": "{:0.4f}".format(np.mean(scores["train_score"])),
"mean_valid_accuracy": "{:0.4f}".format(np.mean(scores["test_score"])),
"mean_fit_time (s)": "{:0.4f}".format(np.mean(scores["fit_time"])),
"mean_score_time (s)": "{:0.4f}".format(np.mean(scores["score_time"])),
"std_train_score": "{:0.4f}".format(scores["train_score"].std()),
"std_valid_score": "{:0.4f}".format(scores["test_score"].std()),
}
if __name__ == "__main__":
main(opt["--path_1"], opt["--path_2"], opt["--out_dir"]) | src/ml_model.py | # Import libraries
import os
from docopt import docopt
import pandas as pd
import numpy as np
from sklearn.compose import ColumnTransformer
from sklearn.dummy import DummyClassifier
from sklearn.pipeline import Pipeline, make_pipeline
from sklearn.preprocessing import (
OneHotEncoder,
StandardScaler
)
from sklearn.svm import SVC
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier, RandomForestRegressor
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import (
GridSearchCV,
RandomizedSearchCV,
cross_validate,
train_test_split,
)
opt = docopt(__doc__)
def main(path_1, path_2, out_dir):
# Read the data and split the data
train_df = pd.read_csv(path_1)
test_df = pd.read_csv(path_2)
X_train, y_train = train_df.drop(["target"], axis=1), train_df["target"]
X_test, y_test = test_df.drop(["target"], axis=1), test_df["target"]
# Indentify feature types and define the transformations
numeric_features = ["fixed acidity", "volatile acidity", "citric acid", "residual sugar", "chlorides", "free sulfur dioxide", "total sulfur dioxide",
"density", "pH", "sulphates", "alcohol"]
binary_features = ["type"]
numeric_transformer = make_pipeline(StandardScaler())
binary_transformer = make_pipeline(OneHotEncoder(drop='if_binary', dtype=int))
preprocessor = ColumnTransformer(
transformers=[
("num", numeric_transformer, numeric_features),
("binary", binary_transformer, binary_features),
])
# Use DummyClassifier as baseline and store the results
results_dict = {}
dummy = DummyClassifier(strategy = "most_frequent")
dummy_score = cross_validate(dummy, X_train, y_train, return_train_score=True)
store_cross_val_results("DummyClassifier", dummy_score, results_dict)
# Comparing different classifiers and store the results
models = {
"Decision Tree": DecisionTreeClassifier(random_state=123),
"RBF SVM": SVC(random_state=123),
"Logistic Regression": LogisticRegression(max_iter=2000, random_state=123),
"Random Forest": RandomForestClassifier(random_state=123)
}
for model, classifier in models.items():
pip = make_pipeline(preprocessor, classifier)
scores = cross_validate(pip, X_train, y_train, n_jobs=-1, return_train_score=True)
store_cross_val_results(model, scores, results_dict)
model_comparison = pd.DataFrame(results_dict).T.reset_index().rename(columns={"index": "model"})
# Pick the Random Forest Classifier as our model and carry out the hyperparameter optimization using RandomizedSearchCV and save the results
rf_pipeline = make_pipeline(
preprocessor, RandomForestClassifier(random_state=123)
)
param_dist = {
"randomforestclassifier__n_estimators": list(range(50,200)),
"randomforestclassifier__max_depth": list(range(2,20)),
}
random_search = RandomizedSearchCV(rf_pipeline, param_distributions=param_dist, n_jobs=-1, n_iter=30, random_state=123)
random_search.fit(X_train, y_train)
hyperpara_result = pd.DataFrame(random_search.cv_results_).sort_values('rank_test_score').head()[["param_randomforestclassifier__max_depth",
"param_randomforestclassifier__n_estimators",
"mean_test_score",
"std_test_score",
"rank_test_score"]]
# Based on the result of RandomizedSearchCV, using the model with best hyperparamters on test set and save the test score
best_n = random_search.best_params_['randomforestclassifier__n_estimators']
best_depth = random_search.best_params_['randomforestclassifier__max_depth']
rf_pipeline = make_pipeline(
preprocessor, RandomForestClassifier(random_state=123, n_estimators=best_n,
max_depth=best_depth))
rf_pipeline.fit(X_train, y_train)
test_score = rf_pipeline.score(X_test, y_test)
summary = {"Test score": [test_score]}
test_score = pd.DataFrame(data = summary)
# Export the test results as csv files
model_comparison.to_csv(out_dir + "model_comparison.csv", index=False)
hyperpara_result.to_csv(out_dir + "hyperparameter_result.csv", index=False)
test_score.to_csv(out_dir + "test_score.csv", index=False)
# Define the helper function to store the cross-validation results for the model
def store_cross_val_results(model_name, scores, results_dict):
"""
Stores mean scores from cross_validate in results_dict for
the given model model_name.
Parameters
----------
model_name :
scikit-learn classification model
scores : dict
object return by `cross_validate`
results_dict: dict
dictionary to store results
Returns
----------
None
"""
results_dict[model_name] = {
"mean_train_accuracy": "{:0.4f}".format(np.mean(scores["train_score"])),
"mean_valid_accuracy": "{:0.4f}".format(np.mean(scores["test_score"])),
"mean_fit_time (s)": "{:0.4f}".format(np.mean(scores["fit_time"])),
"mean_score_time (s)": "{:0.4f}".format(np.mean(scores["score_time"])),
"std_train_score": "{:0.4f}".format(scores["train_score"].std()),
"std_valid_score": "{:0.4f}".format(scores["test_score"].std()),
}
if __name__ == "__main__":
main(opt["--path_1"], opt["--path_2"], opt["--out_dir"]) | 0.794624 | 0.428652 |
import json
import urllib.parse
from json import JSONDecodeError
import requests_cache as requests
from neon_utils.logger import LOG
from requests.adapters import HTTPAdapter
from . import AUTH_CONFIG, NeonAPI, request_neon_api
SESSION = requests.CachedSession(backend='memory', cache_name="alpha_vantage")
SESSION.mount('http://', HTTPAdapter(max_retries=8))
SESSION.mount('https://', HTTPAdapter(max_retries=8))
def search_stock_by_name(company: str, **kwargs) -> list:
"""
Queries Alpha Vantage for stocks matching the specified company
:param company: Company name/stock search term
:param kwargs:
'api_key' - optional str api_key to use for query (None to force remote lookup)
'region' - optional preferred region (default `United States`)
:return: list of dict matched stock data
"""
api_key = kwargs.get("api_key", AUTH_CONFIG.get("alpha_vantage", {}).get("api_key"))
region = kwargs.get("region") or "United States"
if api_key:
query_params = {"function": "SYMBOL_SEARCH",
"keywords": company,
"apikey": api_key}
resp = query_alpha_vantage_api(f"https://www.alphavantage.co/query?{urllib.parse.urlencode(query_params)}")
else:
query_params = {**kwargs, **{"api": "symbol", "company": company}}
resp = request_neon_api(NeonAPI.ALPHA_VANTAGE, query_params)
if resp["status_code"] == -1:
data = {"error": resp["content"]}
else:
try:
data = json.loads(resp["content"])
except JSONDecodeError:
data = {"error": "Error decoding response",
"response": resp}
if data.get("Information"):
LOG.warning(data.get("Information"))
# TODO: Handle API Errors DM
if not data.get("bestMatches"):
LOG.warning(f"No matches found for {company}")
return []
filtered_data = [stock for stock in data.get("bestMatches") if stock.get("4. region") == region]
if not filtered_data:
filtered_data = data.get("bestMatches")
data = [{"symbol": stock.get("1. symbol"),
"name": stock.get("2. name"),
"region": stock.get("4. region"),
"currency": stock.get("8. currency")} for stock in filtered_data]
return data
def get_stock_quote(symbol: str, **kwargs) -> dict:
"""
Queries Alpha Vantage for stock information for the specified company
:param symbol: Stock ticker symbol
:param kwargs:
'api_key' - optional str api_key to use for query (None to force remote lookup)
:return: dict stock data
"""
api_key = kwargs.get("api_key", AUTH_CONFIG.get("alpha_vantage", {}).get("api_key"))
if api_key:
query_params = {"function": "GLOBAL_QUOTE",
"symbol": symbol,
"apikey": api_key}
resp = query_alpha_vantage_api(f"https://www.alphavantage.co/query?{urllib.parse.urlencode(query_params)}")
else:
query_params = {**kwargs, **{"api": "quote", "symbol": symbol}}
resp = request_neon_api(NeonAPI.ALPHA_VANTAGE, query_params)
if resp["status_code"] == -1:
data = {"error": resp["content"]}
else:
try:
data = json.loads(resp["content"])
except JSONDecodeError:
data = {"error": "Error decoding response",
"response": resp}
if data.get("Information"):
LOG.warning(data.get("Information"))
# TODO: Handle API Errors DM
if not data.get("Global Quote"):
LOG.warning(f"No data found for {symbol}")
data["error"] = data.get("error") or "No data found"
LOG.error(data)
return data
return {"symbol": data.get("Global Quote")["01. symbol"],
"price": data.get("Global Quote")["05. price"],
"close": data.get("Global Quote")["08. previous close"]}
def query_alpha_vantage_api(url: str) -> dict:
"""
Query the Alpha Vantage API and return the result
:param url: Alpha Vantage API URL to query
:return: dict status_code, content, encoding
"""
if "global_quote" in url.lower():
expiration = 5*60 # Cache quotes for 5 minutes
elif "symbol_search" in url.lower():
expiration = None
else:
LOG.warning(f"Unknown URL request; caching for 15 minutes: {url}")
expiration = 15*60
result = SESSION.get(url, expire_after=expiration)
return {"status_code": result.status_code,
"content": result.content,
"encoding": result.encoding,
"cached": result.from_cache} | neon_utils/service_apis/alpha_vantage.py |
import json
import urllib.parse
from json import JSONDecodeError
import requests_cache as requests
from neon_utils.logger import LOG
from requests.adapters import HTTPAdapter
from . import AUTH_CONFIG, NeonAPI, request_neon_api
# Shared cached HTTP session for all Alpha Vantage calls (in-memory cache),
# with adapters retrying up to 8 times on both schemes.
SESSION = requests.CachedSession(backend='memory', cache_name="alpha_vantage")
SESSION.mount('http://', HTTPAdapter(max_retries=8))
SESSION.mount('https://', HTTPAdapter(max_retries=8))
def search_stock_by_name(company: str, **kwargs) -> list:
    """
    Queries Alpha Vantage for stocks matching the specified company
    :param company: Company name/stock search term
    :param kwargs:
        'api_key' - optional str api_key to use for query (None to force remote lookup)
        'region' - optional preferred region (default `United States`)
    :return: list of dict matched stock data
    """
    api_key = kwargs.get("api_key", AUTH_CONFIG.get("alpha_vantage", {}).get("api_key"))
    region = kwargs.get("region") or "United States"
    if api_key:
        # Direct Alpha Vantage lookup when an API key is available
        encoded = urllib.parse.urlencode({"function": "SYMBOL_SEARCH",
                                          "keywords": company,
                                          "apikey": api_key})
        resp = query_alpha_vantage_api(f"https://www.alphavantage.co/query?{encoded}")
    else:
        # Fall back to proxying through the Neon API
        resp = request_neon_api(NeonAPI.ALPHA_VANTAGE,
                                {**kwargs, "api": "symbol", "company": company})
    if resp["status_code"] == -1:
        data = {"error": resp["content"]}
    else:
        try:
            data = json.loads(resp["content"])
        except JSONDecodeError:
            data = {"error": "Error decoding response",
                    "response": resp}
    if data.get("Information"):
        LOG.warning(data.get("Information"))
    # TODO: Handle API Errors DM
    matches = data.get("bestMatches")
    if not matches:
        LOG.warning(f"No matches found for {company}")
        return []
    # Prefer matches from the requested region, but fall back to everything
    regional = [stock for stock in matches if stock.get("4. region") == region] or matches
    return [{"symbol": stock.get("1. symbol"),
             "name": stock.get("2. name"),
             "region": stock.get("4. region"),
             "currency": stock.get("8. currency")} for stock in regional]
def get_stock_quote(symbol: str, **kwargs) -> dict:
    """
    Queries Alpha Vantage for stock information for the specified company
    :param symbol: Stock ticker symbol
    :param kwargs:
        'api_key' - optional str api_key to use for query (None to force remote lookup)
    :return: dict stock data, or a dict containing an 'error' key on failure
    """
    api_key = kwargs.get("api_key", AUTH_CONFIG.get("alpha_vantage", {}).get("api_key"))
    if api_key:
        # Query Alpha Vantage directly when we hold an API key
        query_params = {"function": "GLOBAL_QUOTE",
                        "symbol": symbol,
                        "apikey": api_key}
        resp = query_alpha_vantage_api(f"https://www.alphavantage.co/query?{urllib.parse.urlencode(query_params)}")
    else:
        # Otherwise proxy the request through the Neon API
        query_params = {**kwargs, **{"api": "quote", "symbol": symbol}}
        resp = request_neon_api(NeonAPI.ALPHA_VANTAGE, query_params)
    if resp["status_code"] == -1:
        data = {"error": resp["content"]}
    else:
        try:
            data = json.loads(resp["content"])
        except JSONDecodeError:
            data = {"error": "Error decoding response",
                    "response": resp}
    if data.get("Information"):
        # "Information" usually carries rate-limit notices from Alpha Vantage
        LOG.warning(data.get("Information"))
    # TODO: Handle API Errors DM
    quote = data.get("Global Quote")
    if not quote:
        LOG.warning(f"No data found for {symbol}")
        data["error"] = data.get("error") or "No data found"
        LOG.error(data)
        return data
    # Hoist the quote dict (the original re-read "Global Quote" three times)
    # and use .get so a partial payload yields None fields instead of KeyError.
    return {"symbol": quote.get("01. symbol"),
            "price": quote.get("05. price"),
            "close": quote.get("08. previous close")}
def query_alpha_vantage_api(url: str) -> dict:
    """
    Query the Alpha Vantage API and return the result
    :param url: Alpha Vantage API URL to query
    :return: dict status_code, content, encoding, cached
    """
    lowered = url.lower()
    if "global_quote" in lowered:
        # Quotes go stale quickly; keep them for five minutes only
        expiration = 300
    elif "symbol_search" in lowered:
        # None: no per-request expiration override for symbol searches
        expiration = None
    else:
        LOG.warning(f"Unknown URL request; caching for 15 minutes: {url}")
        expiration = 900
    result = SESSION.get(url, expire_after=expiration)
    return {"status_code": result.status_code,
            "content": result.content,
            "encoding": result.encoding,
            "cached": result.from_cache}
from rest_framework.views import APIView
from rest_framework.response import Response
from rest_framework import status
from rest_framework.authtoken.models import Token
from django.contrib.auth.models import User,Group
from registration.serializers import UserSerializer
from profBasic.models import profBasic
from profDetailed.models import profDetailed
from studentBasic.models import studentBasic
from studentDetailed.models import studentDetailed
class StudentCreate(APIView):
    """
    Creates the student User: validates input, issues an auth token, adds
    the user to the 'student' group, and provisions empty profile rows.
    """
    def post(self, request, format='json'):
        serializer = UserSerializer(data=request.data)
        if not serializer.is_valid():
            return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
        user = serializer.save()
        if not user:
            return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
        token = Token.objects.create(user=user)
        user.groups.add(Group.objects.get(name='student'))
        payload = serializer.data
        payload['token'] = token.key
        payload['type'] = 'student'
        # Provision both student profile records keyed by username
        studentBasic.objects.create(username=payload['username'])
        studentDetailed.objects.create(username=payload['username'])
        return Response(payload, status=status.HTTP_201_CREATED)
class ProfCreate(APIView):
    """
    Creates the professor User: validates input, issues an auth token, adds
    the user to the 'prof' group, and provisions empty profile rows.
    """
    def post(self, request, format='json'):
        serializer = UserSerializer(data=request.data)
        if not serializer.is_valid():
            return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
        user = serializer.save()
        if not user:
            return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
        token = Token.objects.create(user=user)
        user.groups.add(Group.objects.get(name='prof'))
        payload = serializer.data
        payload['token'] = token.key
        payload['type'] = 'prof'
        # Provision both professor profile records keyed by username
        profBasic.objects.create(username=payload['username'])
        profDetailed.objects.create(username=payload['username'])
        return Response(payload, status=status.HTTP_201_CREATED)
class ProfRemove(APIView):
    """
    Deletes the professor user and both of their profile records.
    Returns 204 on success, 404 when any record is missing.
    """
    def delete(self, request, pk, format='json'):
        # Model.objects.get() raises DoesNotExist rather than returning None,
        # so the original "is not None" checks were dead code and the 404
        # branch was unreachable; catch the exceptions instead.
        try:
            user = User.objects.get(username=pk)
            pb = profBasic.objects.get(pk=pk)
            pd = profDetailed.objects.get(pk=pk)
        except (User.DoesNotExist, profBasic.DoesNotExist, profDetailed.DoesNotExist):
            return Response(status=status.HTTP_404_NOT_FOUND)
        user.delete()
        pb.delete()
        pd.delete()
        return Response(status=status.HTTP_204_NO_CONTENT)
class StudentRemove(APIView):
    """
    Deletes the student user and both of their profile records.
    Returns 204 on success, 404 when any record is missing.
    """
    def delete(self, request, pk, format='json'):
        # Model.objects.get() raises DoesNotExist rather than returning None,
        # so the original "is not None" checks were dead code and the 404
        # branch was unreachable; catch the exceptions instead.
        try:
            user = User.objects.get(username=pk)
            sb = studentBasic.objects.get(pk=pk)
            sd = studentDetailed.objects.get(pk=pk)
        except (User.DoesNotExist, studentBasic.DoesNotExist, studentDetailed.DoesNotExist):
            return Response(status=status.HTTP_404_NOT_FOUND)
        user.delete()
        sb.delete()
        sd.delete()
        return Response(status=status.HTTP_204_NO_CONTENT)
from rest_framework.response import Response
from rest_framework import status
from rest_framework.authtoken.models import Token
from django.contrib.auth.models import User,Group
from registration.serializers import UserSerializer
from profBasic.models import profBasic
from profDetailed.models import profDetailed
from studentBasic.models import studentBasic
from studentDetailed.models import studentDetailed
class StudentCreate(APIView):
    """
    Creates the student User: validates input, issues an auth token, adds
    the user to the 'student' group, and provisions empty profile rows.
    """
    def post(self, request, format='json'):
        serializer = UserSerializer(data=request.data)
        if not serializer.is_valid():
            return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
        user = serializer.save()
        if not user:
            return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
        token = Token.objects.create(user=user)
        user.groups.add(Group.objects.get(name='student'))
        payload = serializer.data
        payload['token'] = token.key
        payload['type'] = 'student'
        # Provision both student profile records keyed by username
        studentBasic.objects.create(username=payload['username'])
        studentDetailed.objects.create(username=payload['username'])
        return Response(payload, status=status.HTTP_201_CREATED)
class ProfCreate(APIView):
    """
    Creates the professor User: validates input, issues an auth token, adds
    the user to the 'prof' group, and provisions empty profile rows.
    """
    def post(self, request, format='json'):
        serializer = UserSerializer(data=request.data)
        if not serializer.is_valid():
            return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
        user = serializer.save()
        if not user:
            return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
        token = Token.objects.create(user=user)
        user.groups.add(Group.objects.get(name='prof'))
        payload = serializer.data
        payload['token'] = token.key
        payload['type'] = 'prof'
        # Provision both professor profile records keyed by username
        profBasic.objects.create(username=payload['username'])
        profDetailed.objects.create(username=payload['username'])
        return Response(payload, status=status.HTTP_201_CREATED)
class ProfRemove(APIView):
    """
    Deletes the professor user and both of their profile records.
    Returns 204 on success, 404 when any record is missing.
    """
    def delete(self, request, pk, format='json'):
        # Model.objects.get() raises DoesNotExist rather than returning None,
        # so the original "is not None" checks were dead code and the 404
        # branch was unreachable; catch the exceptions instead.
        try:
            user = User.objects.get(username=pk)
            pb = profBasic.objects.get(pk=pk)
            pd = profDetailed.objects.get(pk=pk)
        except (User.DoesNotExist, profBasic.DoesNotExist, profDetailed.DoesNotExist):
            return Response(status=status.HTTP_404_NOT_FOUND)
        user.delete()
        pb.delete()
        pd.delete()
        return Response(status=status.HTTP_204_NO_CONTENT)
class StudentRemove(APIView):
    """
    Deletes the student user and both of their profile records.
    Returns 204 on success, 404 when any record is missing.
    """
    def delete(self, request, pk, format='json'):
        # Model.objects.get() raises DoesNotExist rather than returning None,
        # so the original "is not None" checks were dead code and the 404
        # branch was unreachable; catch the exceptions instead.
        try:
            user = User.objects.get(username=pk)
            sb = studentBasic.objects.get(pk=pk)
            sd = studentDetailed.objects.get(pk=pk)
        except (User.DoesNotExist, studentBasic.DoesNotExist, studentDetailed.DoesNotExist):
            return Response(status=status.HTTP_404_NOT_FOUND)
        user.delete()
        sb.delete()
        sd.delete()
        return Response(status=status.HTTP_204_NO_CONTENT)
# Notebook-wide settings used by the helper classes below.
tz_str = None  # timezone handed to datetime.now(); None = system local time
sep = ' -- '  # field separator string (read by logger.println via the bare name `sep`)
testFunctions = True  # when True, the self-tests at the bottom of the file run at import time
import datetime as dt
class time:
    """Tiny clock helpers built on datetime (namespace class, no instances)."""

    def now(tz_str):
        # Pass the tz straight through; None means system local time.
        return dt.datetime.now(tz_str)

    def timestamp():
        # Stringify "now" using the module-level tz_str setting.
        current = time.now(tz_str)
        return str(current)
class logger:
    """Minimal timestamped console logger (namespace class, no instances)."""

    sep = ':'  # separator between timestamp, level tag, and message

    def println(msg, lvl):
        # NOTE: the original read the bare name `sep`, which resolves to the
        # module-level ' -- ' (class attributes are not in scope inside
        # methods), leaving the class's own `sep` dead; qualify it so the
        # declared class separator is actually used.
        ln_timestamp = '@' + str(time.timestamp())
        lvl = '[' + lvl + ']'
        ln_str = ln_timestamp + logger.sep + lvl + logger.sep + msg + '\n'
        print(ln_str)
class dep_resolve:
def pip_install():
!pip install -U kaggle
!pip install -U transformers==2.9.1
!pip install -U pytorch-lightning==0.7.6
!pip install -U aitextgen
def atg_imports():
import logging
logging.basicConfig(
format="%(asctime)s — %(levelname)s — %(name)s — %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
level=logging.INFO
)
from aitextgen import aitextgen
from aitextgen.colab import mount_gdrive, copy_file_from_gdrive
from aitextgen.TokenDataset import TokenDataset, merge_datasets
from aitextgen.utils import build_gpt2_config
from aitextgen.tokenizers import train_tokenizer
def dep_resolve(skip_pip):
if skip_pip == True:
print('note: skip_pip is set to True, skipping pip.')
elif skip_pip == False:
print('note: skip_pip is set to False, installing dependencies with pip.')
pip_install()
else:
print('note: skip_pip is not set to True or False, skipping pip.')
class kaggle:
def log_err(msg):
logger.println(msg, 'ERR!')
def log_inf(msg)
logger.println(msg, 'INFO')
def kaggle_dl(dataset):
if dataset == 'anime-subtitles':
!mkdir -p /root/.kaggle
!mv kaggle.json /root/.kaggle/kaggle.json
!chmod 600 /root/.kaggle/kaggle.json
!kaggle datasets download -d jef1056/anime-subtitles
!unzip anime-subtitles.zip
!wc -l 'input (Cleaned).txt'
msg = 'input (Cleaned).txt'
log_inf(msg)
elif dataset == 'million-headlines':
!mkdir -p /root/.kaggle
!mv kaggle.json /root/.kaggle/kaggle.json
!chmod 600 /root/.kaggle/kaggle.json
!kaggle datasets download -d therohk/million-headlines
!unzip million-headlines.zip
!wc -l 'abcnews-date-text.csv'
msg = 'abcnews-date-text.csv'
log_inf(msg)
else:
msg = 'No dataset or invalid dataset selected.'
log_err(msg)
def kaggle_del(dataset):
if dataset == 'anime-subtitles':
!rm -rf anime-subtitles.zip
!rm -rf 'Anime Datasets V3.zip'
!rm -rf 'input (Cleaned).txt'
msg = 'Removed dataset: anime-subtitles'
log_inf(msg)
elif dataset == 'million-headlines':
!rm -rf million-headlines.zip
!rm -rf 'abcnews-date-text.csv'
msg = 'Removed dataset: million-headlines'
log_inf(msg)
else:
msg = 'No dataset or invalid dataset selected.'
log_inf(msg)
class housekeeping:
    # Working-directory cleanup. IPython/Colab-only: the `!rm` lines are
    # shell magics and will not run under plain Python.
    def cleandir(rm_model, rm_tokenizer_data):
        # Delete tokenizer artifacts when asked; note there is no else
        # branch here, so non-bool values silently skip this step.
        if rm_tokenizer_data == True:
            !rm -rf aitextgen-merges.txt
            !rm -rf aitextgen-vocab.json
            logger.println('rm_tokenizer_data set to true, deleting tokenizer data at $PWD/aitextgen-merges.txt and $PWD/aitextgen-vocab.json', 'INFO')
        elif rm_tokenizer_data == False:
            logger.println('rm_tokenizer_data set to false, skipping tokenizer data deletion.', 'WARN')
        # Delete the trained model directory when asked.
        if rm_model == True:
            logger.println('rm_model is set to true, deleting model stored at trained_model.', 'INFO')
            !rm -rf /content/trained_model
        elif rm_model == False:
            logger.println('rm_model set to False, skipping model deletion.', 'WARN')
        else:
            logger.println('rm_model not set to True or False, skipping model deletion.', 'WARN')
class load:
    """DataFrame loaders keyed by file type.
    Assumes pandas is available as the global `pd` — TODO confirm (the
    notebook never imports it in this file)."""

    def load_csv(pth):
        x = pd.read_csv(pth)
        return x
    def load_json(pth):
        x = pd.read_json(pth)
        return x
    def load_xlsx(pth):
        x = pd.read_excel(pth)
        return x
    def load(pth, load_type):
        # fixed: bare helper names are NameErrors inside methods (class
        # attributes are not in method scope), and the original called the
        # undefined load_excel where load_xlsx was meant.
        if load_type == 'csv':
            return load.load_csv(pth)
        elif load_type == 'json':
            return load.load_json(pth)
        elif load_type == 'xlsx':
            return load.load_xlsx(pth)
        elif load_type == 'excel':
            return load.load_xlsx(pth)
        else:
            # fixed: logging.println does not exist; use this module's logger
            logger.println('Incorrect load_type or load_type not found. Returning empty DataFrame', 'ERRR')
            return pd.DataFrame()
class meta:
    """Smoke tests for the helpers, optionally run at import time."""

    def test_helpers():
        # fixed: the stdlib logging module has no println; the original
        # logging.println calls would raise AttributeError — use this
        # module's logger instead.
        logger.println('Logging and timestamping seems to be working fine, testing functions...', 'TEST')
        logger.println('Testing housekeeping.cleandir(False)...', 'TEST')
        housekeeping.cleandir(False, False)
    def test_decision(run_tests):
        if run_tests == True:
            logger.println('run_tests set to true, running tests.', 'INFO')
            meta.test_helpers()
        elif run_tests == False:
            logger.println('run_tests set to false, not running any tests on any functions besides logging.', 'WARN')
        else:
            logger.println('run_tests not set to true or false, not running any tests on any functions besides logging.', 'WARN')
# fixed: test_decision is defined on class meta; the bare call is a NameError.
meta.test_decision(testFunctions)
sep = ' -- '  # field separator string (read by logger.println via the bare name `sep`)
testFunctions = True  # when True, the self-tests at the bottom of the file run at import time
import datetime as dt
class time:
    """Tiny clock helpers built on datetime (namespace class, no instances)."""

    def now(tz_str):
        # Pass the tz straight through; None means system local time.
        return dt.datetime.now(tz_str)

    def timestamp():
        # Stringify "now" using the module-level tz_str setting.
        current = time.now(tz_str)
        return str(current)
class logger:
    """Minimal timestamped console logger (namespace class, no instances)."""

    sep = ':'  # separator between timestamp, level tag, and message

    def println(msg, lvl):
        # NOTE: the original read the bare name `sep`, which resolves to the
        # module-level ' -- ' (class attributes are not in scope inside
        # methods), leaving the class's own `sep` dead; qualify it so the
        # declared class separator is actually used.
        ln_timestamp = '@' + str(time.timestamp())
        lvl = '[' + lvl + ']'
        ln_str = ln_timestamp + logger.sep + lvl + logger.sep + msg + '\n'
        print(ln_str)
class dep_resolve:
def pip_install():
!pip install -U kaggle
!pip install -U transformers==2.9.1
!pip install -U pytorch-lightning==0.7.6
!pip install -U aitextgen
def atg_imports():
import logging
logging.basicConfig(
format="%(asctime)s — %(levelname)s — %(name)s — %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
level=logging.INFO
)
from aitextgen import aitextgen
from aitextgen.colab import mount_gdrive, copy_file_from_gdrive
from aitextgen.TokenDataset import TokenDataset, merge_datasets
from aitextgen.utils import build_gpt2_config
from aitextgen.tokenizers import train_tokenizer
def dep_resolve(skip_pip):
if skip_pip == True:
print('note: skip_pip is set to True, skipping pip.')
elif skip_pip == False:
print('note: skip_pip is set to False, installing dependencies with pip.')
pip_install()
else:
print('note: skip_pip is not set to True or False, skipping pip.')
class kaggle:
def log_err(msg):
logger.println(msg, 'ERR!')
def log_inf(msg)
logger.println(msg, 'INFO')
def kaggle_dl(dataset):
if dataset == 'anime-subtitles':
!mkdir -p /root/.kaggle
!mv kaggle.json /root/.kaggle/kaggle.json
!chmod 600 /root/.kaggle/kaggle.json
!kaggle datasets download -d jef1056/anime-subtitles
!unzip anime-subtitles.zip
!wc -l 'input (Cleaned).txt'
msg = 'input (Cleaned).txt'
log_inf(msg)
elif dataset == 'million-headlines':
!mkdir -p /root/.kaggle
!mv kaggle.json /root/.kaggle/kaggle.json
!chmod 600 /root/.kaggle/kaggle.json
!kaggle datasets download -d therohk/million-headlines
!unzip million-headlines.zip
!wc -l 'abcnews-date-text.csv'
msg = 'abcnews-date-text.csv'
log_inf(msg)
else:
msg = 'No dataset or invalid dataset selected.'
log_err(msg)
def kaggle_del(dataset):
if dataset == 'anime-subtitles':
!rm -rf anime-subtitles.zip
!rm -rf 'Anime Datasets V3.zip'
!rm -rf 'input (Cleaned).txt'
msg = 'Removed dataset: anime-subtitles'
log_inf(msg)
elif dataset == 'million-headlines':
!rm -rf million-headlines.zip
!rm -rf 'abcnews-date-text.csv'
msg = 'Removed dataset: million-headlines'
log_inf(msg)
else:
msg = 'No dataset or invalid dataset selected.'
log_inf(msg)
class housekeeping:
    # Working-directory cleanup. IPython/Colab-only: the `!rm` lines are
    # shell magics and will not run under plain Python.
    def cleandir(rm_model, rm_tokenizer_data):
        # Delete tokenizer artifacts when asked; note there is no else
        # branch here, so non-bool values silently skip this step.
        if rm_tokenizer_data == True:
            !rm -rf aitextgen-merges.txt
            !rm -rf aitextgen-vocab.json
            logger.println('rm_tokenizer_data set to true, deleting tokenizer data at $PWD/aitextgen-merges.txt and $PWD/aitextgen-vocab.json', 'INFO')
        elif rm_tokenizer_data == False:
            logger.println('rm_tokenizer_data set to false, skipping tokenizer data deletion.', 'WARN')
        # Delete the trained model directory when asked.
        if rm_model == True:
            logger.println('rm_model is set to true, deleting model stored at trained_model.', 'INFO')
            !rm -rf /content/trained_model
        elif rm_model == False:
            logger.println('rm_model set to False, skipping model deletion.', 'WARN')
        else:
            logger.println('rm_model not set to True or False, skipping model deletion.', 'WARN')
class load:
    """DataFrame loaders keyed by file type.
    Assumes pandas is available as the global `pd` — TODO confirm (the
    notebook never imports it in this file)."""

    def load_csv(pth):
        x = pd.read_csv(pth)
        return x
    def load_json(pth):
        x = pd.read_json(pth)
        return x
    def load_xlsx(pth):
        x = pd.read_excel(pth)
        return x
    def load(pth, load_type):
        # fixed: bare helper names are NameErrors inside methods (class
        # attributes are not in method scope), and the original called the
        # undefined load_excel where load_xlsx was meant.
        if load_type == 'csv':
            return load.load_csv(pth)
        elif load_type == 'json':
            return load.load_json(pth)
        elif load_type == 'xlsx':
            return load.load_xlsx(pth)
        elif load_type == 'excel':
            return load.load_xlsx(pth)
        else:
            # fixed: logging.println does not exist; use this module's logger
            logger.println('Incorrect load_type or load_type not found. Returning empty DataFrame', 'ERRR')
            return pd.DataFrame()
class meta:
    """Smoke tests for the helpers, optionally run at import time."""

    def test_helpers():
        # fixed: the stdlib logging module has no println; the original
        # logging.println calls would raise AttributeError — use this
        # module's logger instead.
        logger.println('Logging and timestamping seems to be working fine, testing functions...', 'TEST')
        logger.println('Testing housekeeping.cleandir(False)...', 'TEST')
        housekeeping.cleandir(False, False)
    def test_decision(run_tests):
        if run_tests == True:
            logger.println('run_tests set to true, running tests.', 'INFO')
            meta.test_helpers()
        elif run_tests == False:
            logger.println('run_tests set to false, not running any tests on any functions besides logging.', 'WARN')
        else:
            logger.println('run_tests not set to true or false, not running any tests on any functions besides logging.', 'WARN')
# fixed: test_decision is defined on class meta; the bare call is a NameError.
meta.test_decision(testFunctions)
from conans.assets.templates.new_v2_cmake import source_cpp, source_h, test_main
conanfile_sources_v2 = """
import os
from conan import ConanFile
from conan.tools.google import Bazel, bazel_layout
from conan.tools.files import copy
class {package_name}Conan(ConanFile):
name = "{name}"
version = "{version}"
# Binary configuration
settings = "os", "compiler", "build_type", "arch"
options = {{"shared": [True, False], "fPIC": [True, False]}}
default_options = {{"shared": False, "fPIC": True}}
# Sources are located in the same place as this recipe, copy them to the recipe
exports_sources = "main/*", "WORKSPACE"
generators = "BazelToolchain"
def config_options(self):
if self.settings.os == "Windows":
del self.options.fPIC
def layout(self):
bazel_layout(self)
def build(self):
bazel = Bazel(self)
bazel.configure()
bazel.build(label="//main:{name}")
def package(self):
dest_lib = os.path.join(self.package_folder, "lib")
dest_bin = os.path.join(self.package_folder, "bin")
build = os.path.join(self.build_folder, "bazel-bin", "main")
copy(self, "*.so", build, dest_bin, keep_path=False)
copy(self, "*.dll", build, dest_bin, keep_path=False)
copy(self, "*.dylib", build, dest_bin, keep_path=False)
copy(self, "*.a", build, dest_lib, keep_path=False)
copy(self, "*.lib", build, dest_lib, keep_path=False)
copy(self, "{name}.h", os.path.join(self.source_folder, "main"),
os.path.join(self.package_folder, "include"), keep_path=False)
def package_info(self):
self.cpp_info.libs = ["{name}"]
"""
test_conanfile_v2 = """import os
from conan import ConanFile
from conan.tools.google import Bazel, bazel_layout
from conan.tools.build import cross_building
class {package_name}TestConan(ConanFile):
settings = "os", "compiler", "build_type", "arch"
# VirtualBuildEnv and VirtualRunEnv can be avoided if "tools.env.virtualenv:auto_use" is defined
# (it will be defined in Conan 2.0)
generators = "BazelToolchain", "BazelDeps", "VirtualBuildEnv", "VirtualRunEnv"
apply_env = False
def requirements(self):
self.requires(self.tested_reference_str)
def build(self):
bazel = Bazel(self)
bazel.configure()
bazel.build(label="//main:example")
def layout(self):
bazel_layout(self)
def test(self):
if not cross_building(self):
cmd = os.path.join(self.cpp.build.bindirs[0], "main", "example")
self.run(cmd, env="conanrun")
"""
_bazel_build_test = """\
load("@rules_cc//cc:defs.bzl", "cc_binary")
cc_binary(
name = "example",
srcs = ["example.cpp"],
deps = [
"@{name}//:{name}",
],
)
"""
_bazel_build = """\
load("@rules_cc//cc:defs.bzl", "cc_library")
cc_library(
name = "{name}",
srcs = ["{name}.cpp"],
hdrs = ["{name}.h"],
)
"""
_bazel_workspace = ""
_test_bazel_workspace = """
load("@//:dependencies.bzl", "load_conan_dependencies")
load_conan_dependencies()
"""
conanfile_exe = """
import os
from conan import ConanFile
from conan.tools.google import Bazel, bazel_layout
from conan.tools.files import copy
class {package_name}Conan(ConanFile):
name = "{name}"
version = "{version}"
# Binary configuration
settings = "os", "compiler", "build_type", "arch"
# Sources are located in the same place as this recipe, copy them to the recipe
exports_sources = "main/*", "WORKSPACE"
generators = "BazelToolchain"
def layout(self):
bazel_layout(self)
def build(self):
bazel = Bazel(self)
bazel.configure()
bazel.build(label="//main:{name}")
def package(self):
dest_bin = os.path.join(self.package_folder, "bin")
build = os.path.join(self.build_folder, "bazel-bin", "main")
copy(self, "{name}", build, dest_bin, keep_path=False)
copy(self, "{name}.exe", build, dest_bin, keep_path=False)
"""
test_conanfile_exe_v2 = """import os
from conan import ConanFile
from conan.tools.build import cross_building
class {package_name}TestConan(ConanFile):
settings = "os", "compiler", "build_type", "arch"
# VirtualRunEnv can be avoided if "tools.env.virtualenv:auto_use" is defined
# (it will be defined in Conan 2.0)
generators = "VirtualRunEnv"
apply_env = False
def test(self):
if not cross_building(self):
self.run("{name}", env="conanrun")
"""
_bazel_build_exe = """\
load("@rules_cc//cc:defs.bzl", "cc_binary")
cc_binary(
name = "{name}",
srcs = ["main.cpp", "{name}.cpp", "{name}.h"]
)
"""
def get_bazel_lib_files(name, version, package_name="Pkg"):
    """Return the {relative path: contents} map for a new Bazel library package."""
    ctx = {"name": name, "version": version}
    files = {
        "conanfile.py": conanfile_sources_v2.format(package_name=package_name, **ctx),
        f"main/{name}.cpp": source_cpp.format(**ctx),
        f"main/{name}.h": source_h.format(**ctx),
        "main/BUILD": _bazel_build.format(**ctx),
        "WORKSPACE": _bazel_workspace.format(**ctx),
        "test_package/conanfile.py": test_conanfile_v2.format(package_name=package_name, **ctx),
        "test_package/main/example.cpp": test_main.format(name=name),
        "test_package/main/BUILD": _bazel_build_test.format(name=name),
        "test_package/WORKSPACE": _test_bazel_workspace.format(**ctx),
    }
    return files
def get_bazel_exe_files(name, version, package_name="Pkg"):
    """Return the {relative path: contents} map for a new Bazel executable package."""
    ctx = {"name": name, "version": version}
    files = {
        "conanfile.py": conanfile_exe.format(package_name=package_name, **ctx),
        f"main/{name}.cpp": source_cpp.format(**ctx),
        f"main/{name}.h": source_h.format(**ctx),
        "main/main.cpp": test_main.format(name=name),
        "main/BUILD": _bazel_build_exe.format(**ctx),
        "WORKSPACE": _bazel_workspace.format(**ctx),
        "test_package/conanfile.py": test_conanfile_exe_v2.format(package_name=package_name, **ctx),
    }
    return files
conanfile_sources_v2 = """
import os
from conan import ConanFile
from conan.tools.google import Bazel, bazel_layout
from conan.tools.files import copy
class {package_name}Conan(ConanFile):
name = "{name}"
version = "{version}"
# Binary configuration
settings = "os", "compiler", "build_type", "arch"
options = {{"shared": [True, False], "fPIC": [True, False]}}
default_options = {{"shared": False, "fPIC": True}}
# Sources are located in the same place as this recipe, copy them to the recipe
exports_sources = "main/*", "WORKSPACE"
generators = "BazelToolchain"
def config_options(self):
if self.settings.os == "Windows":
del self.options.fPIC
def layout(self):
bazel_layout(self)
def build(self):
bazel = Bazel(self)
bazel.configure()
bazel.build(label="//main:{name}")
def package(self):
dest_lib = os.path.join(self.package_folder, "lib")
dest_bin = os.path.join(self.package_folder, "bin")
build = os.path.join(self.build_folder, "bazel-bin", "main")
copy(self, "*.so", build, dest_bin, keep_path=False)
copy(self, "*.dll", build, dest_bin, keep_path=False)
copy(self, "*.dylib", build, dest_bin, keep_path=False)
copy(self, "*.a", build, dest_lib, keep_path=False)
copy(self, "*.lib", build, dest_lib, keep_path=False)
copy(self, "{name}.h", os.path.join(self.source_folder, "main"),
os.path.join(self.package_folder, "include"), keep_path=False)
def package_info(self):
self.cpp_info.libs = ["{name}"]
"""
test_conanfile_v2 = """import os
from conan import ConanFile
from conan.tools.google import Bazel, bazel_layout
from conan.tools.build import cross_building
class {package_name}TestConan(ConanFile):
settings = "os", "compiler", "build_type", "arch"
# VirtualBuildEnv and VirtualRunEnv can be avoided if "tools.env.virtualenv:auto_use" is defined
# (it will be defined in Conan 2.0)
generators = "BazelToolchain", "BazelDeps", "VirtualBuildEnv", "VirtualRunEnv"
apply_env = False
def requirements(self):
self.requires(self.tested_reference_str)
def build(self):
bazel = Bazel(self)
bazel.configure()
bazel.build(label="//main:example")
def layout(self):
bazel_layout(self)
def test(self):
if not cross_building(self):
cmd = os.path.join(self.cpp.build.bindirs[0], "main", "example")
self.run(cmd, env="conanrun")
"""
_bazel_build_test = """\
load("@rules_cc//cc:defs.bzl", "cc_binary")
cc_binary(
name = "example",
srcs = ["example.cpp"],
deps = [
"@{name}//:{name}",
],
)
"""
_bazel_build = """\
load("@rules_cc//cc:defs.bzl", "cc_library")
cc_library(
name = "{name}",
srcs = ["{name}.cpp"],
hdrs = ["{name}.h"],
)
"""
_bazel_workspace = ""
_test_bazel_workspace = """
load("@//:dependencies.bzl", "load_conan_dependencies")
load_conan_dependencies()
"""
conanfile_exe = """
import os
from conan import ConanFile
from conan.tools.google import Bazel, bazel_layout
from conan.tools.files import copy
class {package_name}Conan(ConanFile):
name = "{name}"
version = "{version}"
# Binary configuration
settings = "os", "compiler", "build_type", "arch"
# Sources are located in the same place as this recipe, copy them to the recipe
exports_sources = "main/*", "WORKSPACE"
generators = "BazelToolchain"
def layout(self):
bazel_layout(self)
def build(self):
bazel = Bazel(self)
bazel.configure()
bazel.build(label="//main:{name}")
def package(self):
dest_bin = os.path.join(self.package_folder, "bin")
build = os.path.join(self.build_folder, "bazel-bin", "main")
copy(self, "{name}", build, dest_bin, keep_path=False)
copy(self, "{name}.exe", build, dest_bin, keep_path=False)
"""
test_conanfile_exe_v2 = """import os
from conan import ConanFile
from conan.tools.build import cross_building
class {package_name}TestConan(ConanFile):
settings = "os", "compiler", "build_type", "arch"
# VirtualRunEnv can be avoided if "tools.env.virtualenv:auto_use" is defined
# (it will be defined in Conan 2.0)
generators = "VirtualRunEnv"
apply_env = False
def test(self):
if not cross_building(self):
self.run("{name}", env="conanrun")
"""
_bazel_build_exe = """\
load("@rules_cc//cc:defs.bzl", "cc_binary")
cc_binary(
name = "{name}",
srcs = ["main.cpp", "{name}.cpp", "{name}.h"]
)
"""
def get_bazel_lib_files(name, version, package_name="Pkg"):
    """Return the {relative path: contents} map for a new Bazel library package."""
    ctx = {"name": name, "version": version}
    files = {
        "conanfile.py": conanfile_sources_v2.format(package_name=package_name, **ctx),
        f"main/{name}.cpp": source_cpp.format(**ctx),
        f"main/{name}.h": source_h.format(**ctx),
        "main/BUILD": _bazel_build.format(**ctx),
        "WORKSPACE": _bazel_workspace.format(**ctx),
        "test_package/conanfile.py": test_conanfile_v2.format(package_name=package_name, **ctx),
        "test_package/main/example.cpp": test_main.format(name=name),
        "test_package/main/BUILD": _bazel_build_test.format(name=name),
        "test_package/WORKSPACE": _test_bazel_workspace.format(**ctx),
    }
    return files
def get_bazel_exe_files(name, version, package_name="Pkg"):
    """Return the {relative path: contents} map for a new Bazel executable package."""
    ctx = {"name": name, "version": version}
    files = {
        "conanfile.py": conanfile_exe.format(package_name=package_name, **ctx),
        f"main/{name}.cpp": source_cpp.format(**ctx),
        f"main/{name}.h": source_h.format(**ctx),
        "main/main.cpp": test_main.format(name=name),
        "main/BUILD": _bazel_build_exe.format(**ctx),
        "WORKSPACE": _bazel_workspace.format(**ctx),
        "test_package/conanfile.py": test_conanfile_exe_v2.format(package_name=package_name, **ctx),
    }
    return files
import asyncio
from asyncio.tasks import sleep
import websockets
import json
import constants
import time
SERVER_URL = 'ws://localhost:8765'
# Static payload sent to /md_login_request; "request_data" is echoed back by
# the server in the response.
LOGIN_REQUEST = {
    "request_data": "ValueToBeReturnedInResponse",
    "md_login_request":
    {
        "login_id": constants.LOGIN_ID,
        # fixed: the source contained the redaction placeholder <PASSWORD>,
        # which is not valid Python. Assume the credential lives alongside
        # LOGIN_ID in the constants module — TODO confirm the attribute name.
        "password": constants.PASSWORD,
        "client_application": "WEB",
        "client_application_version": "1.0",
        "country_code": "no",
        "language_code": "en"
    }
}
class ClientSession(object):
    """Mutable holder for websocket session state (class-level defaults)."""
    token = None  # session token string; "0" is treated as "logged out"
    timeout = 0  # server-reported session timeout
    time_to_live = constants.TIME_TO_LIVE  # how long consumer() keeps streaming (seconds)
    is_registered = False  # whether the token was announced on the update socket
async def login(session):
    """Authenticate against the server and persist the session to disk.

    On success, stores the token/timeout on *session* and mirrors them to
    active_session.json; on failure only prints a message.
    """
    print('Start Login job')
    async with websockets.connect(SERVER_URL + '/md_login_request') as websocket:
        await websocket.send(json.dumps(LOGIN_REQUEST))
        login_response = json.loads(await websocket.recv())
        if login_response['error_code'] != 0:
            print('Login failed !')
            return
        session.token = login_response['session_token']
        session.timeout = login_response['md_login_response']['session_timeout']
        saved = {"session_token": session.token, "session_timeout": session.timeout}
        with open('active_session.json', 'w') as f:
            f.write(json.dumps(saved))
        print('Login was succesfull ! with ' + session.token)
async def register(session, websocket):
    """Announce a logged-in session token on *websocket* (at most once)."""
    valid_token = session.token and session.token != "0"
    if not valid_token:
        print('Not be able to login yet')
        return
    if session.is_registered:
        # Already announced; nothing to do.
        return
    print('Inform server that I am logged in and ready')
    await websocket.send(json.dumps({"session_token": session.token}))
    session.is_registered = True
async def consumer(session):
    """Stream instrument updates until session.time_to_live seconds elapse.

    Each loop iteration (re)attempts registration, then reads one update
    when registered, polling every half second.
    """
    start = time.time()
    elapsed = 0
    async with websockets.connect(SERVER_URL + '/md_instrument_update') as websocket:
        while elapsed < session.time_to_live:
            elapsed = time.time() - start
            # The original inlined a verbatim copy of register(); call the
            # helper instead so the registration handshake lives in one place.
            await register(session, websocket)
            if session.is_registered:
                instrument_update = await websocket.recv()
                print('Receiving ' + instrument_update)
            await asyncio.sleep(0.5)
    print('Bye ! I am going away')
def main():
    """Restore any saved session from disk and run the consumer loop,
    scheduling a fresh login first when no token has been saved yet."""
    with open('active_session.json') as f:
        active_session = json.loads(f.read())
    client_session = ClientSession()
    client_session.token = active_session['session_token']
    client_session.timeout = active_session['session_timeout']
    loop = asyncio.get_event_loop()
    # "0" marks a logged-out session: kick off a login alongside the consumer.
    if client_session.token == "0":
        loop.create_task(login(client_session))
    loop.run_until_complete(consumer(client_session))

if __name__ == "__main__":
    main()
import asyncio
from asyncio.tasks import sleep
import websockets
import json
import constants
import time
SERVER_URL = 'ws://localhost:8765'

# Canned login payload.  The password had been replaced with a bare
# "<PASSWORD>" redaction placeholder, which is a syntax error; credentials
# belong in the constants module alongside LOGIN_ID.
# NOTE(review): assumes constants defines PASSWORD — confirm against constants.py.
LOGIN_REQUEST = {
    "request_data": "ValueToBeReturnedInResponse",
    "md_login_request": {
        "login_id": constants.LOGIN_ID,
        "password": constants.PASSWORD,
        "client_application": "WEB",
        "client_application_version": "1.0",
        "country_code": "no",
        "language_code": "en"
    }
}
class ClientSession(object):
    """Mutable holder for the session state shared by the client coroutines."""

    token = None                            # session token; "0" marks "not logged in yet"
    timeout = 0                             # server-reported session timeout
    time_to_live = constants.TIME_TO_LIVE   # how long consumer() keeps polling
    is_registered = False                   # True once the token was announced
async def login(session):
    """Authenticate against the server and persist the session on success.

    On error_code 0 the token/timeout are stored on *session* and written to
    active_session.json so later runs can skip the login.
    """
    print('Start Login job')
    async with websockets.connect(SERVER_URL + '/md_login_request') as websocket:
        await websocket.send(json.dumps(LOGIN_REQUEST))
        response = await websocket.recv()
        login_response = json.loads(response)
        if login_response['error_code'] == 0:
            session.token = login_response['session_token']
            session.timeout = login_response['md_login_response']['session_timeout']
            # Persist so a later run can reuse the session without logging in.
            with open('active_session.json', 'w') as f:
                f.write(json.dumps({"session_token": session.token,
                                    "session_timeout": session.timeout}))
            # Fixed typo in the log message ("succesfull").
            print('Login was successful ! with ' + session.token)
        else:
            print('Login failed !')
async def register(session, websocket):
    """Announce a logged-in session to the server, at most once per session."""
    if not (session.token and session.token != "0"):
        # No usable token yet ("0" is the sentinel for "never logged in").
        print('Not be able to login yet')
        return
    if not session.is_registered:
        print('Inform server that I am logged in and ready')
        await websocket.send(json.dumps({"session_token": session.token}))
        session.is_registered = True
async def consumer(session):
    """Stream instrument updates until the session's time-to-live expires.

    The registration handshake is delegated to register() instead of being
    duplicated inline, as the original did.
    """
    start = time.time()
    elapsed = 0
    async with websockets.connect(SERVER_URL + '/md_instrument_update') as websocket:
        while elapsed < session.time_to_live:
            elapsed = time.time() - start
            # Announce the token once we have one (prints a wait message otherwise).
            await register(session, websocket)
            if session.is_registered:
                instrument_update = await websocket.recv()
                print('Receiving ' + instrument_update)
            await asyncio.sleep(0.5)
    print('Bye ! I am going away')
def main():
    """Restore the saved session and drive the consumer loop."""
    with open('active_session.json') as fh:
        saved = json.loads(fh.read())
    client_session = ClientSession()
    client_session.token = saved['session_token']
    client_session.timeout = saved['session_timeout']
    loop = asyncio.get_event_loop()
    # Login if not yet
    if client_session.token == "0":
        loop.create_task(login(client_session))
    loop.run_until_complete(consumer(client_session))
if __name__ == "__main__":
main() | 0.160628 | 0.120775 |
from __future__ import annotations
from typing import Optional, Type
import jax
import jax.numpy as jnp
from tjax import Generator, RealArray, Shape
from tjax.dataclasses import dataclass
from ..expectation_parametrization import ExpectationParametrization
from ..natural_parametrization import NaturalParametrization
from ..parameter import ScalarSupport, distribution_parameter
from ..samplable import Samplable
__all__ = ['WeibullNP', 'WeibullEP']
@dataclass
class WeibullNP(NaturalParametrization['WeibullEP', RealArray]):
    """Natural parametrization of the Weibull distribution (fixed concentration)."""
    concentration: RealArray = distribution_parameter(ScalarSupport(), fixed=True)
    # eta = -scale^-concentration
    eta: RealArray = distribution_parameter(ScalarSupport())

    # Implemented methods --------------------------------------------------------------------------
    @property
    def shape(self) -> Shape:
        return self.eta.shape

    def log_normalizer(self) -> RealArray:
        return -(jnp.log(-self.eta) + jnp.log(self.concentration))

    def to_exp(self) -> WeibullEP:
        return WeibullEP(self.concentration, -1.0 / self.eta)

    def carrier_measure(self, x: RealArray) -> RealArray:
        return jnp.log(x) * (self.concentration - 1.0)

    def sufficient_statistics(self, x: RealArray) -> WeibullEP:
        broadcast_concentration = jnp.broadcast_to(self.concentration, x.shape)
        return WeibullEP(broadcast_concentration, x ** self.concentration)
@dataclass
class WeibullEP(ExpectationParametrization[WeibullNP], Samplable):
    """Expectation parametrization of the Weibull distribution (fixed concentration)."""
    concentration: RealArray = distribution_parameter(ScalarSupport(), fixed=True)
    # chi = scale^concentration
    chi: RealArray = distribution_parameter(ScalarSupport())

    # Implemented methods --------------------------------------------------------------------------
    @property
    def shape(self) -> Shape:
        return self.chi.shape

    @classmethod
    def natural_parametrization_cls(cls) -> Type[WeibullNP]:
        return WeibullNP

    def to_nat(self) -> WeibullNP:
        return WeibullNP(self.concentration, -1.0 / self.chi)

    def expected_carrier_measure(self) -> RealArray:
        k = self.concentration
        scale = self.chi ** (1.0 / k)
        # (k-1) * E[log x], with E[log x] = log(scale) - euler_gamma / k.
        return (k - 1.0) * jnp.log(scale) - jnp.euler_gamma * (1.0 - 1.0 / k)

    def sample(self, rng: Generator, shape: Optional[Shape] = None) -> RealArray:
        full_shape = self.shape if shape is None else shape + self.shape
        scale = self.chi ** (1.0 / self.concentration)
        return jax.random.weibull_min(rng.key, scale, self.concentration, full_shape)  # type: ignore
from typing import Optional, Type
import jax
import jax.numpy as jnp
from tjax import Generator, RealArray, Shape
from tjax.dataclasses import dataclass
from ..expectation_parametrization import ExpectationParametrization
from ..natural_parametrization import NaturalParametrization
from ..parameter import ScalarSupport, distribution_parameter
from ..samplable import Samplable
__all__ = ['WeibullNP', 'WeibullEP']
@dataclass
class WeibullNP(NaturalParametrization['WeibullEP', RealArray]):
    """Natural parametrization of the Weibull distribution (fixed concentration)."""
    concentration: RealArray = distribution_parameter(ScalarSupport(), fixed=True)
    # eta = -scale^-concentration
    eta: RealArray = distribution_parameter(ScalarSupport())

    # Implemented methods --------------------------------------------------------------------------
    @property
    def shape(self) -> Shape:
        return self.eta.shape

    def log_normalizer(self) -> RealArray:
        return -(jnp.log(-self.eta) + jnp.log(self.concentration))

    def to_exp(self) -> WeibullEP:
        return WeibullEP(self.concentration, -1.0 / self.eta)

    def carrier_measure(self, x: RealArray) -> RealArray:
        return jnp.log(x) * (self.concentration - 1.0)

    def sufficient_statistics(self, x: RealArray) -> WeibullEP:
        broadcast_concentration = jnp.broadcast_to(self.concentration, x.shape)
        return WeibullEP(broadcast_concentration, x ** self.concentration)
@dataclass
class WeibullEP(ExpectationParametrization[WeibullNP], Samplable):
    """Expectation parametrization of the Weibull distribution (fixed concentration)."""
    concentration: RealArray = distribution_parameter(ScalarSupport(), fixed=True)
    # chi = scale^concentration
    chi: RealArray = distribution_parameter(ScalarSupport())

    # Implemented methods --------------------------------------------------------------------------
    @property
    def shape(self) -> Shape:
        return self.chi.shape

    @classmethod
    def natural_parametrization_cls(cls) -> Type[WeibullNP]:
        return WeibullNP

    def to_nat(self) -> WeibullNP:
        return WeibullNP(self.concentration, -1.0 / self.chi)

    def expected_carrier_measure(self) -> RealArray:
        k = self.concentration
        scale = self.chi ** (1.0 / k)
        # (k-1) * E[log x], with E[log x] = log(scale) - euler_gamma / k.
        return (k - 1.0) * jnp.log(scale) - jnp.euler_gamma * (1.0 - 1.0 / k)

    def sample(self, rng: Generator, shape: Optional[Shape] = None) -> RealArray:
        full_shape = self.shape if shape is None else shape + self.shape
        scale = self.chi ** (1.0 / self.concentration)
        return jax.random.weibull_min(rng.key, scale, self.concentration, full_shape)  # type: ignore
import base64
import collections
import hashlib
import random
import struct
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
from httplite import HTTPResponse
__all__ = ["WebSocket", "SingleOptionPolicy", "UnrestrictedPolicy"]
def split_field(s):
    """Split a comma-separated header value into whitespace-stripped tokens."""
    parts = s.split(",")
    return [part.strip() for part in parts]
def validate_key(client_key):
    """Return True iff client_key is valid base64 decoding to exactly 16 bytes.

    RFC 6455 requires Sec-WebSocket-Key to be a base64-encoded 16-byte nonce.
    """
    try:
        decoded = base64.standard_b64decode(client_key)
    except (TypeError, ValueError):
        # Malformed base64: TypeError on Python 2, binascii.Error (a ValueError
        # subclass) on Python 3.  The original bare "except:" hid real bugs too.
        return False
    return len(decoded) == 16
def compute_key(client_key):
    """Compute the Sec-WebSocket-Accept digest for a client key (RFC 6455).

    Accepts str or bytes.  On Python 2 behavior is unchanged; on Python 3 the
    key is encoded first because hashlib only accepts bytes there.
    """
    GUID = b"258EAFA5-E914-47DA-95CA-C5AB0DC85B11"
    if not isinstance(client_key, bytes):
        # Sec-WebSocket-Key is base64, hence pure ASCII.
        client_key = client_key.encode("ascii")
    digest = hashlib.sha1(client_key + GUID).digest()
    return base64.standard_b64encode(digest)
class Policy(object):
    """Pluggable accept/choose strategy for negotiating handshake options."""

    def __init__(self):
        pass

    def set_acceptor(self, func):
        # Bind the acceptance predicate on this instance.
        self.accept = func

    def set_chooser(self, func):
        # Bind the selection function on this instance.
        self.choose = func

    @classmethod
    def make_policy(cls, accept, choose):
        """Build a policy from an acceptance predicate and a chooser."""
        instance = cls()
        instance.set_acceptor(accept)
        instance.set_chooser(choose)
        return instance
class SingleOptionPolicy(Policy):
    """Policy that accepts and always chooses one fixed option."""

    def __init__(self, option):
        self.option = option

    def accept(self, options):
        # Accept only if our single option is among those offered.
        return self.option in options

    def choose(self, options):
        return self.option
class UnrestrictedPolicy(Policy):
    """Policy that accepts anything and picks the first offered option."""

    def accept(self, options):
        return True

    def choose(self, options):
        return options[0]
class PolicyError(ValueError):
    """Raised when a handshake value is rejected by a negotiation policy."""
class BitProperty(object):
    """Descriptor exposing one bit of an integer attribute as a 0/1 value."""

    def __init__(self, field, position):
        # position counts from the LSB (position 0).
        self.field = field
        self.position = position
        self.mask = 1 << position

    def __get__(self, obj, type_):
        current = getattr(obj, self.field)
        return 1 if current & self.mask else 0

    def __set__(self, obj, value):
        if value < 0 or value > 1:
            raise ValueError("Value %s cannot fit in a 1-bit field" % (value,))
        current = getattr(obj, self.field, 0)
        cleared = (current | self.mask) ^ self.mask  # zero out the target bit
        setattr(obj, self.field, cleared | (value << self.position))
class MultiBitProperty(object):
    """Descriptor exposing a contiguous bit range of an integer attribute.

    The range is inclusive; start/end may be given in either order and count
    from the LSB (bit 0).
    """

    def __init__(self, field, start, end):
        self.field = field
        if start > end:
            start, end = end, start
        self.start = start
        self.end = end
        self.width = end - start + 1
        # Mask covering bits start..end (computed arithmetically; the original
        # built it with xrange, which does not exist on Python 3).
        self.mask = ((1 << self.width) - 1) << start

    def __get__(self, obj, type_):
        return (getattr(obj, self.field) & self.mask) >> self.start

    def __set__(self, obj, value):
        if value != (value & (self.mask >> self.start)):
            # Fix: the original referenced an undefined name ("width") here,
            # so the error path raised NameError instead of this ValueError.
            raise ValueError("Value %s can't fit in a %d-bit field" % (value, self.width))
        current = getattr(obj, self.field, 0)
        # Clear the bits occupied by the mask, then or in the new value.
        current = (current | self.mask) ^ self.mask
        setattr(obj, self.field, current | (value << self.start))
class ByteProperty(object):
    """Descriptor exposing an attribute constrained to a single byte (0-255)."""

    def __init__(self, field):
        self.field = field

    def __get__(self, obj, type_):
        return getattr(obj, self.field)

    def __set__(self, obj, value):
        if value != value & 0xff:
            raise ValueError("Value %s can't fit in a 1-byte field" % (value,))
        setattr(obj, self.field, value)

    def __delete__(self, obj):
        # Fix: __delete__ takes (self, obj); the original's extra "value"
        # parameter made every "del" raise TypeError.
        if hasattr(obj, self.field):
            delattr(obj, self.field)
class MultiByteProperty(object):
    """Descriptor combining several one-byte attributes into one big-endian int."""

    def __init__(self, *fields):
        self.fields = fields
        self.width = len(fields)

    def __get__(self, obj, type_):
        result = 0
        for name in self.fields:
            result = (result << 8) | getattr(obj, name)
        return result

    def __set__(self, obj, value):
        if value >> (self.width * 8) != 0:
            raise ValueError("Value %s can't fit in a %d-byte field" % (value, self.width))
        remaining = value
        # Fill least-significant field first.
        for name in reversed(self.fields):
            setattr(obj, name, remaining & 0xff)
            remaining >>= 8

    def __delete__(self, obj):
        for name in self.fields:
            if hasattr(obj, name):
                delattr(obj, name)
class Frame(object):
    """One WebSocket wire frame (RFC 6455 section 5.2).

    Header fields are exposed through bit/byte descriptors over the raw
    header bytes; LEN transparently selects the 7/16/64-bit length encoding.
    Payloads are handled as Python 2 byte strings via StringIO.
    """

    OP_CONTINUE = 0x0
    OP_TEXT = 0x1
    OP_BINARY = 0x2
    OP_CLOSE = 0x8
    OP_PING = 0x9
    OP_PONG = 0xA
    # Opcodes 0x8-0xF are control frames.  range() works on Python 2 and 3
    # alike; the original used xrange.
    CONTROL_OPCODES = frozenset(range(0x8, 0xF + 1))

    @classmethod
    def read_from(cls, stream):
        """Parse one frame from a binary stream; raises EOFError on a short header."""
        frame = cls()
        stream_read = stream.read

        def next_byte():
            char = stream_read(1)
            if not char:
                raise EOFError()
            return ord(char)

        frame.control_byte = next_byte()
        frame.len_byte = next_byte()
        if frame.LEN8 >= 126:
            frame.len_ext16_byte1 = next_byte()
            frame.len_ext16_byte2 = next_byte()
            if frame.LEN8 == 127:
                frame.len_ext64_byte3 = next_byte()
                frame.len_ext64_byte4 = next_byte()
                frame.len_ext64_byte5 = next_byte()
                frame.len_ext64_byte6 = next_byte()
                frame.len_ext64_byte7 = next_byte()
                frame.len_ext64_byte8 = next_byte()
        if frame.MASK:
            frame.mask_byte1 = next_byte()
            frame.mask_byte2 = next_byte()
            frame.mask_byte3 = next_byte()
            frame.mask_byte4 = next_byte()
            frame.payload = StringIO()
            xor_payload(stream, frame.payload, frame.KEY, frame.LEN)
            frame.payload.seek(0)
        else:
            frame.payload = StringIO(stream_read(frame.LEN))
        return frame

    def __init__(self, **kwargs):
        self.payload = None

    def configure(self, OPCODE, LEN, payload, FIN=1, MASK_KEY=None):
        """Populate the header fields and buffer LEN bytes of payload."""
        self.FIN = FIN
        self.OPCODE = OPCODE
        self.LEN = LEN
        if MASK_KEY is not None:
            self.KEY = MASK_KEY
        if hasattr(payload, "read"):
            self.payload = StringIO(payload.read(LEN))
        else:
            self.payload = StringIO(payload)

    FIN = BitProperty("control_byte", 7)
    RSV1 = BitProperty("control_byte", 6)
    RSV2 = BitProperty("control_byte", 5)
    RSV3 = BitProperty("control_byte", 4)
    OPCODE = MultiBitProperty("control_byte", 0, 3)
    MASK = BitProperty("len_byte", 7)
    LEN8 = MultiBitProperty("len_byte", 0, 6)
    LENx16 = MultiByteProperty("len_ext16_byte1", "len_ext16_byte2")
    # Fix: the original omitted the comma after "len_ext64_byte6", so adjacent
    # string literals fused two field names into one and the 64-bit length
    # spanned only seven bytes, breaking write_to()'s per-byte access.
    LENx64 = MultiByteProperty("len_ext16_byte1", "len_ext16_byte2",
                               "len_ext64_byte3", "len_ext64_byte4",
                               "len_ext64_byte5", "len_ext64_byte6",
                               "len_ext64_byte7", "len_ext64_byte8")
    KEY = MultiByteProperty("mask_byte1", "mask_byte2",
                            "mask_byte3", "mask_byte4")

    def _get_len(self):
        # 126 and 127 in the 7-bit field signal the 16/64-bit extended lengths.
        if self.LEN8 == 126:
            return self.LENx16
        elif self.LEN8 == 127:
            return self.LENx64
        else:
            return self.LEN8

    def _set_len(self, value):
        if 0 <= value <= 125:
            del self.LENx64
            self.LEN8 = value
        elif value < 2 ** 16:
            del self.LENx64
            self.LEN8 = 126
            self.LENx16 = value
        elif value < 2 ** 64:
            self.LEN8 = 127
            self.LENx64 = value
        else:
            raise ValueError("Payload size can't fit in a 64-bit integer")

    LEN = property(_get_len, _set_len)

    def is_control(self):
        """Return True if this frame carries a control opcode (0x8-0xF)."""
        return self.OPCODE in self.CONTROL_OPCODES

    def write_to(self, stream):
        """Serialize the frame (header then payload) onto a stream."""
        stream.write(chr(self.control_byte))
        stream.write(chr(self.len_byte))
        if self.LEN8 >= 126:
            stream.write(chr(self.len_ext16_byte1))
            stream.write(chr(self.len_ext16_byte2))
            if self.LEN8 == 127:
                stream.write(chr(self.len_ext64_byte3))
                stream.write(chr(self.len_ext64_byte4))
                stream.write(chr(self.len_ext64_byte5))
                stream.write(chr(self.len_ext64_byte6))
                stream.write(chr(self.len_ext64_byte7))
                stream.write(chr(self.len_ext64_byte8))
        if self.MASK:
            stream.write(chr(self.mask_byte1))
            stream.write(chr(self.mask_byte2))
            stream.write(chr(self.mask_byte3))
            stream.write(chr(self.mask_byte4))
        if self.payload:
            if self.MASK:
                xor_payload(self.payload, stream, self.KEY, self.LEN)
            else:
                stream.write(self.payload.read(self.LEN))
class Message(object):
    """A logical WebSocket message assembled from one or more frames."""

    def __init__(self, opcode, payload=None):
        self.opcode = opcode
        self.payload = payload if payload is not None else StringIO()
        self.complete = False
        self._cached_string = None
        self.frames = []

    def extend(self, frame):
        """Append a frame and copy its payload into the message buffer."""
        self.frames.append(frame)
        self.payload.write(frame.payload.read())
        frame.payload.seek(0)

    def finish(self):
        """Mark the message as fully received."""
        self.complete = True

    @property
    def payload_string(self):
        # Fix: use the "is None" identity idiom instead of "== None".
        if self._cached_string is None:
            self._cached_string = self.payload.getvalue()
        return self._cached_string

    @property
    def is_close(self):
        return self.opcode == Frame.OP_CLOSE

    @property
    def is_ping(self):
        return self.opcode == Frame.OP_PING

    @property
    def is_pong(self):
        return self.opcode == Frame.OP_PONG

    @property
    def is_text(self):
        return self.opcode == Frame.OP_TEXT

    @property
    def is_binary(self):
        return self.opcode == Frame.OP_BINARY

    @classmethod
    def make_text(cls, payload, LEN, mask=False):
        """Build a text message, pre-fragmented into wire frames."""
        message = cls(Frame.OP_TEXT, payload)
        message.frames.extend(cls.fragment_message(Frame.OP_TEXT, LEN, payload, mask))
        return message

    @staticmethod
    def fragment_message(opcode, LEN, payload, mask=False, FRAGMENT_SIZE=1452):
        """
        Fragments a message with the given opcode into multiple frames, if necessary.
        If mask is true, a mask key is randomly generated and included, even if the
        payload is empty.
        Returns a list of frames suitable for sending over the wire
        """
        # There's not really any obvious way to fragment messages, but the
        # simplest is to fragment at the MTU size: Ethernet's 1500, less 20
        # bytes IPv4 header, 20 bytes TCP header, 4 bytes for a 16-bit length
        # and possibly 4 bytes of masking key, for 1452 bytes free.
        # (Removed an unused "key = None" local from the original.)
        frames = []
        first = True
        if LEN == 0:
            frame = Frame()
            frame.configure(opcode, 0, None, FIN=1)
            if mask:
                frame.KEY = gen_mask_key()
                frame.MASK = 1
            return [frame]
        while LEN >= FRAGMENT_SIZE:
            frame = Frame()
            frame.OPCODE = opcode if first else Frame.OP_CONTINUE
            frame.LEN = FRAGMENT_SIZE
            frame.payload = StringIO(payload.read(FRAGMENT_SIZE))
            if mask:
                frame.KEY = gen_mask_key()
                frame.MASK = 1
            first = False
            LEN -= FRAGMENT_SIZE
            frames.append(frame)
        if LEN > 0:
            frame = Frame()
            frame.OPCODE = opcode if first else Frame.OP_CONTINUE
            frame.LEN = LEN
            frame.payload = StringIO(payload.read(LEN))
            if mask:
                frame.KEY = gen_mask_key()
                frame.MASK = 1
            frames.append(frame)
        frames[-1].FIN = True
        return frames
def gen_mask_key():
    """Return a random unsigned 32-bit masking key.

    Fix: the original returned randint(-2**15, 2**15-1) — a signed 16-bit
    value.  Both Frame.KEY (MultiByteProperty) and xor_payload reject
    negative keys, so roughly half of all generated keys raised ValueError.
    """
    return random.randint(0, 2 ** 32 - 1)
def xor_payload(in_stream, out_stream, mask_key, num_bytes=-1):
    "Takes in a host-order masking key and xors data from in_stream and writes to out_stream"
    if mask_key & 0xffffffff != mask_key:
        raise ValueError("Mask Key %s doesn't fit in a 32-bit integer" % (mask_key,))
    if num_bytes == -1:
        # Unbounded: consume full 4-byte words until the stream runs dry.
        chunk = in_stream.read(4)
        while len(chunk) == 4:
            (word,) = struct.unpack("!L", chunk)
            out_stream.write(struct.pack("!L", word ^ mask_key))
            chunk = in_stream.read(4)
    else:
        remaining = num_bytes
        chunk = in_stream.read(min(4, remaining))
        while len(chunk) == 4:
            remaining -= 4
            (word,) = struct.unpack("!L", chunk)
            out_stream.write(struct.pack("!L", word ^ mask_key))
            chunk = in_stream.read(min(4, remaining))
    # Whichever branch ran, handle a trailing partial word byte-by-byte.
    for i, char in enumerate(chunk):
        octet = ord(char)
        key_octet = (mask_key >> ((3 - i) * 8)) & 0xff
        out_stream.write(chr(octet ^ key_octet))
class WebSocket(object):
    """Server side of one WebSocket connection, bound to an HTTP handler."""

    def __init__(self, request, handler):
        self.request = request
        self.handler = handler
        self.buffered_message = None              # in-progress fragmented message
        self.frame_queue = collections.deque()    # frames awaiting transmission

    @staticmethod
    def validate(request, policies):
        """Check an upgrade request against the handshake policies.

        Raises PolicyError for policy rejections and ValueError for a
        malformed handshake.
        """
        # Check if we support their WebSocket version.  Default the header to
        # "" so a missing header fails cleanly instead of raising
        # AttributeError from split_field(None).
        versions = split_field(request.headers.get("Sec-WebSocket-Version", ""))
        if not policies["version"].accept(versions):
            raise PolicyError("Unsupported version")
        # RFC 6455 4.2.1: Connection is a token list that must include
        # "Upgrade", compared case-insensitively (e.g. "keep-alive, Upgrade").
        # The original required the header to equal "Upgrade" exactly.
        connection = split_field(request.headers.get("Connection", ""))
        if "upgrade" not in [token.lower() for token in connection]:
            raise ValueError("Invalid upgrade request")
        # Do a sanity check on the client key
        client_key = request.headers.get("Sec-WebSocket-Key", "").strip()
        if not validate_key(client_key):
            raise ValueError("Invalid client key")
        origin_policy = policies.get("origin", UnrestrictedPolicy())
        host_policy = policies.get("host", UnrestrictedPolicy())
        # Check if we accept their origin
        if not origin_policy.accept([request.headers.get("Origin")]):
            raise PolicyError("Origin not accepted")
        # Check if we accept the host
        if not host_policy.accept([request.headers.get("Host")]):
            raise PolicyError("Host not accepted")

    def negotiate(self, policies):
        """Pick version/protocol per policy and send the 101 switching reply."""
        version_string = self.request.headers.get("Sec-WebSocket-Version")
        client_versions = split_field(version_string)
        self.version = policies["version"].choose(client_versions)
        protocol_string = self.request.headers.get("Sec-WebSocket-Protocol", "")
        client_protocols = split_field(protocol_string)
        self.protocol = policies["protocol"].choose(client_protocols)
        client_key = self.request.headers.get("Sec-WebSocket-Key", "").strip()
        self.accept_key = compute_key(client_key)
        self.handler.send_response(101)
        self.handler.send_header("Upgrade", "websocket")
        self.handler.send_header("Connection", "Upgrade")
        self.handler.send_header("Sec-WebSocket-Accept", self.accept_key)
        self.handler.send_header("Sec-WebSocket-Protocol", self.protocol)
        self.handler.send_header("Sec-WebSocket-Version", self.version)
        self.handler.end_headers()

    def process_frame(self, frame):
        """Fold one frame into a message.

        Returns the completed Message, or None while a fragmented message is
        still being assembled.  Raises ValueError on fragmentation violations.
        """
        # Fix: is_control is a method; the original tested the bare attribute,
        # which is always truthy, so *every* frame took the control path.
        if frame.is_control():
            if not frame.FIN:
                raise ValueError("Got a fragmented control message")
            message = Message(frame.OPCODE)
            message.extend(frame)
            message.finish()
            return message
        # Fix: Frame defines no is_continuation attribute (the original would
        # have raised AttributeError here); test the opcode directly.
        is_continuation = frame.OPCODE == Frame.OP_CONTINUE
        if self.buffered_message is not None:
            if not is_continuation:
                raise ValueError("Got a new data frame before fragmented data frame was finished")
        elif is_continuation:
            raise ValueError("Got a data frame continuation before receiving first data frame")
        else:
            self.buffered_message = Message(frame.OPCODE)
        self.buffered_message.extend(frame)
        if frame.FIN:
            message = self.buffered_message
            message.finish()
            self.buffered_message = None
            return message
        return None

    def read_message(self):
        """Block until a complete message has been received and return it."""
        message = self.process_frame(self.read_frame())
        while message is None:
            message = self.process_frame(self.read_frame())
        return message

    def read_frame(self):
        """Read a single frame from the connection."""
        return Frame.read_from(self.handler.rfile)

    def send_message(self, message, override=False):
        """
        Enqueue a message's frames to be sent over the wire and block until
        the frame queue is empty. Control frames may be sent in-between.
        If override is set to true, the frame is enqueued at the head of the
        frame queue. This should only be used for control frames.
        """
        if override:
            self.frame_queue.appendleft(message.frames[0])
        else:
            self.frame_queue.extend(message.frames)
        while self.frame_queue:
            self.send_frame(self.frame_queue.popleft())

    def send_frame(self, frame):
        "Sends a frame over the wire"
        frame.write_to(self.handler.wfile)
        self.handler.wfile.flush()
import collections
import hashlib
import random
import struct
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
from httplite import HTTPResponse
__all__ = ["WebSocket", "SingleOptionPolicy", "UnrestrictedPolicy"]
def split_field(s):
    """Split a comma-separated header value into whitespace-stripped tokens."""
    parts = s.split(",")
    return [part.strip() for part in parts]
def validate_key(client_key):
    """Return True iff client_key is valid base64 decoding to exactly 16 bytes.

    RFC 6455 requires Sec-WebSocket-Key to be a base64-encoded 16-byte nonce.
    """
    try:
        decoded = base64.standard_b64decode(client_key)
    except (TypeError, ValueError):
        # Malformed base64: TypeError on Python 2, binascii.Error (a ValueError
        # subclass) on Python 3.  The original bare "except:" hid real bugs too.
        return False
    return len(decoded) == 16
def compute_key(client_key):
    """Compute the Sec-WebSocket-Accept digest for a client key (RFC 6455).

    Accepts str or bytes.  On Python 2 behavior is unchanged; on Python 3 the
    key is encoded first because hashlib only accepts bytes there.
    """
    GUID = b"258EAFA5-E914-47DA-95CA-C5AB0DC85B11"
    if not isinstance(client_key, bytes):
        # Sec-WebSocket-Key is base64, hence pure ASCII.
        client_key = client_key.encode("ascii")
    digest = hashlib.sha1(client_key + GUID).digest()
    return base64.standard_b64encode(digest)
class Policy(object):
    """Pluggable accept/choose strategy for negotiating handshake options."""

    def __init__(self):
        pass

    def set_acceptor(self, func):
        # Bind the acceptance predicate on this instance.
        self.accept = func

    def set_chooser(self, func):
        # Bind the selection function on this instance.
        self.choose = func

    @classmethod
    def make_policy(cls, accept, choose):
        """Build a policy from an acceptance predicate and a chooser."""
        instance = cls()
        instance.set_acceptor(accept)
        instance.set_chooser(choose)
        return instance
class SingleOptionPolicy(Policy):
    """Policy that accepts and always chooses one fixed option."""

    def __init__(self, option):
        self.option = option

    def accept(self, options):
        # Accept only if our single option is among those offered.
        return self.option in options

    def choose(self, options):
        return self.option
class UnrestrictedPolicy(Policy):
    """Policy that accepts anything and picks the first offered option."""

    def accept(self, options):
        return True

    def choose(self, options):
        return options[0]
class PolicyError(ValueError):
    """Raised when a handshake value is rejected by a negotiation policy."""
class BitProperty(object):
    """Descriptor exposing one bit of an integer attribute as a 0/1 value."""

    def __init__(self, field, position):
        # position counts from the LSB (position 0).
        self.field = field
        self.position = position
        self.mask = 1 << position

    def __get__(self, obj, type_):
        current = getattr(obj, self.field)
        return 1 if current & self.mask else 0

    def __set__(self, obj, value):
        if value < 0 or value > 1:
            raise ValueError("Value %s cannot fit in a 1-bit field" % (value,))
        current = getattr(obj, self.field, 0)
        cleared = (current | self.mask) ^ self.mask  # zero out the target bit
        setattr(obj, self.field, cleared | (value << self.position))
class MultiBitProperty(object):
    """Descriptor exposing a contiguous bit range of an integer attribute.

    The range is inclusive; start/end may be given in either order and count
    from the LSB (bit 0).
    """

    def __init__(self, field, start, end):
        self.field = field
        if start > end:
            start, end = end, start
        self.start = start
        self.end = end
        self.width = end - start + 1
        # Mask covering bits start..end (computed arithmetically; the original
        # built it with xrange, which does not exist on Python 3).
        self.mask = ((1 << self.width) - 1) << start

    def __get__(self, obj, type_):
        return (getattr(obj, self.field) & self.mask) >> self.start

    def __set__(self, obj, value):
        if value != (value & (self.mask >> self.start)):
            # Fix: the original referenced an undefined name ("width") here,
            # so the error path raised NameError instead of this ValueError.
            raise ValueError("Value %s can't fit in a %d-bit field" % (value, self.width))
        current = getattr(obj, self.field, 0)
        # Clear the bits occupied by the mask, then or in the new value.
        current = (current | self.mask) ^ self.mask
        setattr(obj, self.field, current | (value << self.start))
class ByteProperty(object):
    """Descriptor exposing an attribute constrained to a single byte (0-255)."""

    def __init__(self, field):
        self.field = field

    def __get__(self, obj, type_):
        return getattr(obj, self.field)

    def __set__(self, obj, value):
        if value != value & 0xff:
            raise ValueError("Value %s can't fit in a 1-byte field" % (value,))
        setattr(obj, self.field, value)

    def __delete__(self, obj):
        # Fix: __delete__ takes (self, obj); the original's extra "value"
        # parameter made every "del" raise TypeError.
        if hasattr(obj, self.field):
            delattr(obj, self.field)
class MultiByteProperty(object):
    """Descriptor combining several one-byte attributes into one big-endian int."""

    def __init__(self, *fields):
        self.fields = fields
        self.width = len(fields)

    def __get__(self, obj, type_):
        result = 0
        for name in self.fields:
            result = (result << 8) | getattr(obj, name)
        return result

    def __set__(self, obj, value):
        if value >> (self.width * 8) != 0:
            raise ValueError("Value %s can't fit in a %d-byte field" % (value, self.width))
        remaining = value
        # Fill least-significant field first.
        for name in reversed(self.fields):
            setattr(obj, name, remaining & 0xff)
            remaining >>= 8

    def __delete__(self, obj):
        for name in self.fields:
            if hasattr(obj, name):
                delattr(obj, name)
class Frame(object):
    """One WebSocket wire frame (RFC 6455 section 5.2).

    Header fields are exposed through bit/byte descriptors over the raw
    header bytes; LEN transparently selects the 7/16/64-bit length encoding.
    Payloads are handled as Python 2 byte strings via StringIO.
    """

    OP_CONTINUE = 0x0
    OP_TEXT = 0x1
    OP_BINARY = 0x2
    OP_CLOSE = 0x8
    OP_PING = 0x9
    OP_PONG = 0xA
    # Opcodes 0x8-0xF are control frames.  range() works on Python 2 and 3
    # alike; the original used xrange.
    CONTROL_OPCODES = frozenset(range(0x8, 0xF + 1))

    @classmethod
    def read_from(cls, stream):
        """Parse one frame from a binary stream; raises EOFError on a short header."""
        frame = cls()
        stream_read = stream.read

        def next_byte():
            char = stream_read(1)
            if not char:
                raise EOFError()
            return ord(char)

        frame.control_byte = next_byte()
        frame.len_byte = next_byte()
        if frame.LEN8 >= 126:
            frame.len_ext16_byte1 = next_byte()
            frame.len_ext16_byte2 = next_byte()
            if frame.LEN8 == 127:
                frame.len_ext64_byte3 = next_byte()
                frame.len_ext64_byte4 = next_byte()
                frame.len_ext64_byte5 = next_byte()
                frame.len_ext64_byte6 = next_byte()
                frame.len_ext64_byte7 = next_byte()
                frame.len_ext64_byte8 = next_byte()
        if frame.MASK:
            frame.mask_byte1 = next_byte()
            frame.mask_byte2 = next_byte()
            frame.mask_byte3 = next_byte()
            frame.mask_byte4 = next_byte()
            frame.payload = StringIO()
            xor_payload(stream, frame.payload, frame.KEY, frame.LEN)
            frame.payload.seek(0)
        else:
            frame.payload = StringIO(stream_read(frame.LEN))
        return frame

    def __init__(self, **kwargs):
        self.payload = None

    def configure(self, OPCODE, LEN, payload, FIN=1, MASK_KEY=None):
        """Populate the header fields and buffer LEN bytes of payload."""
        self.FIN = FIN
        self.OPCODE = OPCODE
        self.LEN = LEN
        if MASK_KEY is not None:
            self.KEY = MASK_KEY
        if hasattr(payload, "read"):
            self.payload = StringIO(payload.read(LEN))
        else:
            self.payload = StringIO(payload)

    FIN = BitProperty("control_byte", 7)
    RSV1 = BitProperty("control_byte", 6)
    RSV2 = BitProperty("control_byte", 5)
    RSV3 = BitProperty("control_byte", 4)
    OPCODE = MultiBitProperty("control_byte", 0, 3)
    MASK = BitProperty("len_byte", 7)
    LEN8 = MultiBitProperty("len_byte", 0, 6)
    LENx16 = MultiByteProperty("len_ext16_byte1", "len_ext16_byte2")
    # Fix: the original omitted the comma after "len_ext64_byte6", so adjacent
    # string literals fused two field names into one and the 64-bit length
    # spanned only seven bytes, breaking write_to()'s per-byte access.
    LENx64 = MultiByteProperty("len_ext16_byte1", "len_ext16_byte2",
                               "len_ext64_byte3", "len_ext64_byte4",
                               "len_ext64_byte5", "len_ext64_byte6",
                               "len_ext64_byte7", "len_ext64_byte8")
    KEY = MultiByteProperty("mask_byte1", "mask_byte2",
                            "mask_byte3", "mask_byte4")

    def _get_len(self):
        # 126 and 127 in the 7-bit field signal the 16/64-bit extended lengths.
        if self.LEN8 == 126:
            return self.LENx16
        elif self.LEN8 == 127:
            return self.LENx64
        else:
            return self.LEN8

    def _set_len(self, value):
        if 0 <= value <= 125:
            del self.LENx64
            self.LEN8 = value
        elif value < 2 ** 16:
            del self.LENx64
            self.LEN8 = 126
            self.LENx16 = value
        elif value < 2 ** 64:
            self.LEN8 = 127
            self.LENx64 = value
        else:
            raise ValueError("Payload size can't fit in a 64-bit integer")

    LEN = property(_get_len, _set_len)

    def is_control(self):
        """Return True if this frame carries a control opcode (0x8-0xF)."""
        return self.OPCODE in self.CONTROL_OPCODES

    def write_to(self, stream):
        """Serialize the frame (header then payload) onto a stream."""
        stream.write(chr(self.control_byte))
        stream.write(chr(self.len_byte))
        if self.LEN8 >= 126:
            stream.write(chr(self.len_ext16_byte1))
            stream.write(chr(self.len_ext16_byte2))
            if self.LEN8 == 127:
                stream.write(chr(self.len_ext64_byte3))
                stream.write(chr(self.len_ext64_byte4))
                stream.write(chr(self.len_ext64_byte5))
                stream.write(chr(self.len_ext64_byte6))
                stream.write(chr(self.len_ext64_byte7))
                stream.write(chr(self.len_ext64_byte8))
        if self.MASK:
            stream.write(chr(self.mask_byte1))
            stream.write(chr(self.mask_byte2))
            stream.write(chr(self.mask_byte3))
            stream.write(chr(self.mask_byte4))
        if self.payload:
            if self.MASK:
                xor_payload(self.payload, stream, self.KEY, self.LEN)
            else:
                stream.write(self.payload.read(self.LEN))
class Message(object):
    """A logical WebSocket message assembled from one or more frames."""

    def __init__(self, opcode, payload=None):
        self.opcode = opcode
        self.payload = payload if payload is not None else StringIO()
        self.complete = False
        self._cached_string = None
        self.frames = []

    def extend(self, frame):
        """Append a frame and copy its payload into the message buffer."""
        self.frames.append(frame)
        self.payload.write(frame.payload.read())
        frame.payload.seek(0)

    def finish(self):
        """Mark the message as fully received."""
        self.complete = True

    @property
    def payload_string(self):
        # Fix: use the "is None" identity idiom instead of "== None".
        if self._cached_string is None:
            self._cached_string = self.payload.getvalue()
        return self._cached_string

    @property
    def is_close(self):
        return self.opcode == Frame.OP_CLOSE

    @property
    def is_ping(self):
        return self.opcode == Frame.OP_PING

    @property
    def is_pong(self):
        return self.opcode == Frame.OP_PONG

    @property
    def is_text(self):
        return self.opcode == Frame.OP_TEXT

    @property
    def is_binary(self):
        return self.opcode == Frame.OP_BINARY

    @classmethod
    def make_text(cls, payload, LEN, mask=False):
        """Build a text message, pre-fragmented into wire frames."""
        message = cls(Frame.OP_TEXT, payload)
        message.frames.extend(cls.fragment_message(Frame.OP_TEXT, LEN, payload, mask))
        return message

    @staticmethod
    def fragment_message(opcode, LEN, payload, mask=False, FRAGMENT_SIZE=1452):
        """
        Fragments a message with the given opcode into multiple frames, if necessary.
        If mask is true, a mask key is randomly generated and included, even if the
        payload is empty.
        Returns a list of frames suitable for sending over the wire
        """
        # There's not really any obvious way to fragment messages, but the
        # simplest is to fragment at the MTU size: Ethernet's 1500, less 20
        # bytes IPv4 header, 20 bytes TCP header, 4 bytes for a 16-bit length
        # and possibly 4 bytes of masking key, for 1452 bytes free.
        # (Removed an unused "key = None" local from the original.)
        frames = []
        first = True
        if LEN == 0:
            frame = Frame()
            frame.configure(opcode, 0, None, FIN=1)
            if mask:
                frame.KEY = gen_mask_key()
                frame.MASK = 1
            return [frame]
        while LEN >= FRAGMENT_SIZE:
            frame = Frame()
            frame.OPCODE = opcode if first else Frame.OP_CONTINUE
            frame.LEN = FRAGMENT_SIZE
            frame.payload = StringIO(payload.read(FRAGMENT_SIZE))
            if mask:
                frame.KEY = gen_mask_key()
                frame.MASK = 1
            first = False
            LEN -= FRAGMENT_SIZE
            frames.append(frame)
        if LEN > 0:
            frame = Frame()
            frame.OPCODE = opcode if first else Frame.OP_CONTINUE
            frame.LEN = LEN
            frame.payload = StringIO(payload.read(LEN))
            if mask:
                frame.KEY = gen_mask_key()
                frame.MASK = 1
            frames.append(frame)
        frames[-1].FIN = True
        return frames
def gen_mask_key():
    """Return a random unsigned 32-bit masking key.

    Fix: the original returned randint(-2**15, 2**15-1) — a signed 16-bit
    value.  Both Frame.KEY (MultiByteProperty) and xor_payload reject
    negative keys, so roughly half of all generated keys raised ValueError.
    """
    return random.randint(0, 2 ** 32 - 1)
def xor_payload(in_stream, out_stream, mask_key, num_bytes=-1):
    "Takes in a host-order masking key and xors data from in_stream and writes to out_stream"
    if mask_key & 0xffffffff != mask_key:
        raise ValueError("Mask Key %s doesn't fit in a 32-bit integer" % (mask_key,))
    if num_bytes == -1:
        # Unbounded: consume full 4-byte words until the stream runs dry.
        chunk = in_stream.read(4)
        while len(chunk) == 4:
            (word,) = struct.unpack("!L", chunk)
            out_stream.write(struct.pack("!L", word ^ mask_key))
            chunk = in_stream.read(4)
    else:
        remaining = num_bytes
        chunk = in_stream.read(min(4, remaining))
        while len(chunk) == 4:
            remaining -= 4
            (word,) = struct.unpack("!L", chunk)
            out_stream.write(struct.pack("!L", word ^ mask_key))
            chunk = in_stream.read(min(4, remaining))
    # Whichever branch ran, handle a trailing partial word byte-by-byte.
    for i, char in enumerate(chunk):
        octet = ord(char)
        key_octet = (mask_key >> ((3 - i) * 8)) & 0xff
        out_stream.write(chr(octet ^ key_octet))
class WebSocket(object):
    """Server side of one WebSocket connection on top of an HTTP handler.

    Provides the upgrade handshake (``validate``/``negotiate``) and frame
    level I/O (``read_frame``/``read_message``, ``send_frame``/``send_message``).
    """

    def __init__(self, request, handler):
        # request/handler come from the HTTP server that accepted the upgrade.
        self.request = request
        self.handler = handler
        # Partially assembled fragmented message; None between messages.
        self.buffered_message = None
        # Outgoing frames; control frames may be pushed to the front.
        self.frame_queue = collections.deque()

    @staticmethod
    def validate(request, policies):
        """Reject an upgrade request that fails the handshake or policies.

        Raises PolicyError for policy violations and ValueError for a
        malformed handshake.
        """
        # Check if we support their WebSocket version
        versions = split_field(request.headers.get("Sec-WebSocket-Version"))
        if not policies["version"].accept(versions):
            raise PolicyError("Unsupported version")
        # Check that the upgrade request looks sane
        if request.headers.get("Connection") != "Upgrade":
            raise ValueError("Invalid upgrade request")
        # Do a sanity check on the client key
        client_key = request.headers.get("Sec-WebSocket-Key", "").strip()
        if not validate_key(client_key):
            raise ValueError("Invalid client key")
        # Missing origin/host policies default to accept-anything.
        origin_policy = policies.get("origin", UnrestrictedPolicy())
        host_policy = policies.get("host", UnrestrictedPolicy())
        # Check if we accept their origin
        if not origin_policy.accept([request.headers.get("Origin")]):
            raise PolicyError("Origin not accepted")
        # Check if we accept the host
        if not host_policy.accept([request.headers.get("Host")]):
            raise PolicyError("Host not accepted")

    def negotiate(self, policies):
        """Choose version/protocol per the policies and send the 101 reply."""
        version_string = self.request.headers.get("Sec-WebSocket-Version")
        client_versions = split_field(version_string)
        self.version = policies["version"].choose(client_versions)
        protocol_string = self.request.headers.get("Sec-WebSocket-Protocol", "")
        client_protocols = split_field(protocol_string)
        self.protocol = policies["protocol"].choose(client_protocols)
        client_key = self.request.headers.get("Sec-WebSocket-Key", "").strip()
        # Accept key is derived from the client key (RFC 6455 handshake).
        self.accept_key = compute_key(client_key)
        self.handler.send_response(101)
        self.handler.send_header("Upgrade", "websocket")
        self.handler.send_header("Connection", "Upgrade")
        self.handler.send_header("Sec-WebSocket-Accept", self.accept_key)
        self.handler.send_header("Sec-WebSocket-Protocol", self.protocol)
        self.handler.send_header("Sec-WebSocket-Version", self.version)
        self.handler.end_headers()

    def process_frame(self, frame):
        """Fold one frame into the message being assembled.

        Returns a finished Message, or None when more continuation frames
        are needed.  Raises ValueError on invalid fragmentation.
        """
        if frame.is_control:
            # Control frames may interleave with fragments but may not
            # themselves be fragmented.
            if not frame.FIN:
                raise ValueError("Got a fragmented control message")
            else:
                message = Message(frame.OPCODE)
                message.extend(frame)
                message.finish()
                return message
        if self.buffered_message is not None:
            if not frame.is_continuation:
                raise ValueError("Got a new data frame before fragmented data frame was finished")
        elif frame.is_continuation:
            raise ValueError("Got a data frame continuation before receiving first data frame")
        else:
            # First frame of a (possibly fragmented) data message.
            self.buffered_message = Message(frame.OPCODE)
        self.buffered_message.extend(frame)
        if frame.FIN:
            message = self.buffered_message
            message.finish()
            self.buffered_message = None
            return message
        else:
            return None

    def read_message(self):
        """Block until a complete message has been assembled; return it."""
        message = self.process_frame(self.read_frame())
        while message is None:
            message = self.process_frame(self.read_frame())
        return message

    def read_frame(self):
        """Read a single frame from the connection's input stream."""
        frame = Frame.read_from(self.handler.rfile)
        return frame

    def send_message(self, message, override=False):
        """
        Enqueue a message's frames to be sent over the wire and block until
        the frame queue is empty. Control frames may be sent in-between.
        If override is set to true, the frame is enqueued at the head of the
        frame queue. This should only be used for control frames.
        """
        if override:
            self.frame_queue.appendleft(message.frames[0])
        else:
            self.frame_queue.extend(message.frames)
        while self.frame_queue:
            self.send_frame(self.frame_queue.popleft())

    def send_frame(self, frame):
        """Send a frame over the wire and flush immediately."""
        frame.write_to(self.handler.wfile)
        self.handler.wfile.flush()
import datetime
import json
import logging
import os
from django.conf import settings
from django.http import HttpResponseForbidden, HttpResponse
from django.shortcuts import render, redirect
from django.views.generic import DetailView, ListView
from websocket import create_connection
from websocket._exceptions import WebSocketBadStatusException
from staticpages.models import StaticPage, StaticPageNav
from django.http import HttpResponseForbidden, request
from copy import deepcopy
from .models import Event, EventAttendees
from .forms import PasscodeForm
logger = logging.getLogger('date')
class IndexView(ListView):
    """Front page: upcoming events in date order, past events newest first."""
    model = Event
    template_name = 'events/index.html'

    def get_context_data(self, **kwargs):
        """Split published events into upcoming and past lists."""
        context = super(IndexView, self).get_context_data(**kwargs)
        published = Event.objects.filter(published=True)
        context['event_list'] = published.filter(
            event_date_end__gte=datetime.date.today()).order_by('event_date_start')
        context['past_events'] = published.filter(
            event_date_end__lte=datetime.date.today()).order_by('event_date_start').reverse()
        return context
class EventDetailView(DetailView):
    """Detail page for one Event: passcode gating, members-only gating,
    and signup form handling (including avec registrations)."""
    model = Event
    template_name = 'events/detail.html'

    def get_template_names(self):
        """Pick the template: special template for 'årsfest'; passcode
        prompt while the event's passcode has not been entered yet."""
        template_name = 'events/detail.html'
        logger.debug(self.get_context_data().get('event').title.lower())
        if self.get_context_data().get('event').title.lower() == 'årsfest':
            template_name = 'events/arsfest.html'
        # Session stores the accepted passcode; anything else shows the prompt.
        if self.object.passcode and self.object.passcode != self.request.session.get('passcode_status', False):
            template_name = 'events/event_passcode.html'
        return template_name

    def get_context_data(self, **kwargs):
        """Add the active form (passcode or registration) and, for
        'Årsfest', its static pages to the context."""
        context = super(EventDetailView, self).get_context_data(**kwargs)
        form = kwargs.pop('form', None)
        # Locked events get the passcode form instead of the signup form.
        if self.object.passcode and self.object.passcode != self.request.session.get('passcode_status', False):
            form = PasscodeForm
        if form:
            context['form'] = form
        else:
            context['form'] = self.object.make_registration_form()
        baal_staticnav = StaticPageNav.objects.filter(category_name="Årsfest")
        if len(baal_staticnav) > 0:
            baal_staticpages = StaticPage.objects.filter(category=baal_staticnav[0].pk)
            context['staticpages'] = baal_staticpages
        return context

    def get(self, request, *args, **kwargs):
        """Render the event; members-only events redirect guests to login."""
        self.object = self.get_object()
        show_content = not self.object.members_only or (self.object.members_only and request.user.is_authenticated)
        if not show_content:
            return redirect('/members/login')
        context = self.get_context_data(object=self.object)
        return self.render_to_response(context)

    def post(self, request, *args, **kwargs):
        """Handle a passcode submission or a signup-form submission."""
        self.object = self.get_object()
        # set passcode status to session if passcode is enabled
        if self.object.passcode and self.object.passcode != self.request.session.get('passcode_status', False):
            if self.object.passcode == request.POST.get('passcode'):
                self.request.session['passcode_status'] = self.object.passcode
                return render(self.request, 'events/detail.html', self.get_context_data())
            else:
                return render(self.request, 'events/event_passcode.html', self.get_context_data(passcode_error='invalid passcode'))
        # NOTE(review): because `and` binds tighter than `or`, the
        # registration-window checks are bypassed for "others" and for
        # commodore members — appears intended per the comment below.
        if self.object.sign_up and (request.user.is_authenticated
                                    and self.object.registration_is_open_members()
                                    or self.object.registration_is_open_others()
                                    or request.user.groups.filter(name="commodore").exists()):  # Temp fix to allow commodore peeps to enter pre-signed up attendees
            form = self.object.make_registration_form().__call__(data=request.POST)
            if form.is_valid():
                public_info = self.object.get_registration_form_public_info()
                # Do not send ws data on refresh after initial signup.
                if not EventAttendees.objects.filter(email=request.POST.get('email'), event=self.object.id).first():
                    logger.info(f"User {request.user} signed up with name: {request.POST.get('user')}")
                    ws_send(request, form, public_info)
                return self.form_valid(form)
            return self.form_invalid(form)
        return HttpResponseForbidden()

    def form_valid(self, form):
        """Persist the attendance (and any avec attendance), then render."""
        attendee = self.get_object().add_event_attendance(user=form.cleaned_data['user'], email=form.cleaned_data['email'],
                                                          anonymous=form.cleaned_data['anonymous'], preferences=form.cleaned_data)
        if 'avec' in form.cleaned_data and form.cleaned_data['avec']:
            avec_data = {'avec_for': attendee}
            # Collect every avec_* field into a second attendance record.
            for key in form.cleaned_data:
                if key.startswith('avec_'):
                    field_name = key.split('avec_')[1]
                    value = form.cleaned_data[key]
                    avec_data[field_name] = value
            self.get_object().add_event_attendance(user=avec_data['user'], email=avec_data['email'],
                                                   anonymous=avec_data['anonymous'], preferences=avec_data, avec_for=avec_data['avec_for'])
        if self.get_context_data().get('event').title.lower() == 'årsfest':
            return redirect('/events/arsfest/#/anmalda')
        return render(self.request, self.template_name, self.get_context_data())

    def form_invalid(self, form):
        """Re-render with form errors (special template for 'årsfest')."""
        if self.get_context_data().get('event').title.lower() == 'årsfest':
            return render(self.request, 'events/arsfest.html', self.get_context_data(form=form))
        return render(self.request, self.template_name, self.get_context_data(form=form))
def ws_send(request, form, public_info):
    """Push a signup notification over the site's websocket endpoint.

    Sends one payload for the attendee and, when an avec is registered,
    a second payload carrying only the avec's display name.
    """
    scheme = 'ws' if settings.DEVELOP else 'wss'
    endpoint = scheme + '://' + request.META.get('HTTP_HOST') + '/ws' + request.path
    try:
        conn = create_connection(endpoint)
        conn.send(json.dumps(ws_data(form, public_info)))
        # Send ws again if avec
        if dict(form.cleaned_data).get('avec'):
            avec_form = deepcopy(form)
            avec_form.cleaned_data['user'] = dict(avec_form.cleaned_data).get('avec_user')
            conn.send(json.dumps(ws_data(avec_form, '')))
        conn.close()
    except WebSocketBadStatusException:
        logger.error("Could not create connection for web socket")
        # TODO: alert the maintainers (Datörer)
def ws_data(form, public_info):
    """Build the websocket payload for one signup.

    Only the display name plus the fields listed in ``public_info`` are
    forwarded; anonymous attendees are shown as "Anonymous".

    Fix: removed a stray debug ``print(data)`` that leaked attendee data
    to stdout, and the unused ``enumerate`` index.
    """
    pref = dict(form.cleaned_data)  # copy so the form is never mutated
    data = {}
    data['user'] = "Anonymous" if pref['anonymous'] else pref['user']
    # parse the public info and only send that through websockets.
    for info in public_info:
        if str(info) in pref:
            data[str(info)] = pref[str(info)]
    return {"data": data}
import json
import logging
import os
from django.conf import settings
from django.http import HttpResponseForbidden, HttpResponse
from django.shortcuts import render, redirect
from django.views.generic import DetailView, ListView
from websocket import create_connection
from websocket._exceptions import WebSocketBadStatusException
from staticpages.models import StaticPage, StaticPageNav
from django.http import HttpResponseForbidden, request
from copy import deepcopy
from .models import Event, EventAttendees
from .forms import PasscodeForm
logger = logging.getLogger('date')
class IndexView(ListView):
    """Front page: upcoming events in date order, past events newest first."""
    model = Event
    template_name = 'events/index.html'

    def get_context_data(self, **kwargs):
        """Split published events into upcoming and past lists."""
        context = super(IndexView, self).get_context_data(**kwargs)
        published = Event.objects.filter(published=True)
        context['event_list'] = published.filter(
            event_date_end__gte=datetime.date.today()).order_by('event_date_start')
        context['past_events'] = published.filter(
            event_date_end__lte=datetime.date.today()).order_by('event_date_start').reverse()
        return context
class EventDetailView(DetailView):
    """Detail page for one Event: passcode gating, members-only gating,
    and signup form handling (including avec registrations)."""
    model = Event
    template_name = 'events/detail.html'

    def get_template_names(self):
        """Pick the template: special template for 'årsfest'; passcode
        prompt while the event's passcode has not been entered yet."""
        template_name = 'events/detail.html'
        logger.debug(self.get_context_data().get('event').title.lower())
        if self.get_context_data().get('event').title.lower() == 'årsfest':
            template_name = 'events/arsfest.html'
        # Session stores the accepted passcode; anything else shows the prompt.
        if self.object.passcode and self.object.passcode != self.request.session.get('passcode_status', False):
            template_name = 'events/event_passcode.html'
        return template_name

    def get_context_data(self, **kwargs):
        """Add the active form (passcode or registration) and, for
        'Årsfest', its static pages to the context."""
        context = super(EventDetailView, self).get_context_data(**kwargs)
        form = kwargs.pop('form', None)
        # Locked events get the passcode form instead of the signup form.
        if self.object.passcode and self.object.passcode != self.request.session.get('passcode_status', False):
            form = PasscodeForm
        if form:
            context['form'] = form
        else:
            context['form'] = self.object.make_registration_form()
        baal_staticnav = StaticPageNav.objects.filter(category_name="Årsfest")
        if len(baal_staticnav) > 0:
            baal_staticpages = StaticPage.objects.filter(category=baal_staticnav[0].pk)
            context['staticpages'] = baal_staticpages
        return context

    def get(self, request, *args, **kwargs):
        """Render the event; members-only events redirect guests to login."""
        self.object = self.get_object()
        show_content = not self.object.members_only or (self.object.members_only and request.user.is_authenticated)
        if not show_content:
            return redirect('/members/login')
        context = self.get_context_data(object=self.object)
        return self.render_to_response(context)

    def post(self, request, *args, **kwargs):
        """Handle a passcode submission or a signup-form submission."""
        self.object = self.get_object()
        # set passcode status to session if passcode is enabled
        if self.object.passcode and self.object.passcode != self.request.session.get('passcode_status', False):
            if self.object.passcode == request.POST.get('passcode'):
                self.request.session['passcode_status'] = self.object.passcode
                return render(self.request, 'events/detail.html', self.get_context_data())
            else:
                return render(self.request, 'events/event_passcode.html', self.get_context_data(passcode_error='invalid passcode'))
        # NOTE(review): because `and` binds tighter than `or`, the
        # registration-window checks are bypassed for "others" and for
        # commodore members — appears intended per the comment below.
        if self.object.sign_up and (request.user.is_authenticated
                                    and self.object.registration_is_open_members()
                                    or self.object.registration_is_open_others()
                                    or request.user.groups.filter(name="commodore").exists()):  # Temp fix to allow commodore peeps to enter pre-signed up attendees
            form = self.object.make_registration_form().__call__(data=request.POST)
            if form.is_valid():
                public_info = self.object.get_registration_form_public_info()
                # Do not send ws data on refresh after initial signup.
                if not EventAttendees.objects.filter(email=request.POST.get('email'), event=self.object.id).first():
                    logger.info(f"User {request.user} signed up with name: {request.POST.get('user')}")
                    ws_send(request, form, public_info)
                return self.form_valid(form)
            return self.form_invalid(form)
        return HttpResponseForbidden()

    def form_valid(self, form):
        """Persist the attendance (and any avec attendance), then render."""
        attendee = self.get_object().add_event_attendance(user=form.cleaned_data['user'], email=form.cleaned_data['email'],
                                                          anonymous=form.cleaned_data['anonymous'], preferences=form.cleaned_data)
        if 'avec' in form.cleaned_data and form.cleaned_data['avec']:
            avec_data = {'avec_for': attendee}
            # Collect every avec_* field into a second attendance record.
            for key in form.cleaned_data:
                if key.startswith('avec_'):
                    field_name = key.split('avec_')[1]
                    value = form.cleaned_data[key]
                    avec_data[field_name] = value
            self.get_object().add_event_attendance(user=avec_data['user'], email=avec_data['email'],
                                                   anonymous=avec_data['anonymous'], preferences=avec_data, avec_for=avec_data['avec_for'])
        if self.get_context_data().get('event').title.lower() == 'årsfest':
            return redirect('/events/arsfest/#/anmalda')
        return render(self.request, self.template_name, self.get_context_data())

    def form_invalid(self, form):
        """Re-render with form errors (special template for 'årsfest')."""
        if self.get_context_data().get('event').title.lower() == 'årsfest':
            return render(self.request, 'events/arsfest.html', self.get_context_data(form=form))
        return render(self.request, self.template_name, self.get_context_data(form=form))
def ws_send(request, form, public_info):
    """Push a signup notification over the site's websocket endpoint.

    Sends one payload for the attendee and, when an avec is registered,
    a second payload carrying only the avec's display name.
    """
    scheme = 'ws' if settings.DEVELOP else 'wss'
    endpoint = scheme + '://' + request.META.get('HTTP_HOST') + '/ws' + request.path
    try:
        conn = create_connection(endpoint)
        conn.send(json.dumps(ws_data(form, public_info)))
        # Send ws again if avec
        if dict(form.cleaned_data).get('avec'):
            avec_form = deepcopy(form)
            avec_form.cleaned_data['user'] = dict(avec_form.cleaned_data).get('avec_user')
            conn.send(json.dumps(ws_data(avec_form, '')))
        conn.close()
    except WebSocketBadStatusException:
        logger.error("Could not create connection for web socket")
        # TODO: alert the maintainers (Datörer)
def ws_data(form, public_info):
    """Build the websocket payload for one signup.

    Only the display name plus the fields listed in ``public_info`` are
    forwarded; anonymous attendees are shown as "Anonymous".

    Fix: removed a stray debug ``print(data)`` that leaked attendee data
    to stdout, and the unused ``enumerate`` index.
    """
    pref = dict(form.cleaned_data)  # copy so the form is never mutated
    data = {}
    data['user'] = "Anonymous" if pref['anonymous'] else pref['user']
    # parse the public info and only send that through websockets.
    for info in public_info:
        if str(info) in pref:
            data[str(info)] = pref[str(info)]
    return {"data": data}
import sys
import mock
import keiko.app
class TestApp(object):
    """Smoke test only: every endpoint answers with the expected status."""

    def setup(self):
        # Replace the hardware driver and JSON serialisation with mocks.
        keiko.app.app.keiko = mock.MagicMock()
        keiko.app.jsonify = mock.MagicMock(return_value='')
        self.app = keiko.app.app.test_client()

    def _status(self, path):
        """Return the status code of a GET against *path*."""
        return self.app.get(path).status_code

    def test_index(self):
        assert self._status('/') == 200

    def test_get_all_lamps(self):
        assert self._status('/lamps') == 200

    def test_get_lamp(self):
        for path, expected in (('/lamps/red', 200), ('/lamps/yellow', 200),
                               ('/lamps/green', 200), ('/lamps/blue', 400)):
            assert self._status(path) == expected

    def test_set_lamp(self):
        for path, expected in (('/lamps/red/on', 200), ('/lamps/yellow/blink', 200),
                               ('/lamps/green/quickblink', 200), ('/lamps/red/off', 200),
                               ('/lamps/blue/on', 400), ('/lamps/red/light', 400)):
            assert self._status(path) == expected

    def test_set_all_lamps_off(self):
        assert self._status('/lamps/off') == 200

    def test_get_buzzer(self):
        assert self._status('/buzzer') == 200

    def test_set_buzzer(self):
        for path, expected in (('/buzzer/on', 200), ('/buzzer/continuous', 200),
                               ('/buzzer/intermittent', 200), ('/buzzer/off', 200),
                               ('/buzzer/beep', 400)):
            assert self._status(path) == expected

    def test_get_all_dos(self):
        assert self._status('/do') == 200

    def test_get_do(self):
        for path, expected in (('/do/1', 200), ('/do/2', 200), ('/do/3', 200),
                               ('/do/4', 200), ('/do/0', 400), ('/do/5', 400)):
            assert self._status(path) == expected

    def test_set_do(self):
        for path, expected in (('/do/1/on', 200), ('/do/2/off', 200),
                               ('/do/0/on', 400), ('/do/1/blink', 400)):
            assert self._status(path) == expected

    def test_get_all_dis(self):
        assert self._status('/di') == 200

    def test_get_di(self):
        for path, expected in (('/di/1', 200), ('/di/2', 200), ('/di/3', 200),
                               ('/di/4', 200), ('/di/0', 400), ('/di/5', 400)):
            assert self._status(path) == expected

    def test_get_all_voices(self):
        assert self._status('/voices') == 200

    def test_get_voice(self):
        for path, expected in (('/voices/1', 200), ('/voices/20', 200),
                               ('/voices/0', 400), ('/voices/21', 400)):
            assert self._status(path) == expected

    def test_set_voice(self):
        for path, expected in (('/voices/1/play', 200), ('/voices/2/repeat', 200),
                               ('/voices/3/stop', 200), ('/voices/0/play', 400),
                               ('/voices/1/speak', 400)):
            assert self._status(path) == expected

    def test_set_all_voices_stop(self):
        assert self._status('/voices/stop') == 200

    def test_get_contract(self):
        assert self._status('/contract') == 200

    def test_get_model(self):
        assert self._status('/model') == 200

    def test_get_productiondate(self):
        assert self._status('/productiondate') == 200

    def test_get_serialnumber(self):
        assert self._status('/serialnumber') == 200

    def test_get_unitid(self):
        assert self._status('/unitid') == 200

    def test_get_version(self):
        assert self._status('/version') == 200
class TestMain(object):
    """Tests for the command-line entry point."""

    def setup(self):
        # Preserve argv so each test starts from a clean slate.
        self._argv = sys.argv
        sys.argv = []

    def teardown(self):
        sys.argv = self._argv

    def test_main_with_default(self):
        with mock.patch('keiko.app.app.run') as run_mock:
            sys.argv += ['script_path', 'keiko_address']
            keiko.app.main()
            assert keiko.app.app.keiko is not None
            assert keiko.app.app.debug is False
            assert run_mock.call_args[1] == {'host': '127.0.0.1', 'port': 8080}
import mock
import keiko.app
class TestApp(object):
    """Smoke test only: every endpoint answers with the expected status."""

    def setup(self):
        # Replace the hardware driver and JSON serialisation with mocks.
        keiko.app.app.keiko = mock.MagicMock()
        keiko.app.jsonify = mock.MagicMock(return_value='')
        self.app = keiko.app.app.test_client()

    def _status(self, path):
        """Return the status code of a GET against *path*."""
        return self.app.get(path).status_code

    def test_index(self):
        assert self._status('/') == 200

    def test_get_all_lamps(self):
        assert self._status('/lamps') == 200

    def test_get_lamp(self):
        for path, expected in (('/lamps/red', 200), ('/lamps/yellow', 200),
                               ('/lamps/green', 200), ('/lamps/blue', 400)):
            assert self._status(path) == expected

    def test_set_lamp(self):
        for path, expected in (('/lamps/red/on', 200), ('/lamps/yellow/blink', 200),
                               ('/lamps/green/quickblink', 200), ('/lamps/red/off', 200),
                               ('/lamps/blue/on', 400), ('/lamps/red/light', 400)):
            assert self._status(path) == expected

    def test_set_all_lamps_off(self):
        assert self._status('/lamps/off') == 200

    def test_get_buzzer(self):
        assert self._status('/buzzer') == 200

    def test_set_buzzer(self):
        for path, expected in (('/buzzer/on', 200), ('/buzzer/continuous', 200),
                               ('/buzzer/intermittent', 200), ('/buzzer/off', 200),
                               ('/buzzer/beep', 400)):
            assert self._status(path) == expected

    def test_get_all_dos(self):
        assert self._status('/do') == 200

    def test_get_do(self):
        for path, expected in (('/do/1', 200), ('/do/2', 200), ('/do/3', 200),
                               ('/do/4', 200), ('/do/0', 400), ('/do/5', 400)):
            assert self._status(path) == expected

    def test_set_do(self):
        for path, expected in (('/do/1/on', 200), ('/do/2/off', 200),
                               ('/do/0/on', 400), ('/do/1/blink', 400)):
            assert self._status(path) == expected

    def test_get_all_dis(self):
        assert self._status('/di') == 200

    def test_get_di(self):
        for path, expected in (('/di/1', 200), ('/di/2', 200), ('/di/3', 200),
                               ('/di/4', 200), ('/di/0', 400), ('/di/5', 400)):
            assert self._status(path) == expected

    def test_get_all_voices(self):
        assert self._status('/voices') == 200

    def test_get_voice(self):
        for path, expected in (('/voices/1', 200), ('/voices/20', 200),
                               ('/voices/0', 400), ('/voices/21', 400)):
            assert self._status(path) == expected

    def test_set_voice(self):
        for path, expected in (('/voices/1/play', 200), ('/voices/2/repeat', 200),
                               ('/voices/3/stop', 200), ('/voices/0/play', 400),
                               ('/voices/1/speak', 400)):
            assert self._status(path) == expected

    def test_set_all_voices_stop(self):
        assert self._status('/voices/stop') == 200

    def test_get_contract(self):
        assert self._status('/contract') == 200

    def test_get_model(self):
        assert self._status('/model') == 200

    def test_get_productiondate(self):
        assert self._status('/productiondate') == 200

    def test_get_serialnumber(self):
        assert self._status('/serialnumber') == 200

    def test_get_unitid(self):
        assert self._status('/unitid') == 200

    def test_get_version(self):
        assert self._status('/version') == 200
class TestMain(object):
    """Tests for the command-line entry point."""

    def setup(self):
        # Preserve argv so each test starts from a clean slate.
        self._argv = sys.argv
        sys.argv = []

    def teardown(self):
        sys.argv = self._argv

    def test_main_with_default(self):
        with mock.patch('keiko.app.app.run') as run_mock:
            sys.argv += ['script_path', 'keiko_address']
            keiko.app.main()
            assert keiko.app.app.keiko is not None
            assert keiko.app.app.debug is False
            assert run_mock.call_args[1] == {'host': '127.0.0.1', 'port': 8080}
class acc:
    """Acceleration unit conversions."""

    # Conversion factor from each unit name to metres per second squared.
    TANNI = {"m/s2": 1, "km/h/s": 0.27777777, "ft/h/s": 0.00008466666666, "in/min/s": 0.000423333333, "ft/min/s": 0.00508, "Gal": 0.01,
             "in/s2": 0.0254, "ft/s2": 0.3048, "mi/h/s": 0.44704, "kn/s": 0.5144444, "g": 9.80665, "mi/min/s": 26.8224, "mi/s2": 1609.344}

    # @staticmethod added: the original bare function broke instance calls
    # (acc().change(...) would have bound the instance to *value*).
    @staticmethod
    def change(value: float, tanni: str, to_tanni: str) -> float:
        """Convert an acceleration *value* from unit *tanni* to *to_tanni*.

        Supported units (factor to m/s^2): "m/s2" (1), "km/h/s"
        (0.27777777), "ft/h/s" (0.00008466666666), "ft/min/s" (0.00508),
        "ft/s2" (0.3048), "in/min/s" (0.000423333333), "in/s2" (0.0254),
        "mi/h/s" (0.44704), "mi/min/s" (26.8224), "mi/s2" (1609.344),
        "kn/s" (0.5144444), "Gal" (0.01), "g" standard gravity (9.80665).

        Raises KeyError for an unknown unit name.
        """
        # Normalise to the base unit, then rescale into the target unit.
        return value * acc.TANNI[tanni] / acc.TANNI[to_tanni]
class area:
    """Area unit conversions."""

    # Conversion factor from each unit name to square metres.
    # Japanese units: TUBO 坪, BU 歩, SE 畝, TAN 反, CHOU 町.
    TANNI = {"m2": 1, "a": 100, "ha": 10000, "km2": 1000000, "TUBO": 3.305785124, "BU": 3.305785124,
             "SE": 99.173554, "TAN": 991.736, "CHOU": 9917.35537, "ft2": 0.09290304, "in2": 0.00064516, "yd2": 0.83612736}

    # @staticmethod so the method also works on instances.
    @staticmethod
    def change(value: float, tanni: str, to_tanni: str) -> float:
        """Convert an area *value* from unit *tanni* to *to_tanni*.

        Supported units (factor to m^2): "m2" (1), "km2" (1000000),
        "a" are (100), "ha" hectare (10000), "TUBO"/"BU" (3.305785124),
        "SE" (99.173554), "TAN" (991.736), "CHOU" (9917.35537),
        "ft2" (0.09290304), "in2" (0.00064516), "yd2" (0.83612736).

        Raises KeyError for an unknown unit name.
        """
        return value * area.TANNI[tanni] / area.TANNI[to_tanni]
class length:
    """Length unit conversions."""

    # Conversion factor from each unit name to metres.
    # "mile": fixed typo 1604.344 -> 1609.344 (international mile;
    # matches the "mi/s2" factor used by the acc class).
    TANNI = {"m": 1, "au": 149597870700, "KAIRI": 1852, "in": 0.0254, "ft": 0.3048, "yd": 0.9144, "mile": 1609.344,
             "RI": 3927.27, "HIRO": 1.1818, "KEN": 1.1818, "SHAKU": 0.30303, "SUN": 0.030303, "ly": 9460730472580800}

    # @staticmethod so the method also works on instances.
    @staticmethod
    def change(value: float, tanni: str, to_tanni: str) -> float:
        """Convert a length *value* from unit *tanni* to *to_tanni*.

        Supported units (factor to m): "m" (1), "au" astronomical unit
        (149597870700), "KAIRI" nautical mile (1852), "in" (0.0254),
        "ft" (0.3048), "yd" (0.9144), "mile" (1609.344), "RI" ri
        (3927.27), "HIRO"/"KEN" (1.1818), "SHAKU" (0.30303),
        "SUN" (0.030303), "ly" light-year (9460730472580800).

        Raises KeyError for an unknown unit name.
        """
        return value * length.TANNI[tanni] / length.TANNI[to_tanni]
class mass:
    """Mass unit conversions."""

    # Conversion factor from each unit name to grams.
    # Japanese units: mom 匁, KAN 貫, RYOU 両, KIN 斤.
    TANNI = {"g": 1, "kg": 1000, "t": 1000000, "gamma": 0.000001, "kt": 0.2, "oz": 28.349523125,
             "lb": 453.59237, "q": 100000, "mom": 3.75, "KAN": 3750, "RYOU": 37.5, "KIN": 600}

    # @staticmethod so the method also works on instances.
    @staticmethod
    def change(value: float, tanni: str, to_tanni: str) -> float:
        """Convert a mass *value* from unit *tanni* to *to_tanni*.

        Supported units (factor to g): "g" (1), "kg" (1000),
        "t" (1000000), "gamma" (0.000001), "kt" carat (0.2),
        "oz" (28.349523125), "lb" (453.59237), "q" quintal (100000),
        "mom" (3.75), "KAN" (3750), "RYOU" (37.5), "KIN" (600).

        Raises KeyError for an unknown unit name.
        """
        return value * mass.TANNI[tanni] / mass.TANNI[to_tanni]
class prefix:
    """SI prefix conversions."""

    # Multiplier for each prefix.
    # Fixes: "y" (yocto) was 1e-18 (a duplicate of "a"); it is now 1e-24.
    # "normal" added as the documented spelling; the original misspelled
    # key "nomal" is kept for backward compatibility.
    TANNI = {"nomal": 1, "normal": 1, "Y": 1000000000000000000000000, "Z": 1000000000000000000000, "E": 1000000000000000000, "P": 1000000000000000, "T": 1000000000000, "G": 1000000000, "M": 1000000, "k": 1000, "h": 100,
             "da": 10, "d": 0.1, "c": 0.01, "m": 0.001, "micro": 0.000001, "n": 0.000000001, "p": 0.000000000001, "f": 0.000000000000001, "a": 0.000000000000000001, "z": 0.000000000000000000001, "y": 0.000000000000000000000001}

    # @staticmethod so the method also works on instances.
    @staticmethod
    def change(value: float, tanni: str, to_tanni: str) -> float:
        """Convert *value* between SI prefixes *tanni* and *to_tanni*.

        Supported prefixes: "Y" 10^24, "Z" 10^21, "E" 10^18, "P" 10^15,
        "T" 10^12, "G" 10^9, "M" 10^6, "k" 10^3, "h" 10^2, "da" 10,
        "normal"/"nomal" 1, "d" 10^-1, "c" 10^-2, "m" 10^-3,
        "micro" 10^-6, "n" 10^-9, "p" 10^-12, "f" 10^-15, "a" 10^-18,
        "z" 10^-21, "y" 10^-24.

        Raises KeyError for an unknown prefix name.
        """
        return value * prefix.TANNI[tanni] / prefix.TANNI[to_tanni]
class speed:
    """Speed unit conversions."""

    # Conversion factor from each unit name to metres per second.
    # "mi/s": fixed 1.609344 -> 1609.344 (a mile per second is 1609.344
    # m/s; 1.609344 is kilometres per mile, an off-by-1000 typo).
    TANNI = {"m/s": 1, "ft/h": 0.00008466667, "in/min": 0.000423333, "ft/min": 0.00508, "in/s": 0.0254, "km/h": 0.2777778,
             "ft/s": 0.3048, "mi/h": 0.44704, "kn": 0.514444, "mi/min": 26.8224, "mi/s": 1609.344, "c": 299792458}

    # @staticmethod so the method also works on instances.
    @staticmethod
    def change(value: float, tanni: str, to_tanni: str) -> float:
        """Convert a speed *value* from unit *tanni* to *to_tanni*.

        Supported units (factor to m/s): "m/s" (1), "km/h" (0.2777778),
        "ft/h" (0.00008466667), "ft/min" (0.00508), "ft/s" (0.3048),
        "in/min" (0.000423333), "in/s" (0.0254), "mi/h" (0.44704),
        "mi/min" (26.8224), "mi/s" (1609.344), "kn" knot (0.514444),
        "c" speed of light in vacuum (299792458).

        Raises KeyError for an unknown unit name.
        """
        return value * speed.TANNI[tanni] / speed.TANNI[to_tanni]
class temperature:
    """Temperature unit conversions."""

    # kelvin = (value + TANNI_P[unit]) * TANNI_M[unit]
    # "C": fixed offset typo 237.15 -> 273.15 (0 degC = 273.15 K; with the
    # old value 32 degF would not convert to 0 degC).
    TANNI_P = {"K": 0, "C": 273.15, "F": 459.67, "Ra": 0}
    TANNI_M = {"K": 1, "C": 1, "F": 5/9, "Ra": 5/9}

    # @staticmethod so the method also works on instances.
    @staticmethod
    def change(value: float, tanni: str, to_tanni: str) -> float:
        """Convert a temperature *value* from unit *tanni* to *to_tanni*.

        Supported units: "K" kelvin, "C" Celsius, "F" Fahrenheit,
        "Ra" Rankine.

        Raises KeyError for an unknown unit name.
        """
        # Convert to kelvin, then into the target scale.
        kelvin = (value + temperature.TANNI_P[tanni]) * temperature.TANNI_M[tanni]
        return kelvin / temperature.TANNI_M[to_tanni] - temperature.TANNI_P[to_tanni]
class time_unit:
    """Time unit conversions."""

    # Conversion factor from each unit name to seconds.
    TANNI = {"s": 1, "shake": 0.00000001, "TU": 0.001024, "jiffy_e": 1/50, "jiffy_w": 1/60, "min": 60, "moment": 90, "KOKU": 900, "KOKU_old": 864, "h": 60*60, "d": 60*60*24, "wk": 60*60*24*7, "JUN": 60*60*24 *
             10, "fortnight": 60*60*24*14, "SAKUBO": 2551442.27, "mo": 60*60*24*30, "quarter": 7776000, "semester": 10872000, "y": 31536000, "greg": 31556952, "juli": 31557600, "year": 31558149.764928, "c": 3153600000}

    # @staticmethod so the method also works on instances.
    @staticmethod
    def change(value: float, tanni: str, to_tanni: str) -> float:
        """Convert a time *value* from unit *tanni* to *to_tanni*.

        Supported units (factor to s): "s" (1), "shake" (1e-8),
        "TU" Time Unit (0.001024), "jiffy_e" (1/50), "jiffy_w" (1/60),
        "min" (60), "moment" (90), "KOKU" Chinese 100-ke day (900),
        "KOKU_old" 96-ke day (864), "h" hour, "d" day, "wk" week,
        "JUN" ten-day period, "fortnight" (14 d), "SAKUBO" synodic month
        (2551442.27), "mo" 30-day month, "quarter" (7776000),
        "semester" (10872000), "y" 365-day year (31536000),
        "greg" Gregorian year (31556952), "juli" Julian year (31557600),
        "year" sidereal year (31558149.764928), "c" century (3153600000).

        Raises KeyError for an unknown unit name.
        """
        return value * time_unit.TANNI[tanni] / time_unit.TANNI[to_tanni]
class volume:
    """Volume unit conversions."""

    # Conversion factor from each unit name to cubic metres.
    # Japanese units: KOKU 石, TO 斗, SHOU 升, GOU 合, SHAKU 勺.
    TANNI = {"m3": 1, "L": 0.001, "KOSAJI": 0.000005, "OSAJI": 0.000015, "c": 0.00025, "lambda": 0.000000001, "acft": 1233.48183754752, "drop": 0.00000005, "in3": 0.000016387064,
             "ft3": 0.028316846592, "yd3": 0.764554857984, "mi3": 4168.181825440579584, "SHOU": 0.0018039, "KOKU": 0.18039, "GOU": 0.00018039, "TO": 0.018039, "SHAKU": 0.000018039}

    # @staticmethod so the method also works on instances.
    @staticmethod
    def change(value: float, tanni: str, to_tanni: str) -> float:
        """Convert a volume *value* from unit *tanni* to *to_tanni*.

        Supported units (factor to m^3): "m3" (1), "L" (0.001),
        "KOSAJI" teaspoon (0.000005), "OSAJI" tablespoon (0.000015),
        "c" cup (0.00025), "lambda" (1e-9), "acft" acre-foot
        (1233.48183754752), "drop" (5e-8), "in3" (0.000016387064),
        "ft3" (0.028316846592), "yd3" (0.764554857984),
        "mi3" (4168.181825440579584), "KOKU" (0.18039), "TO" (0.018039),
        "SHOU" (0.0018039), "GOU" (0.00018039), "SHAKU" (0.000018039).

        Raises KeyError for an unknown unit name.
        """
        return value * volume.TANNI[tanni] / volume.TANNI[to_tanni]
"""加速度を扱います。"""
TANNI = {"m/s2": 1, "km/h/s": 0.27777777, "ft/h/s": 0.00008466666666, "in/min/s": 0.000423333333, "ft/min/s": 0.00508, "Gal": 0.01,
"in/s2": 0.0254, "ft/s2": 0.3048, "mi/h/s": 0.44704, "kn/s": 0.5144444, "g": 9.80665, "mi/min/s": 26.8224, "mi/s2": 1609.344}
def change(value:float, tanni:str, to_tanni:str)->float:
"""
加速度の単位変換\n
引数は(変換する値,変換前の単位,変換後の単位)となります。\n
対応単位\n
\"m/s2\":メートル毎秒毎秒(1m/s^2)
\"km/h/s\":キロメートル毎時毎秒(0.27777777m/s^2)
\"ft/h/s\":フィート毎時毎秒(0.00008466666666m/s^2)
\"ft/min/s\":フィート毎分毎秒(0.00508m/s^2)
\"ft/s2\":フィート毎秒毎秒(0.3048m/s^2)
\"in/min/s\":インチ毎分毎秒(0.000423333333m/s^2)
\"in/s2\":インチ毎秒毎秒(0.0254m/s^2)
\"mi/h/s\":マイル毎時毎秒(0.44704m/s^2)
\"mi/min/s\":マイル毎分毎秒(26.8224m/s^2)
\"mi/s2\":マイル毎秒毎秒(1609.344m/s^2)
\"kn/s\":ノット毎秒(0.5144444m/s^2)
\"Gal\":ガル(0.01m/s^2)
\"g\":標準重力加速度(9.80665m/s^2)
"""
to = value*acc.TANNI[tanni]
to = to/acc.TANNI[to_tanni]
return to
class area:
    """Area unit conversions."""

    # Conversion factor from each unit name to square metres.
    # Japanese units: TUBO 坪, BU 歩, SE 畝, TAN 反, CHOU 町.
    TANNI = {"m2": 1, "a": 100, "ha": 10000, "km2": 1000000, "TUBO": 3.305785124, "BU": 3.305785124,
             "SE": 99.173554, "TAN": 991.736, "CHOU": 9917.35537, "ft2": 0.09290304, "in2": 0.00064516, "yd2": 0.83612736}

    # @staticmethod so the method also works on instances.
    @staticmethod
    def change(value: float, tanni: str, to_tanni: str) -> float:
        """Convert an area *value* from unit *tanni* to *to_tanni*.

        Supported units (factor to m^2): "m2" (1), "km2" (1000000),
        "a" are (100), "ha" hectare (10000), "TUBO"/"BU" (3.305785124),
        "SE" (99.173554), "TAN" (991.736), "CHOU" (9917.35537),
        "ft2" (0.09290304), "in2" (0.00064516), "yd2" (0.83612736).

        Raises KeyError for an unknown unit name.
        """
        return value * area.TANNI[tanni] / area.TANNI[to_tanni]
class length:
    """Length unit conversions."""

    # Conversion factor from each unit name to metres.
    # "mile": fixed typo 1604.344 -> 1609.344 (international mile;
    # matches the "mi/s2" factor used by the acc class).
    TANNI = {"m": 1, "au": 149597870700, "KAIRI": 1852, "in": 0.0254, "ft": 0.3048, "yd": 0.9144, "mile": 1609.344,
             "RI": 3927.27, "HIRO": 1.1818, "KEN": 1.1818, "SHAKU": 0.30303, "SUN": 0.030303, "ly": 9460730472580800}

    # @staticmethod so the method also works on instances.
    @staticmethod
    def change(value: float, tanni: str, to_tanni: str) -> float:
        """Convert a length *value* from unit *tanni* to *to_tanni*.

        Supported units (factor to m): "m" (1), "au" astronomical unit
        (149597870700), "KAIRI" nautical mile (1852), "in" (0.0254),
        "ft" (0.3048), "yd" (0.9144), "mile" (1609.344), "RI" ri
        (3927.27), "HIRO"/"KEN" (1.1818), "SHAKU" (0.30303),
        "SUN" (0.030303), "ly" light-year (9460730472580800).

        Raises KeyError for an unknown unit name.
        """
        return value * length.TANNI[tanni] / length.TANNI[to_tanni]
class mass:
"""質量を扱います。"""
TANNI = {"g": 1, "kg": 1000, "t": 1000000, "gamma": 0.000001, "kt": 0.2, "oz": 28.349523125,
"lb": 453.59237, "q": 100000, "mom": 3.75, "KAN": 3750, "RYOU": 37.5, "KIN": 600}
def change(value:float, tanni:str, to_tanni:str)->float:
"""
質量の単位変換\n
引数は(変換する値,変換前の単位,変換後の単位)となります。\n
対応単位\n
\"g\":グラム
\"kg\":キログラム(1000g)
\"t\":トン(1000000g)
\"gamma\":γ(0.000001g)
\"kt\":カラット(0.2g)
\"oz\":オンス(28.349523125g)
\"lb\":ポンド(453.59237g)
\"q\":キンタル(100000g)
\"mom\":匁(3.75g)
\"KAN\":貫(3750g)
\"RYOU\":両(37.5g)
\"KIN\":斤(600g)
"""
to = value*mass.TANNI[tanni]
to = to/mass.TANNI[to_tanni]
return to
class prefix:
"""接頭辞を扱います。"""
TANNI = {"nomal": 1, "Y": 1000000000000000000000000, "Z": 1000000000000000000000, "E": 1000000000000000000, "P": 1000000000000000, "T": 1000000000000, "G": 1000000000, "M": 1000000, "k": 1000, "h": 100,
"da": 10, "d": 0.1, "c": 0.01, "m": 0.001, "micro": 0.000001, "n": 0.000000001, "p": 0.000000000001, "f": 0.000000000000001, "a": 0.000000000000000001, "z": 0.000000000000000000001, "y": 0.000000000000000001}
def change(value:float, tanni:str, to_tanni:str)->float:
"""
接頭辞の単位変換\n
引数は(変換する値,変換前の単位,変換後の単位)となります。\n
対応単位\n
"Y":10^24
"Z":10^21
"E":10^18
"P":10^15
"T":10^12
"G":10^9
"M":10^6
"k":10^3
"h":10^2
"da":10
"normal":1
"d":10^-1
"c":10^-2
"m":10^-3
"micro":10^-6(μ)
"n":10^-9
"p":10^-12
"f":10^-15
"a":10^-18
"z":10^-21
"y":10^-24
"""
to = value*prefix.TANNI[tanni]
to = to/prefix.TANNI[to_tanni]
return to
class speed:
"""速度を扱います。"""
TANNI = {"m/s": 1, "ft/h": 0.00008466667, "in/min": 0.000423333, "ft/min": 0.00508, "in/s": 0.0254, "km/h": 0.2777778,
"ft/s": 0.3048, "mi/h": 0.44704, "kn": 0.514444, "mi/min": 26.8224, "mi/s": 1.609344, "c": 299792458}
def change(value:float, tanni:str, to_tanni:str)->float:
"""
速度の単位変換\n
引数は(変換する値,変換前の単位,変換後の単位)となります。\n
対応単位\n
"m/s":メートル毎秒(1m/s)
"km/h":キロメートル毎時(0.2777778m/s)
"ft/h":フィート毎時(0.00008466667m/s)
"ft/min":フィート毎分(0.00508m/s)
"ft/s":フィート毎秒(0.3048m/s)
"in/min":インチ毎分(0.000423333m/s)
"in/s":インチ毎秒(0.0254m/s)
"mi/h":マイル毎時(0.44704m/s)
"mi/min":マイル毎分(26.8224m/s)
"mi/s":マイル毎秒(1.609344m/s)
"kn":ノット(0.514444m/s)
"c":真空中の光速度(299792458m/s)
"""
to = value*speed.TANNI[tanni]
to = to/speed.TANNI[to_tanni]
return to
class temperature:
"""温度を扱います。"""
TANNI_P = {"K": 0, "C": 237.15, "F": 459.67, "Ra": 0}
TANNI_M = {"K": 1, "C": 1, "F": 5/9, "Ra": 5/9}
def change(value:float, tanni:str, to_tanni:str)->float:
"""
温度の単位変換\n
引数は(変換する値,変換前の単位,変換後の単位)となります。\n
対応単位\n
"K":ケルビン
"C":セルシウス度
"F":ファーレンハイト度
"Ra":ランキン度
"""
to = value+temperature.TANNI_P[tanni]
to = to*temperature.TANNI_M[tanni]
to = to/temperature.TANNI_M[to_tanni]
to = to-temperature.TANNI_P[to_tanni]
return to
class time_unit:
"""時間を扱います。"""
TANNI = {"s": 1, "shake": 0.00000001, "TU": 0.001024, "jiffy_e": 1/50, "jiffy_w": 1/60, "min": 60, "moment": 90, "KOKU": 900, "KOKU_old": 864, "h": 60*60, "d": 60*60*24, "wk": 60*60*24*7, "JUN": 60*60*24 *
10, "fortnight": 60*60*24*14, "SAKUBO": 2551442.27, "mo": 60*60*24*30, "quarter": 7776000, "semester": 10872000, "y": 31536000, "greg": 31556952, "juli": 31557600, "year": 31558149.764928, "c": 3153600000}
def change(value:float, tanni:str, to_tanni:str)->float:
"""
時間の単位変換\n
引数は(変換する値,変換前の単位,変換後の単位)となります。\n
対応単位\n
"s":秒(1s)
"shake":シェイク(0.00000001s)
"TU":Time Unit(0.001024s)
"jiffy_e":ジフィ(電子工学,東日本)(1/50)
"jiffy_w":ジフィ(電子工学,西日本)(1/60)
"min":分(60s)
"moment":モーメント(90s)
"KOKU":刻(中国,100刻制)(900s)
"KOKU_old":刻(中国,96刻制)(864s)
"h":時(60*60s)
"d":日(606024s)
"wk":週(606024*7s)
"JUN":旬(606024*10s)
"fortnight":フォートナイト(半月)(606024*14s)
"SAKUBO":朔望月(2551442.27s)
"mo":月(30d)(606024*30s)
"quarter":四半期(7776000s)
"semester":セメスター(10872000s)
"y":年(365d)(31536000s)
"greg":グレゴリオ年(31556952s)
"juli":ユリウス年(31557600s)
"year":恒星年(31558149.764928s)
"c":世紀(365d*100)(3153600000s)
"""
to = value*time_unit.TANNI[tanni]
to = to/time_unit.TANNI[to_tanni]
return to
class volume:
"""体積を扱います。"""
TANNI = {"m3": 1, "L": 0.001, "KOSAJI": 0.000005, "OSAJI": 0.000015, "c": 0.00025, "lambda": 0.000000001, "acft": 1233.48183754752, "drop": 0.00000005, "in3": 0.000016387064,
"ft3": 0.028316846592, "yd3": 0.764554857984, "mi3": 4168.181825440579584, "SHOU": 0.0018039, "KOKU": 0.18039, "GOU": 0.00018039, "TO": 0.018039, "SHAKU": 0.000018039}
def change(value:float, tanni:str, to_tanni:str)->float:
"""
"m3":立方メートル(1m^3)
"L":リットル(0.001m^3)
"KOSAJI":小さじ(0.000005m^3)
"OSAJI":大さじ(0.000015m^3)
"c":カップ(0.00025m^3)
"lambda":λ(0.000000001m^3)
"acft":エーカー・フィート(1233.48183754752m^3)
"drop":ドロップ(0.00000005m^3)
"in3":立方インチ(0.000016387064m^3)
"ft3":立方フィート(0.028316846592m^3)
"yd3":立方ヤード(0.764554857984m^3)
"mi3":立方マイル(4168.181825440579584m^3)
"KOKU":石(0.18039m^3)
"TO":斗(0.018039m^3)
"SHOU":升(0.0018039m^3)
"GOU":合(0.00018039m^3)
"SHAKU":勺(0.000018039m^3)
"""
to = value*volume.TANNI[tanni]
to = to/volume.TANNI[to_tanni]
return to | 0.390825 | 0.513668 |
from opqua.model import Model
model = Model()
model.newSetup('setup_normal', preset='vector-borne')
model.newSetup(
'setup_cluster',
contact_rate_host_vector = ( 2 *
model.setups['setup_normal'].contact_rate_host_vector ),
preset='vector-borne'
) # uses default parameters but doubles contact rate of the first setup
model.newPopulation('population_A','setup_normal', num_hosts=20, num_vectors=20)
model.newPopulation('population_B','setup_normal', num_hosts=20, num_vectors=20)
# Create two populations that will be connected.
model.newPopulation(
'isolated_population','setup_normal', num_hosts=20, num_vectors=20
) # A third population will remain isolated.
model.createInterconnectedPopulations(
5,'clustered_population_','setup_cluster',
host_migration_rate=2e-3, vector_migration_rate=0,
host_contact_rate=0, vector_contact_rate=0,
num_hosts=20, num_vectors=20
)
# Create a cluster of 5 populations connected to each other with a migration
# rate of 2e-3 between each of them in both directions. Each population has
# an numbered ID with the prefix "clustered_population_", has the parameters
# defined in the "setup_cluster" setup, and has 20 hosts and vectors.
model.linkPopulationsHostMigration('population_A','clustered_population_4',2e-3)
# We link population_A to one of the clustered populations with a one-way
# migration rate of 2e-3.
model.linkPopulationsHostMigration('population_A','population_B',2e-3)
# We link population_A to population_B with a one-way migration rate of
# 2e-3.
model.addPathogensToHosts( 'population_A',{'AAAAAAAAAA':5} )
# population_A starts with AAAAAAAAAA genotype pathogens.
model.addPathogensToHosts( 'population_B',{'GGGGGGGGGG':5} )
# population_B starts with GGGGGGGGGG genotype pathogens.
output = model.run(0,100,time_sampling=0)
data = model.saveToDataFrame('metapopulations_migration_example.csv')
graph = model.populationsPlot( # Plot infected hosts per population over time.
'metapopulations_migration_example.png', data,
num_top_populations=8, # plot all 8 populations
track_specific_populations=['isolated_population'],
# Make sure to plot th isolated population totals if not in the top
# infected populations.
y_label='Infected hosts' # change y label
) | examples/tutorials/metapopulations/metapopulations_migration_example.py | from opqua.model import Model
model = Model()
model.newSetup('setup_normal', preset='vector-borne')
model.newSetup(
'setup_cluster',
contact_rate_host_vector = ( 2 *
model.setups['setup_normal'].contact_rate_host_vector ),
preset='vector-borne'
) # uses default parameters but doubles contact rate of the first setup
model.newPopulation('population_A','setup_normal', num_hosts=20, num_vectors=20)
model.newPopulation('population_B','setup_normal', num_hosts=20, num_vectors=20)
# Create two populations that will be connected.
model.newPopulation(
'isolated_population','setup_normal', num_hosts=20, num_vectors=20
) # A third population will remain isolated.
model.createInterconnectedPopulations(
5,'clustered_population_','setup_cluster',
host_migration_rate=2e-3, vector_migration_rate=0,
host_contact_rate=0, vector_contact_rate=0,
num_hosts=20, num_vectors=20
)
# Create a cluster of 5 populations connected to each other with a migration
# rate of 2e-3 between each of them in both directions. Each population has
# an numbered ID with the prefix "clustered_population_", has the parameters
# defined in the "setup_cluster" setup, and has 20 hosts and vectors.
model.linkPopulationsHostMigration('population_A','clustered_population_4',2e-3)
# We link population_A to one of the clustered populations with a one-way
# migration rate of 2e-3.
model.linkPopulationsHostMigration('population_A','population_B',2e-3)
# We link population_A to population_B with a one-way migration rate of
# 2e-3.
model.addPathogensToHosts( 'population_A',{'AAAAAAAAAA':5} )
# population_A starts with AAAAAAAAAA genotype pathogens.
model.addPathogensToHosts( 'population_B',{'GGGGGGGGGG':5} )
# population_B starts with GGGGGGGGGG genotype pathogens.
output = model.run(0,100,time_sampling=0)
data = model.saveToDataFrame('metapopulations_migration_example.csv')
graph = model.populationsPlot( # Plot infected hosts per population over time.
'metapopulations_migration_example.png', data,
num_top_populations=8, # plot all 8 populations
track_specific_populations=['isolated_population'],
# Make sure to plot th isolated population totals if not in the top
# infected populations.
y_label='Infected hosts' # change y label
) | 0.637369 | 0.478102 |
from django.test import TestCase
from django.urls import reverse
from rest_framework import status
from rest_framework.test import APIClient
from organization.models import Organization
from squac.test_mixins import sample_user, create_group
'''Tests for org models:
*Org
to run only the app tests:
/mg.sh "test organizations && flake8"
to run only this file
./mg.sh "test organization.tests.test_organization_api && flake8"
'''
class OrganizationAPITests(TestCase):
def setUp(self):
self.client = APIClient()
self.group_viewer = create_group('viewer', [])
self.group_reporter = create_group('reporter', [])
self.group_contributor = create_group('contributor', [])
self.group_org_admin = create_group("org_admin", [])
self.org1 = Organization.objects.create(name="UW")
self.org2 = Organization.objects.create(name="CVO")
self.staff = sample_user(
email="<EMAIL>",
password="<PASSWORD>",
organization=self.org1
)
self.staff.is_staff = True
self.client.force_authenticate(self.staff)
self.org1user = sample_user("<EMAIL>", 'secret', self.org1)
self.org2user = sample_user("<EMAIL>", 'secret', self.org2)
def test_get_organization(self):
'''test staff get other organization '''
url = reverse('organization:organization-detail',
kwargs={'pk': self.org2.id})
res = self.client.get(url)
self.assertEqual(res.status_code, status.HTTP_200_OK)
def test_get_organization_user(self):
'''test get org user'''
url = reverse('organization:organizationuser-detail',
kwargs={'pk': self.org1user.id})
res = self.client.get(url)
self.assertEqual(res.status_code, status.HTTP_200_OK)
def test_create_org_user_new_user(self):
'''create org_user for new user'''
url_org = reverse('organization:organization-detail',
kwargs={'pk': self.org1.id})
res = self.client.get(url_org)
self.assertEqual(len(res.data['users']), 2)
url = reverse('organization:organizationuser-list')
payload = {
'email': '<EMAIL>',
"organization": self.org1.id,
"groups": [
self.group_contributor.id
]
}
res = self.client.post(url, payload, format='json')
self.assertEqual(res.status_code, status.HTTP_201_CREATED)
# check if new user shows in org
res = self.client.get(url_org)
self.assertEqual(len(res.data['users']), 3)
# now try create again, should fail on uniqueness
res = self.client.post(url, payload, format='json')
self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)
def test_update_org_user(self):
'''change org user to admin
'''
user = self.org1user
self.assertFalse(user.is_org_admin)
url = reverse('organization:organizationuser-detail', args=[user.id])
payload = {
'email': user.email,
"organization": self.org1.id,
'is_org_admin': True,
'firstname': "Seymore",
'lastname': 'things',
'groups': [
self.group_contributor.id
]
}
res = self.client.patch(url, payload, format='json')
# should now have 4 groups since contrib needs viewer and reporter
# and we made admin
self.assertEqual(res.data['firstname'], 'Seymore')
self.assertEqual(len(res.data['groups']), 4)
self.assertEqual(res.status_code, status.HTTP_200_OK)
self.assertTrue(res.data['is_org_admin']) | app/organization/tests/test_organization_api.py | from django.test import TestCase
from django.urls import reverse
from rest_framework import status
from rest_framework.test import APIClient
from organization.models import Organization
from squac.test_mixins import sample_user, create_group
'''Tests for org models:
*Org
to run only the app tests:
/mg.sh "test organizations && flake8"
to run only this file
./mg.sh "test organization.tests.test_organization_api && flake8"
'''
class OrganizationAPITests(TestCase):
def setUp(self):
self.client = APIClient()
self.group_viewer = create_group('viewer', [])
self.group_reporter = create_group('reporter', [])
self.group_contributor = create_group('contributor', [])
self.group_org_admin = create_group("org_admin", [])
self.org1 = Organization.objects.create(name="UW")
self.org2 = Organization.objects.create(name="CVO")
self.staff = sample_user(
email="<EMAIL>",
password="<PASSWORD>",
organization=self.org1
)
self.staff.is_staff = True
self.client.force_authenticate(self.staff)
self.org1user = sample_user("<EMAIL>", 'secret', self.org1)
self.org2user = sample_user("<EMAIL>", 'secret', self.org2)
def test_get_organization(self):
'''test staff get other organization '''
url = reverse('organization:organization-detail',
kwargs={'pk': self.org2.id})
res = self.client.get(url)
self.assertEqual(res.status_code, status.HTTP_200_OK)
def test_get_organization_user(self):
'''test get org user'''
url = reverse('organization:organizationuser-detail',
kwargs={'pk': self.org1user.id})
res = self.client.get(url)
self.assertEqual(res.status_code, status.HTTP_200_OK)
def test_create_org_user_new_user(self):
'''create org_user for new user'''
url_org = reverse('organization:organization-detail',
kwargs={'pk': self.org1.id})
res = self.client.get(url_org)
self.assertEqual(len(res.data['users']), 2)
url = reverse('organization:organizationuser-list')
payload = {
'email': '<EMAIL>',
"organization": self.org1.id,
"groups": [
self.group_contributor.id
]
}
res = self.client.post(url, payload, format='json')
self.assertEqual(res.status_code, status.HTTP_201_CREATED)
# check if new user shows in org
res = self.client.get(url_org)
self.assertEqual(len(res.data['users']), 3)
# now try create again, should fail on uniqueness
res = self.client.post(url, payload, format='json')
self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)
def test_update_org_user(self):
'''change org user to admin
'''
user = self.org1user
self.assertFalse(user.is_org_admin)
url = reverse('organization:organizationuser-detail', args=[user.id])
payload = {
'email': user.email,
"organization": self.org1.id,
'is_org_admin': True,
'firstname': "Seymore",
'lastname': 'things',
'groups': [
self.group_contributor.id
]
}
res = self.client.patch(url, payload, format='json')
# should now have 4 groups since contrib needs viewer and reporter
# and we made admin
self.assertEqual(res.data['firstname'], 'Seymore')
self.assertEqual(len(res.data['groups']), 4)
self.assertEqual(res.status_code, status.HTTP_200_OK)
self.assertTrue(res.data['is_org_admin']) | 0.360602 | 0.281529 |
from PIL import Image
import numpy as np
from os import path
from json import load
import random
class bingoCard:
def __init__(self,background="map_background.png",save="overlay.png",icons="icons"):
self.card = Image.open(background)
self.card = self.card.convert("RGBA")
self.location=save
self.icons=icons
with open("icons.json","r") as file:
items=load(file)
self.lookup=items
self.items={}
self.found=np.zeros((5,5))
def addSquare(self,name,x,y,index=0):
self.items[name]=[x,y,index]
imName=self.lookup[name][index]
self.markSquare(x,y,name=imName)
def markSquare(self,x,y,name="cross.png"):
icon=Image.open(path.join(self.icons,name))
icon=icon.convert("RGBA")
icon=icon.resize((16,16,))
offset=(16+x*19,16+y*19,)
self.card.paste(icon,offset,icon)
self.card.save(self.location)
def foundItem(self,item,index=0):
found=False
if item in self.items:
if index == self.items[item][2]:
self.markSquare(self.items[item][0],self.items[item][1])
self.found[x,y]=1
found=True
def checkBingo(self):
sums=[]
for i in range(5):
sums.append(self.found[:,i].sum())
sums.append(self.found[i,:].sum())
sums.append(sum([self.found[0,0],self.found[1,1],self.found[2,2],self.found[3,3],self.found[4,4]]))
sums.append(sum([self.found[4,0],self.found[3,1],self.found[2,2],self.found[1,3],self.found[0,4]]))
print(sums)
return max(sums)==5
def makeCard(self):
choices=list(self.lookup.keys())
choices.remove("emerald")
chosen=random.sample(choices,25)
chosen[12]="emerald"
for x in range(5):
for y in range(5):
item=chosen[y+x*5]
imgname=random.choice(self.lookup[item])
idx=self.lookup[item].index(imgname)
self.addSquare(item,x,y,index=idx) | bingoCard.py | from PIL import Image
import numpy as np
from os import path
from json import load
import random
class bingoCard:
def __init__(self,background="map_background.png",save="overlay.png",icons="icons"):
self.card = Image.open(background)
self.card = self.card.convert("RGBA")
self.location=save
self.icons=icons
with open("icons.json","r") as file:
items=load(file)
self.lookup=items
self.items={}
self.found=np.zeros((5,5))
def addSquare(self,name,x,y,index=0):
self.items[name]=[x,y,index]
imName=self.lookup[name][index]
self.markSquare(x,y,name=imName)
def markSquare(self,x,y,name="cross.png"):
icon=Image.open(path.join(self.icons,name))
icon=icon.convert("RGBA")
icon=icon.resize((16,16,))
offset=(16+x*19,16+y*19,)
self.card.paste(icon,offset,icon)
self.card.save(self.location)
def foundItem(self,item,index=0):
found=False
if item in self.items:
if index == self.items[item][2]:
self.markSquare(self.items[item][0],self.items[item][1])
self.found[x,y]=1
found=True
def checkBingo(self):
sums=[]
for i in range(5):
sums.append(self.found[:,i].sum())
sums.append(self.found[i,:].sum())
sums.append(sum([self.found[0,0],self.found[1,1],self.found[2,2],self.found[3,3],self.found[4,4]]))
sums.append(sum([self.found[4,0],self.found[3,1],self.found[2,2],self.found[1,3],self.found[0,4]]))
print(sums)
return max(sums)==5
def makeCard(self):
choices=list(self.lookup.keys())
choices.remove("emerald")
chosen=random.sample(choices,25)
chosen[12]="emerald"
for x in range(5):
for y in range(5):
item=chosen[y+x*5]
imgname=random.choice(self.lookup[item])
idx=self.lookup[item].index(imgname)
self.addSquare(item,x,y,index=idx) | 0.14137 | 0.123603 |
import sys
import os
import argparse
import traceback
from disk import mount_point
from mod import set_self_dir, run_shell, run_shell_input
'''
How to build VMbox image?
VBoxManage internalcommands createrawvmdk -filename run/image/disk.vmdk -rawdisk run/image/disk.img
'''
# set ovmf_path if boot from UEFI
ovmf_path = '/usr/share/ovmf/x64/OVMF_CODE.fd'
qemu = 'qemu-system-x86_64 -drive file=../run/image/disk.img,format=raw,index=0 -m 64 -s -smp 4,sockets=1,cores=4,threads=1 -serial file:../run/kernel_out.log'
qemu_uefi = 'qemu-system-x86_64 -drive file=' + ovmf_path + ',format=raw,readonly,if=pflash -drive file=../run/image/disk.img,format=raw,index=0 -m 64 -s -smp 4,sockets=1,cores=4 -serial file:../run/kernel_out.log'
qemu_headless_str = ' -nographic -vnc :1'
bochs = 'bochs -f ../run/cfg/bochs/bochsrc.txt'
vbox = 'VBoxManage startvm boot'
if __name__ == "__main__":
set_self_dir()
parser = argparse.ArgumentParser(
description='run tools: run kernel')
parser.add_argument(
"-n", "--nographic", action='store_true', help="try don't show GUI")
parser.add_argument(
"-u", "--uefi", action='store_true', help="open qemu with uefi firmware")
parser.add_argument("emulator_name", type=str,
choices=["q", "b", "v"], help="q: run qemu\nb: run bochs\nv: run virtual box")
args = parser.parse_args()
base_mnt = None
if args.uefi:
base_mnt = mount_point("../run/image/disk.img", 2)
else:
base_mnt = mount_point("../run/image/disk.img", 1)
if base_mnt == "" or base_mnt == None:
print("Mount disk before run.\n try 'python disk.py mount'")
exit(-1)
try:
run_shell('cp -R ../build/bin/system/* ' + base_mnt + '/boot/')
run_shell('sync')
tp = args.emulator_name
if tp == 'q':
if args.uefi:
print('run qemu uefi')
if args.nographic:
run_shell(qemu_uefi + qemu_headless_str)
else:
run_shell(qemu_uefi)
else:
print("run qemu")
if args.nographic:
run_shell(qemu + qemu_headless_str)
else:
run_shell(qemu)
elif tp == 'b':
print('run bochs')
run_shell_input(bochs)
elif tp == 'v':
print('run VBox')
run_shell(vbox)
except Exception:
traceback.print_exc()
parser.print_help()
exit(-1) | util/run.py | import sys
import os
import argparse
import traceback
from disk import mount_point
from mod import set_self_dir, run_shell, run_shell_input
'''
How to build VMbox image?
VBoxManage internalcommands createrawvmdk -filename run/image/disk.vmdk -rawdisk run/image/disk.img
'''
# set ovmf_path if boot from UEFI
ovmf_path = '/usr/share/ovmf/x64/OVMF_CODE.fd'
qemu = 'qemu-system-x86_64 -drive file=../run/image/disk.img,format=raw,index=0 -m 64 -s -smp 4,sockets=1,cores=4,threads=1 -serial file:../run/kernel_out.log'
qemu_uefi = 'qemu-system-x86_64 -drive file=' + ovmf_path + ',format=raw,readonly,if=pflash -drive file=../run/image/disk.img,format=raw,index=0 -m 64 -s -smp 4,sockets=1,cores=4 -serial file:../run/kernel_out.log'
qemu_headless_str = ' -nographic -vnc :1'
bochs = 'bochs -f ../run/cfg/bochs/bochsrc.txt'
vbox = 'VBoxManage startvm boot'
if __name__ == "__main__":
set_self_dir()
parser = argparse.ArgumentParser(
description='run tools: run kernel')
parser.add_argument(
"-n", "--nographic", action='store_true', help="try don't show GUI")
parser.add_argument(
"-u", "--uefi", action='store_true', help="open qemu with uefi firmware")
parser.add_argument("emulator_name", type=str,
choices=["q", "b", "v"], help="q: run qemu\nb: run bochs\nv: run virtual box")
args = parser.parse_args()
base_mnt = None
if args.uefi:
base_mnt = mount_point("../run/image/disk.img", 2)
else:
base_mnt = mount_point("../run/image/disk.img", 1)
if base_mnt == "" or base_mnt == None:
print("Mount disk before run.\n try 'python disk.py mount'")
exit(-1)
try:
run_shell('cp -R ../build/bin/system/* ' + base_mnt + '/boot/')
run_shell('sync')
tp = args.emulator_name
if tp == 'q':
if args.uefi:
print('run qemu uefi')
if args.nographic:
run_shell(qemu_uefi + qemu_headless_str)
else:
run_shell(qemu_uefi)
else:
print("run qemu")
if args.nographic:
run_shell(qemu + qemu_headless_str)
else:
run_shell(qemu)
elif tp == 'b':
print('run bochs')
run_shell_input(bochs)
elif tp == 'v':
print('run VBox')
run_shell(vbox)
except Exception:
traceback.print_exc()
parser.print_help()
exit(-1) | 0.079896 | 0.05621 |
from hrmachine import Machine, Letter
from hrmachine.utils import string_to_values
def test_string_to_values():
expected = [Letter.P, Letter.Y, Letter.T, Letter.H, Letter.O, Letter.N]
assert string_to_values('python') == expected
expected = [Letter.L, Letter.I, Letter.N, Letter.U, Letter.X, 0]
assert string_to_values('linux', zero_terminated=True) == expected
expected = [8, 0, Letter.B, Letter.I, Letter.T, 0]
assert string_to_values('8 bit', zero_terminated=True) == expected
def test_alphabetizer():
registers = [None] * 25
registers[23] = 0
registers[24] = 10
machine = Machine(registers)
inbox = string_to_values('cauebs python', zero_terminated=True)
outbox = machine.run_file('examples/alphabetizer.hr', inbox)
assert outbox == string_to_values('cauebs')
inbox = string_to_values('human resource', zero_terminated=True)
outbox = machine.run_file('examples/alphabetizer.hr',
inbox, registers=registers)
assert outbox == string_to_values('human')
inbox = string_to_values('mypy pytest', zero_terminated=True)
outbox = machine.run_file('examples/alphabetizer.hr',
inbox, registers=registers)
assert outbox == string_to_values('mypy')
def test_digit_exploder():
registers = [None] * 12
registers[9] = 0
registers[10] = 10
registers[11] = 100
machine = Machine(registers)
outbox = machine.run_file('examples/digit-exploder.hr',
inbox=[9, 809, 838, 66])
assert outbox == [9, 8, 0, 9, 8, 3, 8, 6, 6]
outbox = machine.run_file('examples/digit-exploder.hr',
inbox=[819, 14, 544, 92],
registers=registers)
assert outbox == [8, 1, 9, 1, 4, 5, 4, 4, 9, 2]
outbox = machine.run_file('examples/digit-exploder.hr',
inbox=[638, 504, 5, 21, 33],
registers=registers)
assert outbox == [6, 3, 8, 5, 0, 4, 5, 2, 1, 3, 3]
def test_vowel_incinerator():
registers = [None] * 10
registers[0:5] = string_to_values('aeiou')
registers[5] = 0
machine = Machine(registers)
outbox = machine.run_file('examples/vowel-incinerator.hr',
inbox=string_to_values('AOJEQOQDJTNJQEBNEJCJ'))
assert outbox == string_to_values('JQQDJTNJQBNJCJ')
def test_string_reverse():
registers = [None] * 15
registers[14] = 0
machine = Machine(registers)
inbox = string_to_values('cauebs python', zero_terminated=True)
outbox = machine.run_file('examples/string-reverse.hr', inbox)
assert outbox == string_to_values('sbeuacnohtyp')
inbox = string_to_values('human resource', zero_terminated=True)
outbox = machine.run_file('examples/string-reverse.hr',
inbox, registers=registers)
assert outbox == string_to_values('namuhecruoser')
inbox = string_to_values('mypy pytest', zero_terminated=True)
outbox = machine.run_file('examples/string-reverse.hr',
inbox, registers=registers)
assert outbox == string_to_values('ypymtsetyp')
if __name__ == '__main__':
test_alphabetizer() | tests.py | from hrmachine import Machine, Letter
from hrmachine.utils import string_to_values
def test_string_to_values():
expected = [Letter.P, Letter.Y, Letter.T, Letter.H, Letter.O, Letter.N]
assert string_to_values('python') == expected
expected = [Letter.L, Letter.I, Letter.N, Letter.U, Letter.X, 0]
assert string_to_values('linux', zero_terminated=True) == expected
expected = [8, 0, Letter.B, Letter.I, Letter.T, 0]
assert string_to_values('8 bit', zero_terminated=True) == expected
def test_alphabetizer():
registers = [None] * 25
registers[23] = 0
registers[24] = 10
machine = Machine(registers)
inbox = string_to_values('cauebs python', zero_terminated=True)
outbox = machine.run_file('examples/alphabetizer.hr', inbox)
assert outbox == string_to_values('cauebs')
inbox = string_to_values('human resource', zero_terminated=True)
outbox = machine.run_file('examples/alphabetizer.hr',
inbox, registers=registers)
assert outbox == string_to_values('human')
inbox = string_to_values('mypy pytest', zero_terminated=True)
outbox = machine.run_file('examples/alphabetizer.hr',
inbox, registers=registers)
assert outbox == string_to_values('mypy')
def test_digit_exploder():
registers = [None] * 12
registers[9] = 0
registers[10] = 10
registers[11] = 100
machine = Machine(registers)
outbox = machine.run_file('examples/digit-exploder.hr',
inbox=[9, 809, 838, 66])
assert outbox == [9, 8, 0, 9, 8, 3, 8, 6, 6]
outbox = machine.run_file('examples/digit-exploder.hr',
inbox=[819, 14, 544, 92],
registers=registers)
assert outbox == [8, 1, 9, 1, 4, 5, 4, 4, 9, 2]
outbox = machine.run_file('examples/digit-exploder.hr',
inbox=[638, 504, 5, 21, 33],
registers=registers)
assert outbox == [6, 3, 8, 5, 0, 4, 5, 2, 1, 3, 3]
def test_vowel_incinerator():
registers = [None] * 10
registers[0:5] = string_to_values('aeiou')
registers[5] = 0
machine = Machine(registers)
outbox = machine.run_file('examples/vowel-incinerator.hr',
inbox=string_to_values('AOJEQOQDJTNJQEBNEJCJ'))
assert outbox == string_to_values('JQQDJTNJQBNJCJ')
def test_string_reverse():
registers = [None] * 15
registers[14] = 0
machine = Machine(registers)
inbox = string_to_values('cauebs python', zero_terminated=True)
outbox = machine.run_file('examples/string-reverse.hr', inbox)
assert outbox == string_to_values('sbeuacnohtyp')
inbox = string_to_values('human resource', zero_terminated=True)
outbox = machine.run_file('examples/string-reverse.hr',
inbox, registers=registers)
assert outbox == string_to_values('namuhecruoser')
inbox = string_to_values('mypy pytest', zero_terminated=True)
outbox = machine.run_file('examples/string-reverse.hr',
inbox, registers=registers)
assert outbox == string_to_values('ypymtsetyp')
if __name__ == '__main__':
test_alphabetizer() | 0.693577 | 0.792103 |
import padog
import PA_STABLIZE
from math import pi,tan
x1=0;x2=0;x3=0;x4=0;y1=0;y2=0;y3=0;y4=0
def pit_cause_cg_adjust(sita,h,Kp):
result=round((h*tan((sita)*Kp)),2)
return result
def foward_cg_stab(r1,r4,r2,r3,gait_need,enable):
if enable==True and (abs(r1)+abs(r4)+abs(r2)+abs(r3))!=0: # When Key_stap set True in padog, self stab can be use here.
PA_STABLIZE.get_imu_value()
if PA_STABLIZE.gyro_cal_sta==1: #等待gyro校准完成
gyro_p=PA_STABLIZE.filter_data_p
padog.X_goal=padog.in_y+gait_need+pit_cause_cg_adjust((gyro_p-PA_STABLIZE.p_origin)*pi/180,110,1.1)+PA_STABLIZE.gyro_x_fitted*5
def trot(t,x_target,z_target,r1,r4,r2,r3):
global x1,x2,x3,x4,y1,y2,y3,y4
Tf=0.5
#陀螺仪引入
foward_cg_stab(r1,r4,r2,r3,0,padog.key_stab)
if t<Tf:
phase_1_swing=padog.swing_curve_generate(t,Tf,x_target,z_target,0,0,0)
phase_1_support=padog.support_curve_generate(0.5+t,Tf,x_target,0.5,0)
#TROT
x1=phase_1_swing[0]*r1;x2=phase_1_support[0]*r2;x3=phase_1_swing[0]*r3;x4=phase_1_support[0]*r4
y1=phase_1_swing[1];y2=phase_1_support[1];y3=phase_1_swing[1];y4=phase_1_support[1]
if t>=Tf:
phase_2_swing=padog.swing_curve_generate(t-0.5,Tf,x_target,z_target,0,0,0)
phase_2_support=padog.support_curve_generate(t,Tf,x_target,0.5,0)
#TROT
x1=phase_2_support[0]*r1;x2=phase_2_swing[0]*r2;x3=phase_2_support[0]*r3;x4=phase_2_swing[0]*r4
y1=phase_2_support[1];y2=phase_2_swing[1];y3=phase_2_support[1];y4=phase_2_swing[1]
return x1,x2,x3,x4,y1,y2,y3,y4
def walk(t,x_target,z_target,r1,r4,r2,r3):
global x1,x2,x3,x4,y1,y2,y3,y4
Tf=0.5
#陀螺仪引入
if t<Tf:
foward_cg_stab(r1,r4,r2,r3,-30,True)
if abs(padog.X_S-padog.X_goal)<1:
padog.t=padog.t+padog.speed/5
phase_w_swing=padog.swing_curve_generate(t,Tf,x_target,z_target,0,0,0)
x1=phase_w_swing[0];x2=0;x3=0;x4=0
y1=phase_w_swing[1];y2=0;y3=0;y4=0
if t>=Tf and t<2*Tf:
foward_cg_stab(r1,r4,r2,r3,-30,True)
if abs(padog.X_S-padog.X_goal)<1:
padog.t=padog.t+padog.speed/5
phase_w_swing=padog.swing_curve_generate(t-0.5,Tf,x_target,z_target,0,0,0)
x1=x_target;x2=phase_w_swing[0];x3=0;x4=0
y1=0;y2=phase_w_swing[1];y3=0;y4=0
if t>=2*Tf and t<3*Tf:
foward_cg_stab(r1,r4,r2,r3,40,True)
if abs(padog.X_S-padog.X_goal)<1:
padog.t=padog.t+padog.speed/5
phase_w_swing=padog.swing_curve_generate(t-1,Tf,x_target,z_target,0,0,0)
x1=x_target;x2=x_target;x3=phase_w_swing[0];x4=0
y1=0;y2=0;y3=phase_w_swing[1];y4=0
if t>=3*Tf and t<4*Tf:
foward_cg_stab(r1,r4,r2,r3,40,True)
if abs(padog.X_S-padog.X_goal)<1:
padog.t=padog.t+padog.speed/5
phase_w_swing=padog.swing_curve_generate(t-1.5,Tf,x_target,z_target,0,0,0)
x1=x_target;x2=x_target;x3=x_target;x4=phase_w_swing[0]
y1=0;y2=0;y3=0;y4=phase_w_swing[1]
if t>=4*Tf:
foward_cg_stab(r1,r4,r2,r3,-30,True)
padog.t=padog.t+padog.speed/5
phase_w_support=padog.support_curve_generate(t-1.5,Tf,x_target,0.5,0)
x1=phase_w_support[0];x2=phase_w_support[0];x3=phase_w_support[0];x4=phase_w_support[0]
y1=phase_w_support[1];y2=phase_w_support[1];y3=phase_w_support[1];y4=phase_w_support[1]
return x1,x2,x3,x4,y1,y2,y3,y4 | Py Apple Dynamics V7.3 SRC/PA-Dynamics V7.3/PA_GAIT.py | import padog
import PA_STABLIZE
from math import pi,tan
x1=0;x2=0;x3=0;x4=0;y1=0;y2=0;y3=0;y4=0
def pit_cause_cg_adjust(sita,h,Kp):
result=round((h*tan((sita)*Kp)),2)
return result
def foward_cg_stab(r1,r4,r2,r3,gait_need,enable):
if enable==True and (abs(r1)+abs(r4)+abs(r2)+abs(r3))!=0: # When Key_stap set True in padog, self stab can be use here.
PA_STABLIZE.get_imu_value()
if PA_STABLIZE.gyro_cal_sta==1: #等待gyro校准完成
gyro_p=PA_STABLIZE.filter_data_p
padog.X_goal=padog.in_y+gait_need+pit_cause_cg_adjust((gyro_p-PA_STABLIZE.p_origin)*pi/180,110,1.1)+PA_STABLIZE.gyro_x_fitted*5
def trot(t,x_target,z_target,r1,r4,r2,r3):
global x1,x2,x3,x4,y1,y2,y3,y4
Tf=0.5
#陀螺仪引入
foward_cg_stab(r1,r4,r2,r3,0,padog.key_stab)
if t<Tf:
phase_1_swing=padog.swing_curve_generate(t,Tf,x_target,z_target,0,0,0)
phase_1_support=padog.support_curve_generate(0.5+t,Tf,x_target,0.5,0)
#TROT
x1=phase_1_swing[0]*r1;x2=phase_1_support[0]*r2;x3=phase_1_swing[0]*r3;x4=phase_1_support[0]*r4
y1=phase_1_swing[1];y2=phase_1_support[1];y3=phase_1_swing[1];y4=phase_1_support[1]
if t>=Tf:
phase_2_swing=padog.swing_curve_generate(t-0.5,Tf,x_target,z_target,0,0,0)
phase_2_support=padog.support_curve_generate(t,Tf,x_target,0.5,0)
#TROT
x1=phase_2_support[0]*r1;x2=phase_2_swing[0]*r2;x3=phase_2_support[0]*r3;x4=phase_2_swing[0]*r4
y1=phase_2_support[1];y2=phase_2_swing[1];y3=phase_2_support[1];y4=phase_2_swing[1]
return x1,x2,x3,x4,y1,y2,y3,y4
def walk(t,x_target,z_target,r1,r4,r2,r3):
global x1,x2,x3,x4,y1,y2,y3,y4
Tf=0.5
#陀螺仪引入
if t<Tf:
foward_cg_stab(r1,r4,r2,r3,-30,True)
if abs(padog.X_S-padog.X_goal)<1:
padog.t=padog.t+padog.speed/5
phase_w_swing=padog.swing_curve_generate(t,Tf,x_target,z_target,0,0,0)
x1=phase_w_swing[0];x2=0;x3=0;x4=0
y1=phase_w_swing[1];y2=0;y3=0;y4=0
if t>=Tf and t<2*Tf:
foward_cg_stab(r1,r4,r2,r3,-30,True)
if abs(padog.X_S-padog.X_goal)<1:
padog.t=padog.t+padog.speed/5
phase_w_swing=padog.swing_curve_generate(t-0.5,Tf,x_target,z_target,0,0,0)
x1=x_target;x2=phase_w_swing[0];x3=0;x4=0
y1=0;y2=phase_w_swing[1];y3=0;y4=0
if t>=2*Tf and t<3*Tf:
foward_cg_stab(r1,r4,r2,r3,40,True)
if abs(padog.X_S-padog.X_goal)<1:
padog.t=padog.t+padog.speed/5
phase_w_swing=padog.swing_curve_generate(t-1,Tf,x_target,z_target,0,0,0)
x1=x_target;x2=x_target;x3=phase_w_swing[0];x4=0
y1=0;y2=0;y3=phase_w_swing[1];y4=0
if t>=3*Tf and t<4*Tf:
foward_cg_stab(r1,r4,r2,r3,40,True)
if abs(padog.X_S-padog.X_goal)<1:
padog.t=padog.t+padog.speed/5
phase_w_swing=padog.swing_curve_generate(t-1.5,Tf,x_target,z_target,0,0,0)
x1=x_target;x2=x_target;x3=x_target;x4=phase_w_swing[0]
y1=0;y2=0;y3=0;y4=phase_w_swing[1]
if t>=4*Tf:
foward_cg_stab(r1,r4,r2,r3,-30,True)
padog.t=padog.t+padog.speed/5
phase_w_support=padog.support_curve_generate(t-1.5,Tf,x_target,0.5,0)
x1=phase_w_support[0];x2=phase_w_support[0];x3=phase_w_support[0];x4=phase_w_support[0]
y1=phase_w_support[1];y2=phase_w_support[1];y3=phase_w_support[1];y4=phase_w_support[1]
return x1,x2,x3,x4,y1,y2,y3,y4 | 0.172939 | 0.136033 |
import argparse
import girder_worker
import os
import sys
def get_config(section, option):
return girder_worker.config.get(section, option)
def set_config(section, option, value):
if not girder_worker.config.has_section(section):
girder_worker.config.add_section(section)
girder_worker.config.set(section, option, value)
write_config()
def write_config(fd=None):
if fd is None:
path = os.path.join(girder_worker.PACKAGE_DIR, 'worker.local.cfg')
with open(path, 'w') as fd:
girder_worker.config.write(fd)
else:
girder_worker.config.write(fd)
def rm_config(section, option):
girder_worker.config.remove_option(section, option)
write_config()
def main():
parser = argparse.ArgumentParser(
description='Get and set configuration values for the worker')
subparsers = parser.add_subparsers(help='sub-command help', dest='cmd')
get_parser = subparsers.add_parser('get', help='get a config value')
set_parser = subparsers.add_parser('set', help='set a config value')
rm_parser = subparsers.add_parser('rm', help='remove a config option')
subparsers.add_parser('list', help='show all config values')
get_parser.add_argument(
'section', help='The section containing the option.')
get_parser.add_argument('option', help='The option to retrieve.')
set_parser.add_argument(
'section', help='The section containing the option.')
set_parser.add_argument('option', help='The option to set.')
set_parser.add_argument('value', help='The value to set the option to.')
rm_parser.add_argument(
'section', help='The section containing the option to remove.')
rm_parser.add_argument('option', help='The option to remove.')
args = parser.parse_args()
if args.cmd == 'get':
print(get_config(args.section, args.option))
elif args.cmd == 'set':
set_config(args.section, args.option, args.value)
elif args.cmd == 'list':
write_config(sys.stdout)
elif args.cmd == 'rm':
rm_config(args.section, args.option)
if __name__ == '__main__':
main() # pragma: no cover | packages/girder_worker/girder_worker/configure.py | import argparse
import girder_worker
import os
import sys
def get_config(section, option):
return girder_worker.config.get(section, option)
def set_config(section, option, value):
if not girder_worker.config.has_section(section):
girder_worker.config.add_section(section)
girder_worker.config.set(section, option, value)
write_config()
def write_config(fd=None):
if fd is None:
path = os.path.join(girder_worker.PACKAGE_DIR, 'worker.local.cfg')
with open(path, 'w') as fd:
girder_worker.config.write(fd)
else:
girder_worker.config.write(fd)
def rm_config(section, option):
girder_worker.config.remove_option(section, option)
write_config()
def main():
parser = argparse.ArgumentParser(
description='Get and set configuration values for the worker')
subparsers = parser.add_subparsers(help='sub-command help', dest='cmd')
get_parser = subparsers.add_parser('get', help='get a config value')
set_parser = subparsers.add_parser('set', help='set a config value')
rm_parser = subparsers.add_parser('rm', help='remove a config option')
subparsers.add_parser('list', help='show all config values')
get_parser.add_argument(
'section', help='The section containing the option.')
get_parser.add_argument('option', help='The option to retrieve.')
set_parser.add_argument(
'section', help='The section containing the option.')
set_parser.add_argument('option', help='The option to set.')
set_parser.add_argument('value', help='The value to set the option to.')
rm_parser.add_argument(
'section', help='The section containing the option to remove.')
rm_parser.add_argument('option', help='The option to remove.')
args = parser.parse_args()
if args.cmd == 'get':
print(get_config(args.section, args.option))
elif args.cmd == 'set':
set_config(args.section, args.option, args.value)
elif args.cmd == 'list':
write_config(sys.stdout)
elif args.cmd == 'rm':
rm_config(args.section, args.option)
if __name__ == '__main__':
main() # pragma: no cover | 0.263884 | 0.063337 |
import logging
from rest_framework import serializers
from . import models
from recipients.models import Recipient
logger = logging.getLogger('adoptions')
class AdoptionSerializer(serializers.ModelSerializer):
status = serializers.CharField(read_only=True)
status_set_at = serializers.DateTimeField(read_only=True)
recipient_uid = serializers.UUIDField(source='recipient.uid')
recipient_background = serializers.CharField(source='recipient.background_story', read_only=True)
recipient_tags = serializers.SerializerMethodField()
class Meta:
model = models.Adoption
fields = (
'id',
'recipient_uid',
'recipient_tags',
'recipient_background',
'status',
'status_set_at',
)
def get_recipient_tags(self, obj):
return ','.join(obj.recipient.recipient_tags)
def create(self, validated_data):
request = self.context['request']
recipient_uid = validated_data.pop('recipient')['uid']
try:
recipient = Recipient.objects.get(uid=recipient_uid)
except Recipient.DoesNotExist:
logger.warning('Adoption request for not exists recipient uid: %s', recipient_uid)
raise serializers.ValidationError('Recipient does not exists')
if (
self.Meta.model.objects.filter(
recipient=recipient,
status__in=[
self.Meta.model.STATUS_PENDING_APPROVAL,
self.Meta.model.STATUS_APPROVED,
]
).exclude(
adopter_id=request.user.pk,
).exists()
):
logger.warning('Adoption request for recipient uid: %s already exists', recipient_uid)
raise serializers.ValidationError('Adoption request exists')
adoption, created = self.Meta.model.objects.get_or_create(
adopter_id=request.user.pk,
recipient=recipient,
)
# Handle idempotency
if not created:
logger.warning('Adoption request for recipient uid: %s already exists', recipient_uid)
return adoption
class ApprovedAdoptionSerializer(serializers.ModelSerializer):
recipient_uid = serializers.UUIDField(source='recipient.uid', read_only=True)
recipient_fullname = serializers.CharField(source='recipient.full_name', read_only=True)
recipient_phone = serializers.CharField(source='recipient.phone', read_only=True)
recipient_address = serializers.CharField(source='recipient.address', read_only=True)
last_delivery_at = serializers.DateTimeField(read_only=True, allow_null=True)
class Meta:
model = models.Adoption
fields = (
'recipient_uid',
'recipient_fullname',
'recipient_phone',
'recipient_address',
'status_set_at',
'created_at',
'last_delivery_at',
)
class PackageTypeSerializer(serializers.ModelSerializer):
class Meta:
model = models.PackageType
fields = (
'id',
'name',
'description',
)
class DeliverySerializer(serializers.ModelSerializer):
planned_delivery_date = serializers.DateField(read_only=True)
delivery_to = serializers.UUIDField(label='delivery_to', source='delivery_to.uid')
delivery_to_fullname = serializers.CharField(source='delivery_to.full_name', read_only=True)
delivery_to_phone = serializers.CharField(source='delivery_to.phone', read_only=True)
delivery_to_address = serializers.CharField(source='delivery_to.address', read_only=True)
package_type_name = serializers.CharField(source='package_type.name', read_only=True)
status_set_at = serializers.DateTimeField(
format='%d/%m/%y',
read_only=True,
)
def validate_status(self, value):
if value not in (
models.Delivery.STATUS_PENDING,
models.Delivery.STATUS_DELIVERED,
):
raise serializers.ValidationError('Invalid status %s for delivery' % value)
return value
class Meta:
model = models.Delivery
fields = (
'id',
'delivery_to',
'delivery_to_fullname',
'delivery_to_phone',
'delivery_to_address',
'planned_delivery_date',
'status',
'package_type',
'package_type_name',
'package_description',
'delivery_description',
'status_set_at',
)
def create(self, validated_data):
request = self.context['request']
recipient_uid = validated_data.pop('delivery_to')['uid']
try:
recipient = Recipient.objects.get(uid=recipient_uid)
except Recipient.DoesNotExist:
raise serializers.ValidationError('Recipient %s does not exists' % recipient_uid)
return self.Meta.model.objects.create(
delivery_from_id=request.user.pk,
delivery_to=recipient,
**validated_data,
)
def update(self, instance, validated_data):
if instance.status not in (
models.Delivery.STATUS_PLANNED,
models.Delivery.STATUS_PENDING
):
raise serializers.ValidationError('Delivery status: %s cannot be changed' % instance.status)
instance.delivery_description = validated_data.get('delivery_description')
instance.status = validated_data['status']
instance.save()
return instance | server/adoptions/serializers.py | import logging
from rest_framework import serializers
from . import models
from recipients.models import Recipient
logger = logging.getLogger('adoptions')
class AdoptionSerializer(serializers.ModelSerializer):
status = serializers.CharField(read_only=True)
status_set_at = serializers.DateTimeField(read_only=True)
recipient_uid = serializers.UUIDField(source='recipient.uid')
recipient_background = serializers.CharField(source='recipient.background_story', read_only=True)
recipient_tags = serializers.SerializerMethodField()
class Meta:
model = models.Adoption
fields = (
'id',
'recipient_uid',
'recipient_tags',
'recipient_background',
'status',
'status_set_at',
)
def get_recipient_tags(self, obj):
return ','.join(obj.recipient.recipient_tags)
def create(self, validated_data):
request = self.context['request']
recipient_uid = validated_data.pop('recipient')['uid']
try:
recipient = Recipient.objects.get(uid=recipient_uid)
except Recipient.DoesNotExist:
logger.warning('Adoption request for not exists recipient uid: %s', recipient_uid)
raise serializers.ValidationError('Recipient does not exists')
if (
self.Meta.model.objects.filter(
recipient=recipient,
status__in=[
self.Meta.model.STATUS_PENDING_APPROVAL,
self.Meta.model.STATUS_APPROVED,
]
).exclude(
adopter_id=request.user.pk,
).exists()
):
logger.warning('Adoption request for recipient uid: %s already exists', recipient_uid)
raise serializers.ValidationError('Adoption request exists')
adoption, created = self.Meta.model.objects.get_or_create(
adopter_id=request.user.pk,
recipient=recipient,
)
# Handle idempotency
if not created:
logger.warning('Adoption request for recipient uid: %s already exists', recipient_uid)
return adoption
class ApprovedAdoptionSerializer(serializers.ModelSerializer):
recipient_uid = serializers.UUIDField(source='recipient.uid', read_only=True)
recipient_fullname = serializers.CharField(source='recipient.full_name', read_only=True)
recipient_phone = serializers.CharField(source='recipient.phone', read_only=True)
recipient_address = serializers.CharField(source='recipient.address', read_only=True)
last_delivery_at = serializers.DateTimeField(read_only=True, allow_null=True)
class Meta:
model = models.Adoption
fields = (
'recipient_uid',
'recipient_fullname',
'recipient_phone',
'recipient_address',
'status_set_at',
'created_at',
'last_delivery_at',
)
class PackageTypeSerializer(serializers.ModelSerializer):
class Meta:
model = models.PackageType
fields = (
'id',
'name',
'description',
)
class DeliverySerializer(serializers.ModelSerializer):
planned_delivery_date = serializers.DateField(read_only=True)
delivery_to = serializers.UUIDField(label='delivery_to', source='delivery_to.uid')
delivery_to_fullname = serializers.CharField(source='delivery_to.full_name', read_only=True)
delivery_to_phone = serializers.CharField(source='delivery_to.phone', read_only=True)
delivery_to_address = serializers.CharField(source='delivery_to.address', read_only=True)
package_type_name = serializers.CharField(source='package_type.name', read_only=True)
status_set_at = serializers.DateTimeField(
format='%d/%m/%y',
read_only=True,
)
def validate_status(self, value):
if value not in (
models.Delivery.STATUS_PENDING,
models.Delivery.STATUS_DELIVERED,
):
raise serializers.ValidationError('Invalid status %s for delivery' % value)
return value
class Meta:
model = models.Delivery
fields = (
'id',
'delivery_to',
'delivery_to_fullname',
'delivery_to_phone',
'delivery_to_address',
'planned_delivery_date',
'status',
'package_type',
'package_type_name',
'package_description',
'delivery_description',
'status_set_at',
)
def create(self, validated_data):
request = self.context['request']
recipient_uid = validated_data.pop('delivery_to')['uid']
try:
recipient = Recipient.objects.get(uid=recipient_uid)
except Recipient.DoesNotExist:
raise serializers.ValidationError('Recipient %s does not exists' % recipient_uid)
return self.Meta.model.objects.create(
delivery_from_id=request.user.pk,
delivery_to=recipient,
**validated_data,
)
def update(self, instance, validated_data):
if instance.status not in (
models.Delivery.STATUS_PLANNED,
models.Delivery.STATUS_PENDING
):
raise serializers.ValidationError('Delivery status: %s cannot be changed' % instance.status)
instance.delivery_description = validated_data.get('delivery_description')
instance.status = validated_data['status']
instance.save()
return instance | 0.47171 | 0.177419 |
import pathlib
import matplotlib.pyplot as plt
import numpy as np
np.set_printoptions(suppress=True)
PATH_HERE = pathlib.Path(__file__).parent.resolve()
CSV_PATH = PATH_HERE.joinpath("../../data/norm_1000_12_34.csv").resolve()
CSV_VALUES = np.loadtxt(CSV_PATH)
print(f"Loaded {len(CSV_VALUES)} values from {CSV_PATH}")
print(f"Mean = {np.mean(CSV_VALUES)}")
print(f"Standard Deviation = {np.std(CSV_VALUES)}")
def makeFigure(values: np.ndarray):
"""
Save a histogram plot to allow quick inspection
of the distribution of CSV values.
"""
hist, edges = np.histogram(values, bins=25, range=(-25, 100))
print(f"Hist = {hist}")
print(f"Edges = {edges}")
plt.stem(edges[:-1], hist, basefmt=" ")
plt.title(CSV_PATH.name)
plt.ylabel("Count")
plt.xlabel("Bin (Left Edge)")
plt.grid(alpha=.2)
plt.savefig(str(CSV_PATH)+"_hist.png")
plt.show()
def cSharpArray(values: np.ndarray):
return "{ " + ", ".join([str(x) for x in values]) + " }"
def testCaseRange(values: np.ndarray, rangeMin: float, rangeMax: float, rangeBins: int, plotToo: bool = False):
"""
Create a C# test case for histogram calculations.
Consistent with np.hist "Values outside the range are ignored"
"""
print()
print(f"float rangeMin = {rangeMin};")
print(f"float rangeMax = {rangeMax};")
print(f"int rangeBins = {rangeBins};")
counts, edges = np.histogram(values,
bins=rangeBins,
range=(rangeMin, rangeMax),
density=False)
print(f"int[] count = {cSharpArray(counts)};")
print(f"float[] edges = {cSharpArray(edges)};")
if plotToo:
plt.stem(edges[:-1], counts, basefmt=" ")
plt.show()
densities, edges = np.histogram(values,
bins=rangeBins,
range=(rangeMin, rangeMax),
density=True)
print(f"float[] densities = {cSharpArray(densities)};")
if __name__ == "__main__":
# makeFigure(CSV_VALUES)
testCaseRange(CSV_VALUES, -25, 100, 25)
testCaseRange(CSV_VALUES, 10, 45, 80)
print("DONE") | dev/python/histogram/hist.py | import pathlib
import matplotlib.pyplot as plt
import numpy as np
np.set_printoptions(suppress=True)
PATH_HERE = pathlib.Path(__file__).parent.resolve()
CSV_PATH = PATH_HERE.joinpath("../../data/norm_1000_12_34.csv").resolve()
CSV_VALUES = np.loadtxt(CSV_PATH)
print(f"Loaded {len(CSV_VALUES)} values from {CSV_PATH}")
print(f"Mean = {np.mean(CSV_VALUES)}")
print(f"Standard Deviation = {np.std(CSV_VALUES)}")
def makeFigure(values: np.ndarray):
"""
Save a histogram plot to allow quick inspection
of the distribution of CSV values.
"""
hist, edges = np.histogram(values, bins=25, range=(-25, 100))
print(f"Hist = {hist}")
print(f"Edges = {edges}")
plt.stem(edges[:-1], hist, basefmt=" ")
plt.title(CSV_PATH.name)
plt.ylabel("Count")
plt.xlabel("Bin (Left Edge)")
plt.grid(alpha=.2)
plt.savefig(str(CSV_PATH)+"_hist.png")
plt.show()
def cSharpArray(values: np.ndarray):
return "{ " + ", ".join([str(x) for x in values]) + " }"
def testCaseRange(values: np.ndarray, rangeMin: float, rangeMax: float, rangeBins: int, plotToo: bool = False):
"""
Create a C# test case for histogram calculations.
Consistent with np.hist "Values outside the range are ignored"
"""
print()
print(f"float rangeMin = {rangeMin};")
print(f"float rangeMax = {rangeMax};")
print(f"int rangeBins = {rangeBins};")
counts, edges = np.histogram(values,
bins=rangeBins,
range=(rangeMin, rangeMax),
density=False)
print(f"int[] count = {cSharpArray(counts)};")
print(f"float[] edges = {cSharpArray(edges)};")
if plotToo:
plt.stem(edges[:-1], counts, basefmt=" ")
plt.show()
densities, edges = np.histogram(values,
bins=rangeBins,
range=(rangeMin, rangeMax),
density=True)
print(f"float[] densities = {cSharpArray(densities)};")
if __name__ == "__main__":
# makeFigure(CSV_VALUES)
testCaseRange(CSV_VALUES, -25, 100, 25)
testCaseRange(CSV_VALUES, 10, 45, 80)
print("DONE") | 0.589362 | 0.485844 |
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Courses',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=100)),
('code', models.CharField(max_length=10)),
],
),
migrations.CreateModel(
name='Dept',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=10)),
],
),
migrations.CreateModel(
name='Profs',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=100)),
('email', models.EmailField(max_length=254)),
('webpage', models.URLField()),
('pic_url', models.URLField()),
('dept', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='rate.Dept')),
],
),
migrations.CreateModel(
name='Review',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('username', models.CharField(max_length=50)),
('actual_name', models.CharField(max_length=50)),
('email', models.EmailField(max_length=254)),
('comment', models.CharField(max_length=1024)),
('isAnonymous', models.BooleanField(default=False)),
('difficulty', models.SmallIntegerField()),
('content_quality', models.SmallIntegerField()),
('grading', models.SmallIntegerField()),
('attendance', models.SmallIntegerField()),
('overall_rating', models.SmallIntegerField()),
('course', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='rate.Courses')),
('prof', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='rate.Profs')),
],
),
migrations.AddField(
model_name='courses',
name='dept',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='rate.Dept'),
),
] | rate/migrations/0001_initial.py |
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Courses',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=100)),
('code', models.CharField(max_length=10)),
],
),
migrations.CreateModel(
name='Dept',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=10)),
],
),
migrations.CreateModel(
name='Profs',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=100)),
('email', models.EmailField(max_length=254)),
('webpage', models.URLField()),
('pic_url', models.URLField()),
('dept', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='rate.Dept')),
],
),
migrations.CreateModel(
name='Review',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('username', models.CharField(max_length=50)),
('actual_name', models.CharField(max_length=50)),
('email', models.EmailField(max_length=254)),
('comment', models.CharField(max_length=1024)),
('isAnonymous', models.BooleanField(default=False)),
('difficulty', models.SmallIntegerField()),
('content_quality', models.SmallIntegerField()),
('grading', models.SmallIntegerField()),
('attendance', models.SmallIntegerField()),
('overall_rating', models.SmallIntegerField()),
('course', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='rate.Courses')),
('prof', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='rate.Profs')),
],
),
migrations.AddField(
model_name='courses',
name='dept',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='rate.Dept'),
),
] | 0.579281 | 0.144511 |
import asyncio
import itertools
import threading
import time
import rclpy
from rclpy.callback_groups import ReentrantCallbackGroup
from rclpy.executors import SingleThreadedExecutor
from rclpy.node import Node
from std_srvs.srv import SetBool
import asyncx
import asyncx_ros2
_thread = asyncx.EventLoopThread()
SERVICE_NAME = "example_add"
class NodeBase(Node):
def __init__(self, node_name: str) -> None:
super().__init__(node_name)
self._name = node_name
self._thread = threading.Thread(target=self._spin)
self._executor = SingleThreadedExecutor()
def _spin(self) -> None:
rclpy.spin(self, executor=self._executor)
def start(self) -> None:
self._thread.start()
def stop(self) -> None:
print(f"Stopping node: {self._name}")
self._executor.shutdown(timeout_sec=0.1)
class Server(NodeBase):
def __init__(self) -> None:
super().__init__("server")
self._server = self.create_service(
SetBool,
SERVICE_NAME,
self._set_bool,
callback_group=ReentrantCallbackGroup(),
)
self._counter = itertools.count()
@asyncx_ros2.wrap_as_ros_coroutine(_thread.get_loop)
async def _set_bool(
self, request: SetBool.Request, response: SetBool.Response
) -> SetBool.Response:
stime = time.time()
val = next(self._counter)
self.get_logger().info(f"counter={val}, get request")
await asyncio.sleep(1.0)
elapsed = time.time() - stime
self.get_logger().info(f"counter={val}, return response (elapsed: {elapsed})")
return response
class Client(NodeBase):
def __init__(self) -> None:
super().__init__("client")
self._client = self.create_client(
SetBool,
SERVICE_NAME,
callback_group=ReentrantCallbackGroup(),
)
self._timer = self.create_timer(
0.5,
self.timer_callback,
callback_group=ReentrantCallbackGroup(),
)
self._counter = itertools.count()
def run(self) -> None:
executor = SingleThreadedExecutor()
rclpy.spin(self, executor=executor)
async def _get_request(self, val: int) -> SetBool.Request:
request = SetBool.Request()
request.data = val % 2 == 0
await asyncio.sleep(1.0)
return request
@asyncx_ros2.wrap_as_ros_coroutine(_thread.get_loop)
async def timer_callback(self) -> None:
stime = time.time()
val = next(self._counter)
self.get_logger().info(f"counter={val}, timer callback")
request = await self._get_request(val)
self.get_logger().info(f"counter={val}, send request")
await asyncx_ros2.ensure_aio_future(self._client.call_async(request))
elapsed = time.time() - stime
self.get_logger().info(f"counter={val}, completed (elapsed: {elapsed})")
def main() -> None:
rclpy.init()
with _thread:
print("Press enter to stop")
server = Server()
client = Client()
server.start()
client.start()
try:
input()
finally:
print("Terminating nodes")
client.stop()
server.stop()
server.destroy_node()
client.destroy_node()
rclpy.shutdown()
if __name__ == "__main__":
main() | examples/ros2/service.py | import asyncio
import itertools
import threading
import time
import rclpy
from rclpy.callback_groups import ReentrantCallbackGroup
from rclpy.executors import SingleThreadedExecutor
from rclpy.node import Node
from std_srvs.srv import SetBool
import asyncx
import asyncx_ros2
_thread = asyncx.EventLoopThread()
SERVICE_NAME = "example_add"
class NodeBase(Node):
def __init__(self, node_name: str) -> None:
super().__init__(node_name)
self._name = node_name
self._thread = threading.Thread(target=self._spin)
self._executor = SingleThreadedExecutor()
def _spin(self) -> None:
rclpy.spin(self, executor=self._executor)
def start(self) -> None:
self._thread.start()
def stop(self) -> None:
print(f"Stopping node: {self._name}")
self._executor.shutdown(timeout_sec=0.1)
class Server(NodeBase):
def __init__(self) -> None:
super().__init__("server")
self._server = self.create_service(
SetBool,
SERVICE_NAME,
self._set_bool,
callback_group=ReentrantCallbackGroup(),
)
self._counter = itertools.count()
@asyncx_ros2.wrap_as_ros_coroutine(_thread.get_loop)
async def _set_bool(
self, request: SetBool.Request, response: SetBool.Response
) -> SetBool.Response:
stime = time.time()
val = next(self._counter)
self.get_logger().info(f"counter={val}, get request")
await asyncio.sleep(1.0)
elapsed = time.time() - stime
self.get_logger().info(f"counter={val}, return response (elapsed: {elapsed})")
return response
class Client(NodeBase):
def __init__(self) -> None:
super().__init__("client")
self._client = self.create_client(
SetBool,
SERVICE_NAME,
callback_group=ReentrantCallbackGroup(),
)
self._timer = self.create_timer(
0.5,
self.timer_callback,
callback_group=ReentrantCallbackGroup(),
)
self._counter = itertools.count()
def run(self) -> None:
executor = SingleThreadedExecutor()
rclpy.spin(self, executor=executor)
async def _get_request(self, val: int) -> SetBool.Request:
request = SetBool.Request()
request.data = val % 2 == 0
await asyncio.sleep(1.0)
return request
@asyncx_ros2.wrap_as_ros_coroutine(_thread.get_loop)
async def timer_callback(self) -> None:
stime = time.time()
val = next(self._counter)
self.get_logger().info(f"counter={val}, timer callback")
request = await self._get_request(val)
self.get_logger().info(f"counter={val}, send request")
await asyncx_ros2.ensure_aio_future(self._client.call_async(request))
elapsed = time.time() - stime
self.get_logger().info(f"counter={val}, completed (elapsed: {elapsed})")
def main() -> None:
rclpy.init()
with _thread:
print("Press enter to stop")
server = Server()
client = Client()
server.start()
client.start()
try:
input()
finally:
print("Terminating nodes")
client.stop()
server.stop()
server.destroy_node()
client.destroy_node()
rclpy.shutdown()
if __name__ == "__main__":
main() | 0.390476 | 0.100879 |
import pytest
import time
import random
from endpoints.stores import ReadStoreError
from services import service_factories
from tools import tools
from endpoints.tests.emulators import (
DSMR_Emulator,
Fronius_Emulator
)
from services.strategies.basic import Log_Strategy
from services.events import Periodic_Event
from services.service_factories import Logger_Factory
from tools.test_tools.checks import (
df_matches_reader,
has_n_rows
)
from endpoints.tests.readers_fixtures import reader_fixture
logger_factory = service_factories.Logger_Factory()
config_store = tools.Config_Store(filename=tools.get_config_file())
@pytest.fixture(scope='function')
def dsmr_logger():
dsmr_reader_key = config_store.get('dsmr_logger', 'reader')
emulator = DSMR_Emulator(client_port=config_store.get(dsmr_reader_key, 'device'), n_steps=10, interval=.1)
emulator.start()
time.sleep(2)
logger = logger_factory.create_from_config(config_store, 'dsmr_logger')
yield logger
logger[0].commands[0].store.delete_files()
emulator.stop()
@pytest.fixture(scope='function')
def pv_logger():
logger = logger_factory.create_from_config(config_store, 'pv_logger')
yield logger
logger[0].commands[0].store.delete_files()
@pytest.fixture(scope='function')
def meteo_logger():
logger = logger_factory.create_from_config(config_store, 'meteo_logger')
yield logger
logger[0].commands[0].store.delete_files()
@pytest.fixture(scope='function')
def logger_fixture(store_fixture_factory, reader_fixture):
"""
mock_reader,
fronius_reader,
mock_fronius_reader,
ow_reader,
dsmr_reader_ten_messages,
# dsmr_reader_faulty_messages,
faulty_fronius_reader,
mock_timeout_fronius_reader,
# faulty_dsmr_reader,
faulty_ow_reader,
):
"""
class Logger_Fixture():
def __init__(self):
self.store_tag = None
self.reader_tag = None
self.logger_tag = None
self.agents = None
self.store = None
self.reader = None
self.period = None
self.repetitions = None
self.stores_to_delete_on_teardown = []
self.methods_to_execute_on_teardown = []
self.verifications = []
self.reader_register = [
'mock_reader',
'fronius_reader',
'mock_fronius_reader',
'ow_reader',
'dsmr_reader_ten_messages',
'mock_timeout_fronius_reader',
'dsmr_reader_faulty_messages',
'faulty_dsmr_reader',
'faulty_ow_reader',
'faulty_fronius_reader'
]
def set_logger_tag(self, logger_tag):
self.logger_tag = logger_tag
def set_store_tag(self, store_tag):
self.store_tag = store_tag
def set_reader_tag(self, reader_tag):
self.reader_tag = reader_tag
def set_period(self, period):
self.period = period
def set_repetitions(self, repetitions):
self.repetitions = repetitions
def get_adjusted_period(self):
return self.period * random.uniform(.8, 1.2)
def get_time_out(self):
return self.period * self.repetitions * random.uniform(.8, 1.2)
def create_logger(self):
if self.logger_tag is not None:
if self.logger_tag == 'pv_logger':
agents = logger_factory.create_from_config(config_store, 'pv_logger')
self.stores_to_delete_on_teardown.append(agents[0].commands[0].store)
if self.logger_tag == 'dsmr_logger':
agents = logger_factory.create_from_config(config_store, 'dsmr_logger')
self.stores_to_delete_on_teardown.append(agents[0].commands[0].store)
dsmr_reader_key = config_store.get('dsmr_logger', 'reader')
emulator = DSMR_Emulator(client_port=config_store.get(dsmr_reader_key, 'device'), n_steps=10, interval=.1)
emulator.start()
self.methods_to_execute_on_teardown.append(emulator.stop)
time.sleep(2)
if self.logger_tag == 'requestable_dsmr_logger':
agents = logger_factory.create_from_config(config_store, 'test_requestable_dsmr_logger')
self.stores_to_delete_on_teardown.append(agents[0].commands[0].store)
dsmr_reader_key = config_store.get('dsmr_logger', 'reader')
emulator = DSMR_Emulator(client_port=config_store.get(dsmr_reader_key, 'device'), n_steps=10, interval=.1)
emulator.start()
self.methods_to_execute_on_teardown.append(emulator.stop)
time.sleep(2)
self.set_period(agents[0].event.loop_period)
else:
if self.store_tag == 'mongo_store' or self.store_tag == 'CSV_store':
store = store_fixture_factory.create_from_tag(self.store_tag, randomize_filename=True, delete_on_teardown=True)
if self.reader_tag in self.reader_register:
reader = reader_fixture.create_reader(self.reader_tag)
store.set_index(reader.time_field)
log_event = Periodic_Event(loop_period=self.get_adjusted_period())
strategy = Log_Strategy()
agents = Logger_Factory().create(log_event, reader, store, strategy)
self.agents = agents
self.store = self.agents[0].commands[0].store
self.reader = self.agents[0].inputs[0].reader
return self.agents
def get_all_from_target_store(self):
return self.store.get_all()
def teardown(self):
for store in self.stores_to_delete_on_teardown:
store.delete_files()
for method in self.methods_to_execute_on_teardown:
method()
def df_is_not_none(self):
return self.get_all_from_target_store() is not None
def df_is_none(self):
try:
self.get_all_from_target_store()
return False
except ReadStoreError:
return True
def df_matches_reader(self):
return df_matches_reader(self.get_all_from_target_store(), self.reader)
def correct_nr_of_records_logged(self):
records_logged = self.agents[0].periodic_strategy.states['records_logged']
correct_nr_records = has_n_rows(self.get_all_from_target_store(), n=records_logged)
some_records_are_logged = records_logged > 0
return correct_nr_records and some_records_are_logged
def no_records_logged(self):
records_logged = self.agents[0].periodic_strategy.states['records_logged']
return records_logged == 0
def set_full_verification(self):
self.verifications.append(self.df_is_not_none)
self.verifications.append(self.df_matches_reader)
self.verifications.append(self.correct_nr_of_records_logged)
def set_faulty_verification(self):
self.verifications.append(self.df_is_none)
self.verifications.append(self.no_records_logged)
def verify(self):
if len(self.verifications) == 0:
return False
else:
verified = True
for verification in self.verifications:
if not verification():
verified = False
return verified
logger_fixture = Logger_Fixture()
yield logger_fixture
logger_fixture.teardown() | services/tests/logger_fixtures.py | import pytest
import time
import random
from endpoints.stores import ReadStoreError
from services import service_factories
from tools import tools
from endpoints.tests.emulators import (
DSMR_Emulator,
Fronius_Emulator
)
from services.strategies.basic import Log_Strategy
from services.events import Periodic_Event
from services.service_factories import Logger_Factory
from tools.test_tools.checks import (
df_matches_reader,
has_n_rows
)
from endpoints.tests.readers_fixtures import reader_fixture
logger_factory = service_factories.Logger_Factory()
config_store = tools.Config_Store(filename=tools.get_config_file())
@pytest.fixture(scope='function')
def dsmr_logger():
dsmr_reader_key = config_store.get('dsmr_logger', 'reader')
emulator = DSMR_Emulator(client_port=config_store.get(dsmr_reader_key, 'device'), n_steps=10, interval=.1)
emulator.start()
time.sleep(2)
logger = logger_factory.create_from_config(config_store, 'dsmr_logger')
yield logger
logger[0].commands[0].store.delete_files()
emulator.stop()
@pytest.fixture(scope='function')
def pv_logger():
logger = logger_factory.create_from_config(config_store, 'pv_logger')
yield logger
logger[0].commands[0].store.delete_files()
@pytest.fixture(scope='function')
def meteo_logger():
logger = logger_factory.create_from_config(config_store, 'meteo_logger')
yield logger
logger[0].commands[0].store.delete_files()
@pytest.fixture(scope='function')
def logger_fixture(store_fixture_factory, reader_fixture):
"""
mock_reader,
fronius_reader,
mock_fronius_reader,
ow_reader,
dsmr_reader_ten_messages,
# dsmr_reader_faulty_messages,
faulty_fronius_reader,
mock_timeout_fronius_reader,
# faulty_dsmr_reader,
faulty_ow_reader,
):
"""
class Logger_Fixture():
def __init__(self):
self.store_tag = None
self.reader_tag = None
self.logger_tag = None
self.agents = None
self.store = None
self.reader = None
self.period = None
self.repetitions = None
self.stores_to_delete_on_teardown = []
self.methods_to_execute_on_teardown = []
self.verifications = []
self.reader_register = [
'mock_reader',
'fronius_reader',
'mock_fronius_reader',
'ow_reader',
'dsmr_reader_ten_messages',
'mock_timeout_fronius_reader',
'dsmr_reader_faulty_messages',
'faulty_dsmr_reader',
'faulty_ow_reader',
'faulty_fronius_reader'
]
def set_logger_tag(self, logger_tag):
self.logger_tag = logger_tag
def set_store_tag(self, store_tag):
self.store_tag = store_tag
def set_reader_tag(self, reader_tag):
self.reader_tag = reader_tag
def set_period(self, period):
self.period = period
def set_repetitions(self, repetitions):
self.repetitions = repetitions
def get_adjusted_period(self):
return self.period * random.uniform(.8, 1.2)
def get_time_out(self):
return self.period * self.repetitions * random.uniform(.8, 1.2)
def create_logger(self):
if self.logger_tag is not None:
if self.logger_tag == 'pv_logger':
agents = logger_factory.create_from_config(config_store, 'pv_logger')
self.stores_to_delete_on_teardown.append(agents[0].commands[0].store)
if self.logger_tag == 'dsmr_logger':
agents = logger_factory.create_from_config(config_store, 'dsmr_logger')
self.stores_to_delete_on_teardown.append(agents[0].commands[0].store)
dsmr_reader_key = config_store.get('dsmr_logger', 'reader')
emulator = DSMR_Emulator(client_port=config_store.get(dsmr_reader_key, 'device'), n_steps=10, interval=.1)
emulator.start()
self.methods_to_execute_on_teardown.append(emulator.stop)
time.sleep(2)
if self.logger_tag == 'requestable_dsmr_logger':
agents = logger_factory.create_from_config(config_store, 'test_requestable_dsmr_logger')
self.stores_to_delete_on_teardown.append(agents[0].commands[0].store)
dsmr_reader_key = config_store.get('dsmr_logger', 'reader')
emulator = DSMR_Emulator(client_port=config_store.get(dsmr_reader_key, 'device'), n_steps=10, interval=.1)
emulator.start()
self.methods_to_execute_on_teardown.append(emulator.stop)
time.sleep(2)
self.set_period(agents[0].event.loop_period)
else:
if self.store_tag == 'mongo_store' or self.store_tag == 'CSV_store':
store = store_fixture_factory.create_from_tag(self.store_tag, randomize_filename=True, delete_on_teardown=True)
if self.reader_tag in self.reader_register:
reader = reader_fixture.create_reader(self.reader_tag)
store.set_index(reader.time_field)
log_event = Periodic_Event(loop_period=self.get_adjusted_period())
strategy = Log_Strategy()
agents = Logger_Factory().create(log_event, reader, store, strategy)
self.agents = agents
self.store = self.agents[0].commands[0].store
self.reader = self.agents[0].inputs[0].reader
return self.agents
def get_all_from_target_store(self):
return self.store.get_all()
def teardown(self):
for store in self.stores_to_delete_on_teardown:
store.delete_files()
for method in self.methods_to_execute_on_teardown:
method()
def df_is_not_none(self):
return self.get_all_from_target_store() is not None
def df_is_none(self):
try:
self.get_all_from_target_store()
return False
except ReadStoreError:
return True
def df_matches_reader(self):
return df_matches_reader(self.get_all_from_target_store(), self.reader)
def correct_nr_of_records_logged(self):
records_logged = self.agents[0].periodic_strategy.states['records_logged']
correct_nr_records = has_n_rows(self.get_all_from_target_store(), n=records_logged)
some_records_are_logged = records_logged > 0
return correct_nr_records and some_records_are_logged
def no_records_logged(self):
records_logged = self.agents[0].periodic_strategy.states['records_logged']
return records_logged == 0
def set_full_verification(self):
self.verifications.append(self.df_is_not_none)
self.verifications.append(self.df_matches_reader)
self.verifications.append(self.correct_nr_of_records_logged)
def set_faulty_verification(self):
self.verifications.append(self.df_is_none)
self.verifications.append(self.no_records_logged)
def verify(self):
if len(self.verifications) == 0:
return False
else:
verified = True
for verification in self.verifications:
if not verification():
verified = False
return verified
logger_fixture = Logger_Fixture()
yield logger_fixture
logger_fixture.teardown() | 0.242385 | 0.123524 |
import tempfile
import subprocess
class Visualizer(object):
"""
An abstract class to visualize Armor containers using graphviz.
"""
def __init__(self, digraph):
self._digraph = digraph
def _write_dot_nodes(self, key_mapper_inverse):
raise NotImplementedError()
def _write_dot_edges(self, key_mapper_inverse):
raise NotImplementedError()
def write_dot(self, dot, key_mapper_inverse=None):
if key_mapper_inverse is None:
key_mapper_inverse = chr
dot.write("digraph _ {")
dot.write("\n// nodes\n")
self._write_dot_nodes(dot, key_mapper_inverse)
dot.write("\n// edges\n")
self._write_dot_edges(dot, key_mapper_inverse)
dot.write("}\n")
def quicklook(self, key_mapper_inverse=None):
dot = tempfile.NamedTemporaryFile(suffix=".dot")
self.write_dot(dot, key_mapper_inverse)
dot.flush()
png = tempfile.NamedTemporaryFile(suffix=".png")
subprocess.Popen(["dot", "-Tpng", dot.name, "-o", png.name],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE).wait()
subprocess.Popen(["qlmanage", "-p", png.name],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE).wait()
class TrieVisualizerBase(Visualizer):
def __init__(self, *args):
super(TrieVisualizerBase, self).__init__(*args)
self._marks = []
def _write_dot_shape_and_color(self, dot, vertex):
shape = "circle"
if vertex.fields["value"]:
shape = "doublecircle"
color = "black"
for mark, color_ in self._marks:
if mark == vertex.name:
color = color_
dot.write(" node [shape = %s, color = %s];\n" % (shape, color))
def mark(self, pointer, color="red"):
self._marks.append((pointer, color))
return self
class TrieVisualizer(TrieVisualizerBase):
"""
A visualizer for Armor trie containers.
"""
def _write_dot_nodes(self, dot, key_mapper_inverse):
for vertex, _ in self._digraph:
self._write_dot_shape_and_color(dot, vertex)
dot.write(" \"%s\" [label = \"\"];\n" % vertex.name)
def _write_dot_edges(self, dot, key_mapper_inverse):
for vertex, edges in self._digraph:
for edge in edges:
dot.write(" \"%s\" -> \"%s\" [label = \"%s\" ]\n" %
(vertex.name,
edge.to.name,
key_mapper_inverse(edge.fields["value"])))
class TSTVisualizer(TrieVisualizerBase):
"""
A visualizer for Armor ternary search tree containers.
"""
def _write_dot_nodes(self, dot, key_mapper_inverse):
for vertex, _ in self._digraph:
self._write_dot_shape_and_color(dot, vertex)
dot.write(" \"%s\" [label = \"%s\"];\n" %
(vertex.name,
key_mapper_inverse(ord(vertex.fields["c"][1]))))
def _write_dot_edges(self, dot, _):
for vertex, edges in self._digraph:
for edge in edges:
edge_value = ("l", "m", "r")[int(edge.fields["value"])]
dot.write(" \"%s\" -> \"%s\" [label = \"%s\" ]\n" %
(vertex.name, edge.to.name, edge_value)) | debug/python/rmr/visualizers.py | import tempfile
import subprocess
class Visualizer(object):
"""
An abstract class to visualize Armor containers using graphviz.
"""
def __init__(self, digraph):
self._digraph = digraph
def _write_dot_nodes(self, key_mapper_inverse):
raise NotImplementedError()
def _write_dot_edges(self, key_mapper_inverse):
raise NotImplementedError()
def write_dot(self, dot, key_mapper_inverse=None):
if key_mapper_inverse is None:
key_mapper_inverse = chr
dot.write("digraph _ {")
dot.write("\n// nodes\n")
self._write_dot_nodes(dot, key_mapper_inverse)
dot.write("\n// edges\n")
self._write_dot_edges(dot, key_mapper_inverse)
dot.write("}\n")
def quicklook(self, key_mapper_inverse=None):
dot = tempfile.NamedTemporaryFile(suffix=".dot")
self.write_dot(dot, key_mapper_inverse)
dot.flush()
png = tempfile.NamedTemporaryFile(suffix=".png")
subprocess.Popen(["dot", "-Tpng", dot.name, "-o", png.name],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE).wait()
subprocess.Popen(["qlmanage", "-p", png.name],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE).wait()
class TrieVisualizerBase(Visualizer):
def __init__(self, *args):
super(TrieVisualizerBase, self).__init__(*args)
self._marks = []
def _write_dot_shape_and_color(self, dot, vertex):
shape = "circle"
if vertex.fields["value"]:
shape = "doublecircle"
color = "black"
for mark, color_ in self._marks:
if mark == vertex.name:
color = color_
dot.write(" node [shape = %s, color = %s];\n" % (shape, color))
def mark(self, pointer, color="red"):
self._marks.append((pointer, color))
return self
class TrieVisualizer(TrieVisualizerBase):
"""
A visualizer for Armor trie containers.
"""
def _write_dot_nodes(self, dot, key_mapper_inverse):
for vertex, _ in self._digraph:
self._write_dot_shape_and_color(dot, vertex)
dot.write(" \"%s\" [label = \"\"];\n" % vertex.name)
def _write_dot_edges(self, dot, key_mapper_inverse):
for vertex, edges in self._digraph:
for edge in edges:
dot.write(" \"%s\" -> \"%s\" [label = \"%s\" ]\n" %
(vertex.name,
edge.to.name,
key_mapper_inverse(edge.fields["value"])))
class TSTVisualizer(TrieVisualizerBase):
"""
A visualizer for Armor ternary search tree containers.
"""
def _write_dot_nodes(self, dot, key_mapper_inverse):
for vertex, _ in self._digraph:
self._write_dot_shape_and_color(dot, vertex)
dot.write(" \"%s\" [label = \"%s\"];\n" %
(vertex.name,
key_mapper_inverse(ord(vertex.fields["c"][1]))))
def _write_dot_edges(self, dot, _):
for vertex, edges in self._digraph:
for edge in edges:
edge_value = ("l", "m", "r")[int(edge.fields["value"])]
dot.write(" \"%s\" -> \"%s\" [label = \"%s\" ]\n" %
(vertex.name, edge.to.name, edge_value)) | 0.606964 | 0.324369 |
import os
import sys
from subprocess import Popen, PIPE
import paramiko
import rospy
from diagnostic_msgs.msg import DiagnosticArray, DiagnosticStatus, KeyValue
class IwConfigParser():
def __init__(self, interface):
self.interface = interface
self.norm = ""
self.essid = ""
self.mode = ""
self.frequency = 0.0
self.access_point = ""
self.bit_rate = 0.0
self.tx_power = 0.0
self.retry_short_limit = 0
self.rts_thr = ""
self.fragment_thr = ""
self.power_management = ""
self.link_quality = ""
self.link_quality_percent = 0
self.signal_level = 0.0
self.rx_invalid_nwid = 0
self.rx_invalid_crypt = 0
self.rx_invalid_frag = 0
self.tx_excessive_retries = 0
self.invalic_misc = 0
self.missed_beacon = 0
self.stat = DiagnosticStatus()
def _parse_info(self, info):
try:
split = info.split('IEEE ',1)
split = split[1].split('ESSID:',1)
self.norm = split[0].encode('utf8').strip()
split = split[1].split('\n',1)
self.essid = split[0].encode('utf8').strip()
split = split[1].split('Mode:',1)
split = split[1].split('Frequency:',1)
self.mode = split[0].encode('utf8').strip()
split = split[1].split(' GHz',1)
self.frequency = float(split[0].strip())
split = split[1].split('Access Point: ',1)
split = split[1].split('\n',1)
self.access_point = split[0].encode('utf8').strip()
split = split[1].split('Bit Rate=',1)
split = split[1].split(' Mb/s',1)
self.bit_rate = float(split[0].strip())
if split[1].find('Tx-Power') != -1:
split = split[1].split('Tx-Power=',1)
split = split[1].split(' dBm',1)
self.tx_power = float(split[0].strip())
if split[1].find('Retry short limit:') != -1:
split = split[1].split('Retry short limit:',1)
if split[1].find('Retry short limit=') != -1:
split = split[1].split('Retry short limit=',1)
if split[1].find('RTS thr:') != -1:
split = split[1].split('RTS thr:',1)
if split[1].find('RTS thr=') != -1:
split = split[1].split('RTS thr=',1)
self.retry_short_limit = split[0].encode('utf8').strip()
if split[1].find('Fragment thr:') != -1:
split = split[1].split('Fragment thr:',1)
if split[1].find('Fragment thr=') != -1:
split = split[1].split('Fragment thr=',1)
self.rts_thr = split[0].encode('utf8').strip()
split = split[1].split('\n',1)
self.fragment_thr = split[0].encode('utf8').strip()
split = split[1].split('Power Management:',1)
split = split[1].split('\n',1)
self.power_managment = split[0].encode('utf8').strip()
split = split[1].split('Link Quality=',1)
split = split[1].split('Signal level=',1)
self.link_quality = split[0].encode('utf8').strip()
self.link_quality_percent = split[0].split('/')
self.link_quality_percent = int(float(self.link_quality_percent[0].strip()) / float(self.link_quality_percent[1].strip())*100.0)
split = split[1].split(' dBm',1)
self.signal_level = float(split[0].strip())
split = split[1].split('Rx invalid nwid:',1)
split = split[1].split('Rx invalid crypt:',1)
self.rx_invalid_nwid = int(split[0].strip())
split = split[1].split('Rx invalid frag:',1)
self.rx_invalid_crypt = int(split[0].strip())
split = split[1].split('\n',1)
self.rx_invalid_frag = int(split[0].strip())
split = split[1].split('Tx excessive retries:',1)
split = split[1].split('Invalid misc:',1)
self.tx_excessive_retries = int(split[0].strip())
split = split[1].split('Missed beacon:',1)
self.invalid_misc = int(split[0].strip())
split = split[1].split('\n',1)
self.missed_beacon = int(split[0].strip())
#ToDo: set diagnostic warning/error level accordingly
self.stat.level = DiagnosticStatus.OK
self.stat.message = "OK"
self.stat.values = [ KeyValue("Interface" , self.interface),
KeyValue("IEEE Norm", self.norm),
KeyValue("ESSID", self.essid),
KeyValue("Mode", self.mode),
KeyValue("Frequency", str(self.frequency)),
KeyValue("Access Point", self.access_point),
KeyValue("Bit Rate [Mb/s]", str(self.bit_rate)),
KeyValue("Tx-Power [dBm]", str(self.tx_power)),
KeyValue("Retry short limit", str(self.retry_short_limit)),
KeyValue("RTS thr", self.rts_thr),
KeyValue("Fragment thr", self.fragment_thr),
KeyValue("Power Managment", self.power_management),
KeyValue("Link Quality", self.link_quality),
KeyValue("Link Quality %", str(self.link_quality_percent)),
KeyValue("Signal level [dBm]", str(self.signal_level)),
KeyValue("Rx invalid nwid", str(self.rx_invalid_nwid)),
KeyValue("Rx invalid crypt", str(self.rx_invalid_crypt)),
KeyValue("Rx invalid frag", str(self.rx_invalid_frag)),
KeyValue("Tx excessive retries", str(self.tx_excessive_retries)),
KeyValue("Invalid misc", str(self.invalic_misc)),
KeyValue("Missed beacon", str(self.missed_beacon)) ]
except Exception, e:
rospy.logerr("Parsing Error: %s" %e)
self.stat.level = DiagnosticStatus.ERROR
self.stat.message = 'iwconfig Exception'
self.stat.values = [ KeyValue(key = 'Exception', value = str(e)) ]
class IwConfigLocal(IwConfigParser):
def __init__(self, interface):
IwConfigParser.__init__(self, interface)
def update(self):
try:
p = Popen(["iwconfig", self.interface], stdout=PIPE, stdin=PIPE, stderr=PIPE)
res = p.wait()
(stdout,stderr) = p.communicate()
if res != 0:
self.stat.level = DiagnosticStatus.ERROR
self.stat.message = 'iwconfig Error'
self.stat.values = [ KeyValue(key = 'iwconfig Error', value = stderr),
KeyValue(key = 'Output', value = stdout) ]
else:
self._parse_info(stdout)
except Exception, e:
self.stat.level = DiagnosticStatus.ERROR
self.stat.message = 'iwconfig Exception'
self.stat.values = [ KeyValue(key = 'Exception', value = str(e)) ]
class IwConfigSSH(IwConfigParser):
def __init__(self, interface, hostname, user, password):
IwConfigParser.__init__(self, interface)
self.ssh = paramiko.SSHClient()
self.ssh.load_system_host_keys()
ssh_key_file = os.getenv("HOME")+'/.ssh/id_rsa.pub'
self.ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())# no known_hosts error
self.ssh.connect(str(hostname), username=user, key_filename=ssh_key_file) # no passwd needed
#self.ssh.connect(str(hostname), username=user, password=password)
def update(self):
try:
(stdin, stdout, stderr) = self.ssh.exec_command("iwconfig %s"%self.interface)
output = ''.join(stdout.readlines())
self._parse_info(output)
except Exception, e:
self.stat.level = DiagnosticStatus.ERROR
self.stat.message = 'iwconfig Exception'
self.stat.values = [ KeyValue(key = 'Exception', value = str(e)) ]
class WlanMonitor():
def __init__(self):
rospy.init_node("wlan_monitor")
self.get_params()
self._wlan_stat = DiagnosticStatus()
self._wlan_stat.name = '%s WLAN Info' % self.diag_hostname
self._wlan_stat.hardware_id = self.diag_hostname
self._wlan_stat.level = DiagnosticStatus.OK
self._wlan_stat.message = 'No Data'
self._wlan_stat.values = []
self.msg = DiagnosticArray()
self.msg.header.stamp = rospy.get_rostime()
self.msg.status = [self._wlan_stat]
self.diag_pub = rospy.Publisher("/diagnostics", DiagnosticArray, queue_size=1)
self.diag_timer = rospy.Timer(rospy.Duration(1.0), self.publish_diagnostics)
if self.monitor_local:
self.iwconfig = IwConfigLocal(self.interface)
else:
try:
self.iwconfig = IwConfigSSH(self.interface, self.diag_hostname, self.user, self.password)
except Exception, e:
rospy.logerr("Error connecting ssh to host: %s",e.message)
self._wlan_stat.level = DiagnosticStatus.ERROR
self._wlan_stat.message = 'iwconfig Exception'
self._wlan_stat.values = [ KeyValue(key = 'Exception', value = str(e)) ]
self.msg.status = [self._wlan_stat]
return
self.monitor_timer = rospy.Timer(rospy.Duration(1.0), self.update_diagnostics)
def update_diagnostics(self, event):
self.iwconfig.update()
self.msg = DiagnosticArray()
self.msg.header.stamp = rospy.get_rostime()
self._wlan_stat.level = self.iwconfig.stat.level
self._wlan_stat.message = self.iwconfig.stat.message
self._wlan_stat.values = self.iwconfig.stat.values
self.msg.status = [self._wlan_stat]
def publish_diagnostics(self, event):
self.diag_pub.publish(self.msg)
def get_params(self):
self.diag_hostname = rospy.get_param('~diag_hostname', "localhost")
self.interface = rospy.get_param('~interface', "wlan0")
self.monitor_local = rospy.get_param("~monitor_local", True)
self.user = rospy.get_param('~user', "")
self.password = rospy.get_param('~password', "")
if __name__ == "__main__":
monitor = WlanMonitor()
rospy.spin() | python-checker/Examples/FULL/cob_monitoring/src/wlan_monitor.py |
import os
import sys
from subprocess import Popen, PIPE
import paramiko
import rospy
from diagnostic_msgs.msg import DiagnosticArray, DiagnosticStatus, KeyValue
class IwConfigParser():
def __init__(self, interface):
self.interface = interface
self.norm = ""
self.essid = ""
self.mode = ""
self.frequency = 0.0
self.access_point = ""
self.bit_rate = 0.0
self.tx_power = 0.0
self.retry_short_limit = 0
self.rts_thr = ""
self.fragment_thr = ""
self.power_management = ""
self.link_quality = ""
self.link_quality_percent = 0
self.signal_level = 0.0
self.rx_invalid_nwid = 0
self.rx_invalid_crypt = 0
self.rx_invalid_frag = 0
self.tx_excessive_retries = 0
self.invalic_misc = 0
self.missed_beacon = 0
self.stat = DiagnosticStatus()
def _parse_info(self, info):
try:
split = info.split('IEEE ',1)
split = split[1].split('ESSID:',1)
self.norm = split[0].encode('utf8').strip()
split = split[1].split('\n',1)
self.essid = split[0].encode('utf8').strip()
split = split[1].split('Mode:',1)
split = split[1].split('Frequency:',1)
self.mode = split[0].encode('utf8').strip()
split = split[1].split(' GHz',1)
self.frequency = float(split[0].strip())
split = split[1].split('Access Point: ',1)
split = split[1].split('\n',1)
self.access_point = split[0].encode('utf8').strip()
split = split[1].split('Bit Rate=',1)
split = split[1].split(' Mb/s',1)
self.bit_rate = float(split[0].strip())
if split[1].find('Tx-Power') != -1:
split = split[1].split('Tx-Power=',1)
split = split[1].split(' dBm',1)
self.tx_power = float(split[0].strip())
if split[1].find('Retry short limit:') != -1:
split = split[1].split('Retry short limit:',1)
if split[1].find('Retry short limit=') != -1:
split = split[1].split('Retry short limit=',1)
if split[1].find('RTS thr:') != -1:
split = split[1].split('RTS thr:',1)
if split[1].find('RTS thr=') != -1:
split = split[1].split('RTS thr=',1)
self.retry_short_limit = split[0].encode('utf8').strip()
if split[1].find('Fragment thr:') != -1:
split = split[1].split('Fragment thr:',1)
if split[1].find('Fragment thr=') != -1:
split = split[1].split('Fragment thr=',1)
self.rts_thr = split[0].encode('utf8').strip()
split = split[1].split('\n',1)
self.fragment_thr = split[0].encode('utf8').strip()
split = split[1].split('Power Management:',1)
split = split[1].split('\n',1)
self.power_managment = split[0].encode('utf8').strip()
split = split[1].split('Link Quality=',1)
split = split[1].split('Signal level=',1)
self.link_quality = split[0].encode('utf8').strip()
self.link_quality_percent = split[0].split('/')
self.link_quality_percent = int(float(self.link_quality_percent[0].strip()) / float(self.link_quality_percent[1].strip())*100.0)
split = split[1].split(' dBm',1)
self.signal_level = float(split[0].strip())
split = split[1].split('Rx invalid nwid:',1)
split = split[1].split('Rx invalid crypt:',1)
self.rx_invalid_nwid = int(split[0].strip())
split = split[1].split('Rx invalid frag:',1)
self.rx_invalid_crypt = int(split[0].strip())
split = split[1].split('\n',1)
self.rx_invalid_frag = int(split[0].strip())
split = split[1].split('Tx excessive retries:',1)
split = split[1].split('Invalid misc:',1)
self.tx_excessive_retries = int(split[0].strip())
split = split[1].split('Missed beacon:',1)
self.invalid_misc = int(split[0].strip())
split = split[1].split('\n',1)
self.missed_beacon = int(split[0].strip())
#ToDo: set diagnostic warning/error level accordingly
self.stat.level = DiagnosticStatus.OK
self.stat.message = "OK"
self.stat.values = [ KeyValue("Interface" , self.interface),
KeyValue("IEEE Norm", self.norm),
KeyValue("ESSID", self.essid),
KeyValue("Mode", self.mode),
KeyValue("Frequency", str(self.frequency)),
KeyValue("Access Point", self.access_point),
KeyValue("Bit Rate [Mb/s]", str(self.bit_rate)),
KeyValue("Tx-Power [dBm]", str(self.tx_power)),
KeyValue("Retry short limit", str(self.retry_short_limit)),
KeyValue("RTS thr", self.rts_thr),
KeyValue("Fragment thr", self.fragment_thr),
KeyValue("Power Managment", self.power_management),
KeyValue("Link Quality", self.link_quality),
KeyValue("Link Quality %", str(self.link_quality_percent)),
KeyValue("Signal level [dBm]", str(self.signal_level)),
KeyValue("Rx invalid nwid", str(self.rx_invalid_nwid)),
KeyValue("Rx invalid crypt", str(self.rx_invalid_crypt)),
KeyValue("Rx invalid frag", str(self.rx_invalid_frag)),
KeyValue("Tx excessive retries", str(self.tx_excessive_retries)),
KeyValue("Invalid misc", str(self.invalic_misc)),
KeyValue("Missed beacon", str(self.missed_beacon)) ]
except Exception, e:
rospy.logerr("Parsing Error: %s" %e)
self.stat.level = DiagnosticStatus.ERROR
self.stat.message = 'iwconfig Exception'
self.stat.values = [ KeyValue(key = 'Exception', value = str(e)) ]
class IwConfigLocal(IwConfigParser):
def __init__(self, interface):
IwConfigParser.__init__(self, interface)
def update(self):
try:
p = Popen(["iwconfig", self.interface], stdout=PIPE, stdin=PIPE, stderr=PIPE)
res = p.wait()
(stdout,stderr) = p.communicate()
if res != 0:
self.stat.level = DiagnosticStatus.ERROR
self.stat.message = 'iwconfig Error'
self.stat.values = [ KeyValue(key = 'iwconfig Error', value = stderr),
KeyValue(key = 'Output', value = stdout) ]
else:
self._parse_info(stdout)
except Exception, e:
self.stat.level = DiagnosticStatus.ERROR
self.stat.message = 'iwconfig Exception'
self.stat.values = [ KeyValue(key = 'Exception', value = str(e)) ]
class IwConfigSSH(IwConfigParser):
def __init__(self, interface, hostname, user, password):
IwConfigParser.__init__(self, interface)
self.ssh = paramiko.SSHClient()
self.ssh.load_system_host_keys()
ssh_key_file = os.getenv("HOME")+'/.ssh/id_rsa.pub'
self.ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())# no known_hosts error
self.ssh.connect(str(hostname), username=user, key_filename=ssh_key_file) # no passwd needed
#self.ssh.connect(str(hostname), username=user, password=password)
def update(self):
try:
(stdin, stdout, stderr) = self.ssh.exec_command("iwconfig %s"%self.interface)
output = ''.join(stdout.readlines())
self._parse_info(output)
except Exception, e:
self.stat.level = DiagnosticStatus.ERROR
self.stat.message = 'iwconfig Exception'
self.stat.values = [ KeyValue(key = 'Exception', value = str(e)) ]
class WlanMonitor():
def __init__(self):
rospy.init_node("wlan_monitor")
self.get_params()
self._wlan_stat = DiagnosticStatus()
self._wlan_stat.name = '%s WLAN Info' % self.diag_hostname
self._wlan_stat.hardware_id = self.diag_hostname
self._wlan_stat.level = DiagnosticStatus.OK
self._wlan_stat.message = 'No Data'
self._wlan_stat.values = []
self.msg = DiagnosticArray()
self.msg.header.stamp = rospy.get_rostime()
self.msg.status = [self._wlan_stat]
self.diag_pub = rospy.Publisher("/diagnostics", DiagnosticArray, queue_size=1)
self.diag_timer = rospy.Timer(rospy.Duration(1.0), self.publish_diagnostics)
if self.monitor_local:
self.iwconfig = IwConfigLocal(self.interface)
else:
try:
self.iwconfig = IwConfigSSH(self.interface, self.diag_hostname, self.user, self.password)
except Exception, e:
rospy.logerr("Error connecting ssh to host: %s",e.message)
self._wlan_stat.level = DiagnosticStatus.ERROR
self._wlan_stat.message = 'iwconfig Exception'
self._wlan_stat.values = [ KeyValue(key = 'Exception', value = str(e)) ]
self.msg.status = [self._wlan_stat]
return
self.monitor_timer = rospy.Timer(rospy.Duration(1.0), self.update_diagnostics)
def update_diagnostics(self, event):
self.iwconfig.update()
self.msg = DiagnosticArray()
self.msg.header.stamp = rospy.get_rostime()
self._wlan_stat.level = self.iwconfig.stat.level
self._wlan_stat.message = self.iwconfig.stat.message
self._wlan_stat.values = self.iwconfig.stat.values
self.msg.status = [self._wlan_stat]
def publish_diagnostics(self, event):
self.diag_pub.publish(self.msg)
def get_params(self):
self.diag_hostname = rospy.get_param('~diag_hostname', "localhost")
self.interface = rospy.get_param('~interface', "wlan0")
self.monitor_local = rospy.get_param("~monitor_local", True)
self.user = rospy.get_param('~user', "")
self.password = rospy.get_param('~password', "")
if __name__ == "__main__":
monitor = WlanMonitor()
rospy.spin() | 0.291787 | 0.125627 |
import unittest
from amonone.web.template import *
from nose.tools import eq_
class TestTemplateFilters(unittest.TestCase):
    """Tests for the template helper filters exported by amonone.web.template.

    The fixed timestamp 1319737106 corresponds to 27-10-2011 17:38 in the
    timezone these formatters assume (presumably the test host's -- confirm
    if the date/time tests fail locally).
    """

    def test_dateformat(self):
        # Unix timestamp -> 'DD-MM-YYYY-HH:MM'
        date = dateformat(1319737106)
        eq_('27-10-2011-17:38', date)

    def test_timeformat(self):
        # Unix timestamp -> 'HH:MM'
        time = timeformat(1319737106)
        eq_('17:38', time)

    def test_date_to_js(self):
        # Components for a JavaScript Date(); the month is 0-based (9 == October)
        date = date_to_js(1319737106)
        eq_('2011,9, 27, 17, 38', date)

    def test_to_int(self):
        # Extracts the numeric part from a mixed string
        _int = to_int('testme2')
        eq_(_int, 2)

    def test_clean_string(self):
        # Strips a unit suffix and returns the numeric value
        string = clean_string('24.5MB')
        eq_(string, 24.5)

    def test_progress_width_percent(self):
        # Container widths: full = 305px, medium = 158px, small = 100px.
        # 0% collapses to an invisible bar (transparent border, no background).
        full_container = progress_width_percent(100, container_type='full')
        eq_(full_container, '305px')
        full_container = progress_width_percent(50, container_type='full')
        eq_(full_container, '152px')
        full_container = progress_width_percent(0, container_type='full')
        eq_(full_container, '0px; border:3px solid transparent; background: none;')
        container = progress_width_percent(100, container_type='medium')
        eq_(container, '158px')
        container = progress_width_percent(50, container_type='medium')
        eq_(container, '79px')
        container = progress_width_percent(0, container_type='medium')
        eq_(container, '0px; border:3px solid transparent; background: none;')
        container = progress_width_percent(100, container_type='small')
        eq_(container, '100px')
        container = progress_width_percent(50, container_type='small')
        eq_(container, '50px')

    def test_progress_width(self):
        # value/total pairs scale against the same container widths as above
        full_container = progress_width(300, 300, container_type='full')
        eq_(full_container, '305px')
        full_container_50 = progress_width(150, 300, container_type='full')
        eq_(full_container_50, '152px')
        full_container_0 = progress_width(0, 300, container_type='full')
        eq_(full_container_0, '0px; border:3px solid transparent; background: none;')
        medium_container = progress_width(300, 300, container_type='medium')
        eq_(medium_container, '158px')
        medium_container_50 = progress_width(150, 300, container_type='medium')
        eq_(medium_container_50, '79px')
        medium_container_0 = progress_width(0, 300, container_type='medium')
        eq_(medium_container_0, '0px; border:3px solid transparent; background: none;')
        small_container = progress_width(300, 300, container_type='small')
        eq_(small_container, '100px')
        small_container_50 = progress_width(150, 300, container_type='small')
        eq_(small_container_50, '50px')
        small_container_0 = progress_width(0, 300, container_type='small')
        eq_(small_container_0, '0px; border:3px solid transparent; background: none;')

    def test_progress_width_with_zeroes(self):
        # 0/0 must not divide by zero; it renders the collapsed bar
        empty_container_full = progress_width(0, 0, container_type='full')
        eq_(empty_container_full, '0px; border:3px solid transparent; background: none;')
        empty_container_medium = progress_width(0, 0, container_type='medium')
        eq_(empty_container_medium, '0px; border:3px solid transparent; background: none;')
        empty_container_small = progress_width(0, 0, container_type='small')
        eq_(empty_container_small, '0px; border:3px solid transparent; background: none;')

    def test_value_bigger_than_total(self):
        # value > total is clamped to a 100%-wide container
        container_full = progress_width(600, 0, container_type='full')
        eq_(container_full, '305px')

    def test_with_big_numbers(self):
        container_full = progress_width(12332323600, 3434344, container_type='full')
        eq_(container_full, '305px')  # Value bigger than total - container is 100%
        container = progress_width(9, 12233332, container_type='full')
        eq_(container, '0px; border:3px solid transparent; background: none;')
        container_full = progress_width(1232, 34343, container_type='full')
        eq_(container_full, '9px')

    def test_url(self):
        # Joins path segments with '/'
        _url = url('more', 'and', 'even', 'more')
        eq_(_url, 'more/and/even/more')

    def test_base_url(self):
        # Only the type is asserted; the value depends on configuration
        _base_url = base_url()
        assert isinstance(_base_url, str)

    def test_check_additional_data(self):
        # Dicts holding nothing beyond 'occurrence' carry no extra data -> None
        ignored_dicts = [{'occurrence': 12223323}, {'occurrence': 1212121221}]
        check_ignored_dicts = check_additional_data(ignored_dicts)
        eq_(check_ignored_dicts, None)
        # Any dict with an extra key means there is data to show -> True
        true_dicts = [{'occurrence': 12223323, 'test': 'me'}, {'occurrence': 1212121221}]
        check_true_dicts = check_additional_data(true_dicts)
        eq_(check_true_dicts, True)

    def test_cleanup_string(self):
        # Slashes and dashes are trimmed from both ends
        string = '//test---/'
        clean = clean_slashes(string)
        # NOTE(review): dataset-row residue follows on the next line after the
        # assertion; the real code ends at eq_(clean, 'test').
        eq_(clean, 'test') | amonone/web/apps/core/tests/template_filters_test.py | import unittest
from amonone.web.template import *
from nose.tools import eq_
class TestTemplateFilters(unittest.TestCase):
    """Tests for the template helper filters exported by amonone.web.template.

    The fixed timestamp 1319737106 corresponds to 27-10-2011 17:38 in the
    timezone these formatters assume (presumably the test host's -- confirm
    if the date/time tests fail locally).

    Fix: stray dataset-row residue ("| 0.239883 | 0.153296") removed from the
    final assertion line; test logic is otherwise unchanged.
    """

    def test_dateformat(self):
        # Unix timestamp -> 'DD-MM-YYYY-HH:MM'
        date = dateformat(1319737106)
        eq_('27-10-2011-17:38', date)

    def test_timeformat(self):
        # Unix timestamp -> 'HH:MM'
        time = timeformat(1319737106)
        eq_('17:38', time)

    def test_date_to_js(self):
        # Components for a JavaScript Date(); the month is 0-based (9 == October)
        date = date_to_js(1319737106)
        eq_('2011,9, 27, 17, 38', date)

    def test_to_int(self):
        # Extracts the numeric part from a mixed string
        _int = to_int('testme2')
        eq_(_int, 2)

    def test_clean_string(self):
        # Strips a unit suffix and returns the numeric value
        string = clean_string('24.5MB')
        eq_(string, 24.5)

    def test_progress_width_percent(self):
        # Container widths: full = 305px, medium = 158px, small = 100px.
        # 0% collapses to an invisible bar (transparent border, no background).
        full_container = progress_width_percent(100, container_type='full')
        eq_(full_container, '305px')
        full_container = progress_width_percent(50, container_type='full')
        eq_(full_container, '152px')
        full_container = progress_width_percent(0, container_type='full')
        eq_(full_container, '0px; border:3px solid transparent; background: none;')
        container = progress_width_percent(100, container_type='medium')
        eq_(container, '158px')
        container = progress_width_percent(50, container_type='medium')
        eq_(container, '79px')
        container = progress_width_percent(0, container_type='medium')
        eq_(container, '0px; border:3px solid transparent; background: none;')
        container = progress_width_percent(100, container_type='small')
        eq_(container, '100px')
        container = progress_width_percent(50, container_type='small')
        eq_(container, '50px')

    def test_progress_width(self):
        # value/total pairs scale against the same container widths as above
        full_container = progress_width(300, 300, container_type='full')
        eq_(full_container, '305px')
        full_container_50 = progress_width(150, 300, container_type='full')
        eq_(full_container_50, '152px')
        full_container_0 = progress_width(0, 300, container_type='full')
        eq_(full_container_0, '0px; border:3px solid transparent; background: none;')
        medium_container = progress_width(300, 300, container_type='medium')
        eq_(medium_container, '158px')
        medium_container_50 = progress_width(150, 300, container_type='medium')
        eq_(medium_container_50, '79px')
        medium_container_0 = progress_width(0, 300, container_type='medium')
        eq_(medium_container_0, '0px; border:3px solid transparent; background: none;')
        small_container = progress_width(300, 300, container_type='small')
        eq_(small_container, '100px')
        small_container_50 = progress_width(150, 300, container_type='small')
        eq_(small_container_50, '50px')
        small_container_0 = progress_width(0, 300, container_type='small')
        eq_(small_container_0, '0px; border:3px solid transparent; background: none;')

    def test_progress_width_with_zeroes(self):
        # 0/0 must not divide by zero; it renders the collapsed bar
        empty_container_full = progress_width(0, 0, container_type='full')
        eq_(empty_container_full, '0px; border:3px solid transparent; background: none;')
        empty_container_medium = progress_width(0, 0, container_type='medium')
        eq_(empty_container_medium, '0px; border:3px solid transparent; background: none;')
        empty_container_small = progress_width(0, 0, container_type='small')
        eq_(empty_container_small, '0px; border:3px solid transparent; background: none;')

    def test_value_bigger_than_total(self):
        # value > total is clamped to a 100%-wide container
        container_full = progress_width(600, 0, container_type='full')
        eq_(container_full, '305px')

    def test_with_big_numbers(self):
        container_full = progress_width(12332323600, 3434344, container_type='full')
        eq_(container_full, '305px')  # Value bigger than total - container is 100%
        container = progress_width(9, 12233332, container_type='full')
        eq_(container, '0px; border:3px solid transparent; background: none;')
        container_full = progress_width(1232, 34343, container_type='full')
        eq_(container_full, '9px')

    def test_url(self):
        # Joins path segments with '/'
        _url = url('more', 'and', 'even', 'more')
        eq_(_url, 'more/and/even/more')

    def test_base_url(self):
        # Only the type is asserted; the value depends on configuration
        _base_url = base_url()
        assert isinstance(_base_url, str)

    def test_check_additional_data(self):
        # Dicts holding nothing beyond 'occurrence' carry no extra data -> None
        ignored_dicts = [{'occurrence': 12223323}, {'occurrence': 1212121221}]
        check_ignored_dicts = check_additional_data(ignored_dicts)
        eq_(check_ignored_dicts, None)
        # Any dict with an extra key means there is data to show -> True
        true_dicts = [{'occurrence': 12223323, 'test': 'me'}, {'occurrence': 1212121221}]
        check_true_dicts = check_additional_data(true_dicts)
        eq_(check_true_dicts, True)

    def test_cleanup_string(self):
        # Slashes and dashes are trimmed from both ends
        string = '//test---/'
        clean = clean_slashes(string)
        eq_(clean, 'test')
from django.db import connection
from core.libs.cache import setCacheData
from core.libs.sqlcustom import fix_lob
from core.schedresource.utils import get_object_stores
import core.constants as const
def objectstore_summary_data(hours):
    """Query per-(status, site, objectstore) job counts for the last *hours* hours.

    Returns the raw rows (post-processed by fix_lob) with columns:
    JOBSTATUS, COUNTJOBSINSTATE, COMPUTINGSITE, OBJSE, PANDALIST
    (PANDALIST is a comma-joined list of PanDA ids).
    """
    # The value is interpolated straight into the SQL text (an Oracle interval
    # literal cannot take a bind variable here), so coerce it to int first to
    # rule out SQL injection if *hours* ever originates from request input.
    hours = int(hours)
    sqlRequest = """
    SELECT JOBSTATUS, COUNT(JOBSTATUS) as COUNTJOBSINSTATE, COMPUTINGSITE, OBJSE, RTRIM(XMLAGG(XMLELEMENT(E,PANDAID,',').EXTRACT('//text()') ORDER BY PANDAID).GetClobVal(),',') AS PANDALIST
    FROM
    (SELECT DISTINCT t1.PANDAID, NUCLEUS, COMPUTINGSITE, JOBSTATUS, TASKTYPE, ES, CASE WHEN t2.OBJSTORE_ID > 0 THEN TO_CHAR(t2.OBJSTORE_ID) ELSE t3.destinationse END AS OBJSE
    FROM ATLAS_PANDABIGMON.COMBINED_WAIT_ACT_DEF_ARCH4 t1
    LEFT JOIN ATLAS_PANDA.JEDI_EVENTS t2 ON t1.PANDAID=t2.PANDAID and t1.JEDITASKID = t2.JEDITASKID and (t2.ziprow_id>0 or t2.OBJSTORE_ID > 0)
    LEFT JOIN ATLAS_PANDA.filestable4 t3 ON (t3.pandaid = t2.pandaid and t3.JEDITASKID = t2.JEDITASKID and t3.row_id=t2.ziprow_id) WHERE t1.ES in (1) and t1.CLOUD='WORLD' and t1.MODIFICATIONTIME > (sysdate - interval '{hours}' hour)
    AND t3.MODIFICATIONTIME > (sysdate - interval '{hours}' hour)
    )
    WHERE NOT OBJSE IS NULL
    GROUP BY JOBSTATUS, JOBSTATUS, COMPUTINGSITE, OBJSE
    order by OBJSE
    """.format(hours=hours)
    cur = connection.cursor()
    cur.execute(sqlRequest)
    # NOTE(review): the cursor is not closed explicitly; fix_lob presumably
    # materializes the rows -- confirm before adding a close()/with here.
    rawsummary = fix_lob(cur)
    return rawsummary
def objectstore_summary(request, hours=12):
    """Aggregate job counts per object store for the last *hours* hours.

    Returns a pair:
      mObjectStores        -- {os_name: {site: {state: {'count': n, 'tk': token}}}}
      mObjectStoresSummary -- {os_name: {state: {'count': n, 'tk': token}}}
    where 'tk' is a cache token produced by setCacheData referencing the
    matching PanDA id list (0 when there is nothing to drill into).
    """
    object_stores = get_object_stores()
    rawsummary = objectstore_summary_data(hours)
    mObjectStores = {}
    mObjectStoresTk = {}  # {os_name: {state: [per-row cache tokens]}}
    if len(rawsummary) > 0:
        for row in rawsummary:
            # row: (JOBSTATUS, COUNT, COMPUTINGSITE, OBJSE, PANDALIST);
            # OBJSE is either a numeric objectstore id or a destination SE name
            id = -1  # NOTE(review): shadows the builtin 'id'
            try:
                id = int(row[3])
            except ValueError:
                pass
            if not row[3] is None and id in object_stores:
                osName = object_stores[id]['name']
            else:
                osName = "Not defined"
            compsite = row[2]
            status = row[0]
            count = row[1]
            # Cache the PanDA id list; the returned token backs drill-down links
            tk = setCacheData(request, pandaid=row[4], compsite=row[2])
            if osName in mObjectStores:
                if not compsite in mObjectStores[osName]:
                    # First row for this site: pre-fill every known state with zeros
                    mObjectStores[osName][compsite] = {}
                    for state in const.JOB_STATES_SITE + ["closed"]:
                        mObjectStores[osName][compsite][state] = {'count': 0, 'tk': 0}
                mObjectStores[osName][compsite][status] = {'count': count, 'tk': tk}
                if not status in mObjectStoresTk[osName]:
                    mObjectStoresTk[osName][status] = []
                mObjectStoresTk[osName][status].append(tk)
            else:
                # First row for this object store: initialise both maps
                mObjectStores[osName] = {}
                mObjectStores[osName][compsite] = {}
                mObjectStoresTk[osName] = {}
                mObjectStoresTk[osName][status] = []
                for state in const.JOB_STATES_SITE + ["closed"]:
                    mObjectStores[osName][compsite][state] = {'count': 0, 'tk': 0}
                mObjectStores[osName][compsite][status] = {'count': count, 'tk': tk}
                mObjectStoresTk[osName][status].append(tk)
    # Getting tk's for parents: collapse the per-row tokens of each
    # (object store, state) pair into one parent cache token
    for osName in mObjectStoresTk:
        for state in mObjectStoresTk[osName]:
            mObjectStoresTk[osName][state] = setCacheData(request, childtk=','.join(mObjectStoresTk[osName][state]))
    # Roll per-site counts up to per-object-store totals
    mObjectStoresSummary = {}
    for osName in mObjectStores:
        mObjectStoresSummary[osName] = {}
        for site in mObjectStores[osName]:
            for state in mObjectStores[osName][site]:
                if state in mObjectStoresSummary[osName]:
                    mObjectStoresSummary[osName][state]['count'] += mObjectStores[osName][site][state]['count']
                    mObjectStoresSummary[osName][state]['tk'] = 0
                else:
                    mObjectStoresSummary[osName][state] = {}
                    mObjectStoresSummary[osName][state]['count'] = mObjectStores[osName][site][state]['count']
                    mObjectStoresSummary[osName][state]['tk'] = 0
    # Attach the parent tokens only where there is something to drill into
    for osName in mObjectStoresSummary:
        for state in mObjectStoresSummary[osName]:
            if mObjectStoresSummary[osName][state]['count'] > 0:
                mObjectStoresSummary[osName][state]['tk'] = mObjectStoresTk[osName][state]
    # NOTE(review): dataset-row residue follows on the next line after the
    # return statement; the real code ends at the tuple return.
    return mObjectStores, mObjectStoresSummary | core/pandajob/summary_os.py | from django.db import connection
from core.libs.cache import setCacheData
from core.libs.sqlcustom import fix_lob
from core.schedresource.utils import get_object_stores
import core.constants as const
def objectstore_summary_data(hours):
    """Query per-(status, site, objectstore) job counts for the last *hours* hours.

    Returns the raw rows (post-processed by fix_lob) with columns:
    JOBSTATUS, COUNTJOBSINSTATE, COMPUTINGSITE, OBJSE, PANDALIST
    (PANDALIST is a comma-joined list of PanDA ids).
    """
    # The value is interpolated straight into the SQL text (an Oracle interval
    # literal cannot take a bind variable here), so coerce it to int first to
    # rule out SQL injection if *hours* ever originates from request input.
    hours = int(hours)
    sqlRequest = """
    SELECT JOBSTATUS, COUNT(JOBSTATUS) as COUNTJOBSINSTATE, COMPUTINGSITE, OBJSE, RTRIM(XMLAGG(XMLELEMENT(E,PANDAID,',').EXTRACT('//text()') ORDER BY PANDAID).GetClobVal(),',') AS PANDALIST
    FROM
    (SELECT DISTINCT t1.PANDAID, NUCLEUS, COMPUTINGSITE, JOBSTATUS, TASKTYPE, ES, CASE WHEN t2.OBJSTORE_ID > 0 THEN TO_CHAR(t2.OBJSTORE_ID) ELSE t3.destinationse END AS OBJSE
    FROM ATLAS_PANDABIGMON.COMBINED_WAIT_ACT_DEF_ARCH4 t1
    LEFT JOIN ATLAS_PANDA.JEDI_EVENTS t2 ON t1.PANDAID=t2.PANDAID and t1.JEDITASKID = t2.JEDITASKID and (t2.ziprow_id>0 or t2.OBJSTORE_ID > 0)
    LEFT JOIN ATLAS_PANDA.filestable4 t3 ON (t3.pandaid = t2.pandaid and t3.JEDITASKID = t2.JEDITASKID and t3.row_id=t2.ziprow_id) WHERE t1.ES in (1) and t1.CLOUD='WORLD' and t1.MODIFICATIONTIME > (sysdate - interval '{hours}' hour)
    AND t3.MODIFICATIONTIME > (sysdate - interval '{hours}' hour)
    )
    WHERE NOT OBJSE IS NULL
    GROUP BY JOBSTATUS, JOBSTATUS, COMPUTINGSITE, OBJSE
    order by OBJSE
    """.format(hours=hours)
    cur = connection.cursor()
    cur.execute(sqlRequest)
    # NOTE(review): the cursor is not closed explicitly; fix_lob presumably
    # materializes the rows -- confirm before adding a close()/with here.
    rawsummary = fix_lob(cur)
    return rawsummary
def objectstore_summary(request, hours=12):
    """Aggregate job counts per object store for the last *hours* hours.

    Returns a pair:
      mObjectStores        -- {os_name: {site: {state: {'count': n, 'tk': token}}}}
      mObjectStoresSummary -- {os_name: {state: {'count': n, 'tk': token}}}
    where 'tk' is a cache token produced by setCacheData referencing the
    matching PanDA id list (0 when there is nothing to drill into).

    Fix: stray dataset-row residue ("| 0.19129 | 0.130645") removed from the
    return line; logic is otherwise unchanged.
    """
    object_stores = get_object_stores()
    rawsummary = objectstore_summary_data(hours)
    mObjectStores = {}
    mObjectStoresTk = {}  # {os_name: {state: [per-row cache tokens]}}
    if len(rawsummary) > 0:
        for row in rawsummary:
            # row: (JOBSTATUS, COUNT, COMPUTINGSITE, OBJSE, PANDALIST);
            # OBJSE is either a numeric objectstore id or a destination SE name
            id = -1  # NOTE(review): shadows the builtin 'id'
            try:
                id = int(row[3])
            except ValueError:
                pass
            if not row[3] is None and id in object_stores:
                osName = object_stores[id]['name']
            else:
                osName = "Not defined"
            compsite = row[2]
            status = row[0]
            count = row[1]
            # Cache the PanDA id list; the returned token backs drill-down links
            tk = setCacheData(request, pandaid=row[4], compsite=row[2])
            if osName in mObjectStores:
                if not compsite in mObjectStores[osName]:
                    # First row for this site: pre-fill every known state with zeros
                    mObjectStores[osName][compsite] = {}
                    for state in const.JOB_STATES_SITE + ["closed"]:
                        mObjectStores[osName][compsite][state] = {'count': 0, 'tk': 0}
                mObjectStores[osName][compsite][status] = {'count': count, 'tk': tk}
                if not status in mObjectStoresTk[osName]:
                    mObjectStoresTk[osName][status] = []
                mObjectStoresTk[osName][status].append(tk)
            else:
                # First row for this object store: initialise both maps
                mObjectStores[osName] = {}
                mObjectStores[osName][compsite] = {}
                mObjectStoresTk[osName] = {}
                mObjectStoresTk[osName][status] = []
                for state in const.JOB_STATES_SITE + ["closed"]:
                    mObjectStores[osName][compsite][state] = {'count': 0, 'tk': 0}
                mObjectStores[osName][compsite][status] = {'count': count, 'tk': tk}
                mObjectStoresTk[osName][status].append(tk)
    # Getting tk's for parents: collapse the per-row tokens of each
    # (object store, state) pair into one parent cache token
    for osName in mObjectStoresTk:
        for state in mObjectStoresTk[osName]:
            mObjectStoresTk[osName][state] = setCacheData(request, childtk=','.join(mObjectStoresTk[osName][state]))
    # Roll per-site counts up to per-object-store totals
    mObjectStoresSummary = {}
    for osName in mObjectStores:
        mObjectStoresSummary[osName] = {}
        for site in mObjectStores[osName]:
            for state in mObjectStores[osName][site]:
                if state in mObjectStoresSummary[osName]:
                    mObjectStoresSummary[osName][state]['count'] += mObjectStores[osName][site][state]['count']
                    mObjectStoresSummary[osName][state]['tk'] = 0
                else:
                    mObjectStoresSummary[osName][state] = {}
                    mObjectStoresSummary[osName][state]['count'] = mObjectStores[osName][site][state]['count']
                    mObjectStoresSummary[osName][state]['tk'] = 0
    # Attach the parent tokens only where there is something to drill into
    for osName in mObjectStoresSummary:
        for state in mObjectStoresSummary[osName]:
            if mObjectStoresSummary[osName][state]['count'] > 0:
                mObjectStoresSummary[osName][state]['tk'] = mObjectStoresTk[osName][state]
    return mObjectStores, mObjectStoresSummary
def big2little(string):
    """Swap every adjacent pair of comma-separated fields (byte-order flip).

    An odd trailing field, if any, is left in place.
    """
    fields = string.split(',')
    flipped = []
    for high, low in zip(fields[::2], fields[1::2]):
        flipped += [low, high]
    if len(fields) % 2:
        flipped.append(fields[-1])
    return ','.join(flipped)
def rgb888to565(r, g, b):
    # NOTE(review): dead code -- immediately shadowed by the tuple-taking
    # redefinition of the same name below; kept here for reference only.
    return ((r & 0b11111000) << 8) | ((g & 0b11111100) << 3) | (b >> 3)
def rgb888to565(rgb):
    """Pack an (r, g, b) triple of 8-bit channels into a 16-bit RGB565 value."""
    red, green, blue = rgb
    # Keep 5/6/5 most-significant bits and shift them into place.
    return ((red >> 3) << 11) | ((green >> 2) << 5) | (blue >> 3)
def rgb565to888(color):
    """Expand a 16-bit RGB565 value back into an (r, g, b) triple of 8-bit channels.

    The *527/*259 multiplies with rounding offsets scale 5- and 6-bit channels
    onto the full 0-255 range.
    """
    red5 = (color >> 11) & 0x1F
    green6 = (color >> 5) & 0x3F
    blue5 = color & 0x1F
    return ((red5 * 527 + 23) >> 6, (green6 * 259 + 33) >> 6, (blue5 * 527 + 23) >> 6)
###
from PIL import Image
def dec2hexs(n):
    """Convert a non-negative integer to an uppercase hexadecimal string
    (no '0x' prefix), e.g. 255 -> 'FF'.
    """
    # format() replaces the hand-rolled digit loop; output is identical for
    # n >= 0 (the original indexed negatively and misbehaved for n < 0).
    return format(n, 'X')
def toupper(s):
    """Uppercase ASCII letters a-z only; every other character is untouched.

    (Deliberately narrower than str.upper(), which would also fold
    non-ASCII letters.)
    """
    return ''.join(chr(ord(ch) - 32) if 'a' <= ch <= 'z' else ch for ch in s)
def toalphanum(s):
    """Replace characters that are illegal in a C identifier
    (backslash, space, slash, parens, dot, dash) with underscores.
    """
    # One C-level pass via translate instead of a per-character loop.
    return s.translate(str.maketrans('\\ /(.)-', '_' * 7))
def convert2define(name):
    """Build the include-guard macro for file *name*: '__<NAME>__' with
    path/punctuation characters replaced and ASCII letters uppercased.
    """
    guard = toupper(toalphanum(name))
    return "__%s__" % guard
def convert(path, tile_width = 16, tile_height = 16):
    """Convert a tileset image into a C header of RGB565 pixel data.

    The image at *path* is cut into tile_width x tile_height tiles; each
    pixel is packed to 16-bit RGB565 and the whole set is written as a
    uint16_t array (one source line per tile) into a .h file created next
    to the image.  Returns the path of the generated header.

    NOTE(review): the output path is derived by splitting on backslashes,
    so this only works with Windows-style separators -- confirm before
    running on POSIX paths.
    """
    im = Image.open(path)
    img = im.load()
    w, h = im.size
    tile_per_line = w // tile_width
    tile_per_column = h // tile_height
    data = []
    # Walk tiles left-to-right, top-to-bottom; within a tile, row-major pixels.
    for i in range(tile_per_line * tile_per_column):
        for y in range(tile_height):
            for x in range(tile_width):
                ix = (i % tile_per_line) * tile_width + x
                iy = (i // tile_per_line) * tile_height + y
                data += [rgb888to565(img[ix, iy][:3])]  # [:3] drops any alpha channel
    # data now contains the RGB565 colour of each pixel of the image, tile by tile
    im.close()
    # Build the .h path: same directory, stem with dots replaced by '_'
    # so the derived array name is a valid C identifier.
    lpath = path.split('\\')
    lpath[-1] = '_'.join(lpath[-1].split('.')[:-1]) + '.h'
    data_path = '\\'.join(lpath)
    with open(data_path, 'w') as fdata:
        define = convert2define(lpath[-1])
        fdata.write("#ifndef " + define + "\n#define " + define + "\n\n")  # include guard
        fdata.write("#include <libndls.h>\n\n")
        fdata.write('#define TILES_NUMBER ' + str(len(data) // (tile_width * tile_height)) + "\n\n")  # tile count for the C side
        data_name = lpath[-1].split('.')[0]
        fdata.write("uint16_t " + data_name + "[] = {")  # array named after the file stem
        for i in range(len(data)):
            hexs = dec2hexs(data[i])
            if len(hexs) < 4:
                hexs = ("0" * (4 - len(hexs))) + hexs  # zero-pad to 4 hex digits
            hexs = "0x" + hexs + ', '
            if i % (tile_width * tile_height) == 0:  # start each tile on its own line
                hexs = '\n\t' + hexs
            fdata.write(hexs)
        fdata.write("\n};\n\n")
        fdata.write("#endif")
    # NOTE(review): dataset-row residue is fused into the trailing comment of
    # the next line; the real statement ends at 'return data_path'.
    return data_path #returns the path of the newly created .h file | converter.py | def big2little(string):
array = string.split(',')
for i in range(len(array) // 2):
array[2 * i], array[2 * i + 1] = array[2 * i + 1], array[2 * i]
return ','.join(array)
def rgb888to565(r, g, b):
    # NOTE(review): dead code -- immediately shadowed by the tuple-taking
    # redefinition of the same name below; kept here for reference only.
    return ((r & 0b11111000) << 8) | ((g & 0b11111100) << 3) | (b >> 3)
def rgb888to565(rgb):
    """Pack an (r, g, b) triple of 8-bit channels into a 16-bit RGB565 value."""
    red, green, blue = rgb
    # Keep 5/6/5 most-significant bits and shift them into place.
    return ((red >> 3) << 11) | ((green >> 2) << 5) | (blue >> 3)
def rgb565to888(color):
    """Expand a 16-bit RGB565 value back into an (r, g, b) triple of 8-bit channels.

    The *527/*259 multiplies with rounding offsets scale 5- and 6-bit channels
    onto the full 0-255 range.
    """
    red5 = (color >> 11) & 0x1F
    green6 = (color >> 5) & 0x3F
    blue5 = color & 0x1F
    return ((red5 * 527 + 23) >> 6, (green6 * 259 + 33) >> 6, (blue5 * 527 + 23) >> 6)
###
from PIL import Image
def dec2hexs(n):
    """Convert a non-negative integer to an uppercase hexadecimal string
    (no '0x' prefix), e.g. 255 -> 'FF'.
    """
    # format() replaces the hand-rolled digit loop; output is identical for
    # n >= 0 (the original indexed negatively and misbehaved for n < 0).
    return format(n, 'X')
def toupper(s):
    """Uppercase ASCII letters a-z only; every other character is untouched.

    (Deliberately narrower than str.upper(), which would also fold
    non-ASCII letters.)
    """
    return ''.join(chr(ord(ch) - 32) if 'a' <= ch <= 'z' else ch for ch in s)
def toalphanum(s):
    """Replace characters that are illegal in a C identifier
    (backslash, space, slash, parens, dot, dash) with underscores.
    """
    # One C-level pass via translate instead of a per-character loop.
    return s.translate(str.maketrans('\\ /(.)-', '_' * 7))
def convert2define(name):
    """Build the include-guard macro for file *name*: '__<NAME>__' with
    path/punctuation characters replaced and ASCII letters uppercased.
    """
    guard = toupper(toalphanum(name))
    return "__%s__" % guard
def convert(path, tile_width = 16, tile_height = 16):
    """Convert a tileset image into a C header of RGB565 pixel data.

    The image at *path* is cut into tile_width x tile_height tiles; each
    pixel is packed to 16-bit RGB565 and the whole set is written as a
    uint16_t array (one source line per tile) into a .h file created next
    to the image.  Returns the path of the generated header.

    NOTE(review): the output path is derived by splitting on backslashes,
    so this only works with Windows-style separators -- confirm before
    running on POSIX paths.
    """
    im = Image.open(path)
    img = im.load()
    w, h = im.size
    tile_per_line = w // tile_width
    tile_per_column = h // tile_height
    data = []
    # Walk tiles left-to-right, top-to-bottom; within a tile, row-major pixels.
    for i in range(tile_per_line * tile_per_column):
        for y in range(tile_height):
            for x in range(tile_width):
                ix = (i % tile_per_line) * tile_width + x
                iy = (i // tile_per_line) * tile_height + y
                data += [rgb888to565(img[ix, iy][:3])]  # [:3] drops any alpha channel
    # data now contains the RGB565 colour of each pixel of the image, tile by tile
    im.close()
    # Build the .h path: same directory, stem with dots replaced by '_'
    # so the derived array name is a valid C identifier.
    lpath = path.split('\\')
    lpath[-1] = '_'.join(lpath[-1].split('.')[:-1]) + '.h'
    data_path = '\\'.join(lpath)
    with open(data_path, 'w') as fdata:
        define = convert2define(lpath[-1])
        fdata.write("#ifndef " + define + "\n#define " + define + "\n\n")  # include guard
        fdata.write("#include <libndls.h>\n\n")
        fdata.write('#define TILES_NUMBER ' + str(len(data) // (tile_width * tile_height)) + "\n\n")  # tile count for the C side
        data_name = lpath[-1].split('.')[0]
        fdata.write("uint16_t " + data_name + "[] = {")  # array named after the file stem
        for i in range(len(data)):
            hexs = dec2hexs(data[i])
            if len(hexs) < 4:
                hexs = ("0" * (4 - len(hexs))) + hexs  # zero-pad to 4 hex digits
            hexs = "0x" + hexs + ', '
            if i % (tile_width * tile_height) == 0:  # start each tile on its own line
                hexs = '\n\t' + hexs
            fdata.write(hexs)
        fdata.write("\n};\n\n")
        fdata.write("#endif")
    return data_path  # returns the path of the newly created .h file
from __future__ import absolute_import, division, print_function
from unittest import TestCase
from marshmallow import ValidationError
from tests.utils import assert_equal_dict
from polyaxon_schemas.ops.environments.legacy import TFRunConfig
from polyaxon_schemas.ops.environments.resources import (
K8SResourcesConfig,
PodResourcesConfig,
)
from polyaxon_schemas.ops.experiment.environment import (
HorovodClusterConfig,
HorovodConfig,
MPIClusterConfig,
MXNetClusterConfig,
MXNetConfig,
PytorchClusterConfig,
PytorchConfig,
TensorflowClusterConfig,
TensorflowConfig,
)
class TestExperimentEnvironmentsConfigs(TestCase):
    """Round-trip (from_dict -> to_dict/to_light_dict) tests for the
    distributed-experiment environment configs.

    Each test mutates its config_dict step by step and re-validates after
    every mutation, so assertion order matters.
    """

    def test_tensorflow_cluster_config(self):
        """A TF cluster spec with worker and ps host lists round-trips."""
        config_dict = {
            "worker": [
                "worker0.example.com:2222",
                "worker1.example.com:2222",
                "worker2.example.com:2222",
            ],
            "ps": ["ps0.example.com:2222", "ps1.example.com:2222"],
        }
        config = TensorflowClusterConfig.from_dict(config_dict)
        assert_equal_dict(config_dict, config.to_dict())

    def test_horovod_cluster_config(self):
        """A Horovod cluster spec declares only workers."""
        config_dict = {
            "worker": [
                "worker0.example.com:2222",
                "worker1.example.com:2222",
                "worker2.example.com:2222",
            ]
        }
        config = HorovodClusterConfig.from_dict(config_dict)
        assert_equal_dict(config_dict, config.to_dict())

    def test_mpi_cluster_config(self):
        """An MPI cluster spec declares only workers."""
        config_dict = {
            "worker": [
                "worker0.example.com:2222",
                "worker1.example.com:2222",
                "worker2.example.com:2222",
            ]
        }
        config = MPIClusterConfig.from_dict(config_dict)
        assert_equal_dict(config_dict, config.to_dict())

    def test_pytorch_cluster_config(self):
        """A Pytorch cluster spec declares only workers."""
        config_dict = {
            "worker": [
                "worker0.example.com:2222",
                "worker1.example.com:2222",
                "worker2.example.com:2222",
            ]
        }
        config = PytorchClusterConfig.from_dict(config_dict)
        assert_equal_dict(config_dict, config.to_dict())

    def test_mxnet_cluster_config(self):
        """An MXNet cluster spec declares workers and servers."""
        config_dict = {
            "worker": [
                "worker0.example.com:2222",
                "worker1.example.com:2222",
                "worker2.example.com:2222",
            ],
            "server": ["server0.example.com:2222", "server1.example.com:2222"],
        }
        config = MXNetClusterConfig.from_dict(config_dict)
        assert_equal_dict(config_dict, config.to_dict())

    def test_tensorflow_config(self):
        """TF config with replica counts, default and per-index resources;
        a legacy run_config must be rejected."""
        config_dict = {"n_workers": 10, "n_ps": 5}
        config = TensorflowConfig.from_dict(config_dict)
        assert_equal_dict(config_dict, config.to_dict())
        # Add run config -- the legacy TFRunConfig is no longer accepted
        config_dict["run_config"] = TFRunConfig().to_dict()
        with self.assertRaises(ValidationError):
            TensorflowConfig.from_dict(config_dict)
        del config_dict["run_config"]
        # Add default worker resources
        config_dict["default_worker"] = {
            "resources": PodResourcesConfig(
                cpu=K8SResourcesConfig(0.5, 1),
                gpu=K8SResourcesConfig(2, 4),
                tpu=K8SResourcesConfig(2, 8),
            ).to_dict()
        }
        config = TensorflowConfig.from_dict(config_dict)
        assert_equal_dict(config_dict, config.to_light_dict())
        # Add default ps resources
        config_dict["default_ps"] = {
            "resources": PodResourcesConfig(
                cpu=K8SResourcesConfig(0.5, 1), memory=K8SResourcesConfig(256, 400)
            ).to_dict()
        }
        config = TensorflowConfig.from_dict(config_dict)
        assert_equal_dict(config_dict, config.to_light_dict())
        # Adding custom resources for worker 4
        config_dict["worker"] = [
            {
                "index": 4,
                "resources": PodResourcesConfig(
                    cpu=K8SResourcesConfig(0.5, 1), memory=K8SResourcesConfig(256, 400)
                ).to_dict(),
            }
        ]
        config = TensorflowConfig.from_dict(config_dict)
        assert_equal_dict(config_dict, config.to_light_dict())
        # Adding custom resources for ps 4
        config_dict["ps"] = [
            {
                "index": 4,
                "resources": PodResourcesConfig(
                    cpu=K8SResourcesConfig(0.5, 1), memory=K8SResourcesConfig(256, 400)
                ).to_dict(),
            }
        ]
        config = TensorflowConfig.from_dict(config_dict)
        assert_equal_dict(config_dict, config.to_light_dict())

    def test_horovod_config(self):
        """Horovod config with worker count and resources round-trips."""
        config_dict = {"n_workers": 10}
        config = HorovodConfig.from_dict(config_dict)
        assert_equal_dict(config_dict, config.to_light_dict())
        # Add default worker resources
        config_dict["default_worker"] = {
            "resources": PodResourcesConfig(
                cpu=K8SResourcesConfig(0.5, 1),
                tpu=K8SResourcesConfig(2, 8),
                gpu=K8SResourcesConfig(2, 4),
            ).to_dict()
        }
        config = HorovodConfig.from_dict(config_dict)
        assert_equal_dict(config_dict, config.to_dict())
        # Adding custom resources for worker 4
        config_dict["worker"] = [
            {
                "index": 4,
                "resources": PodResourcesConfig(
                    cpu=K8SResourcesConfig(0.5, 1), memory=K8SResourcesConfig(256, 400)
                ).to_dict(),
            }
        ]
        config = HorovodConfig.from_dict(config_dict)
        assert_equal_dict(config_dict, config.to_light_dict())

    def test_pytorch_config(self):
        """Pytorch config with worker count and resources round-trips."""
        config_dict = {"n_workers": 10}
        config = PytorchConfig.from_dict(config_dict)
        assert_equal_dict(config_dict, config.to_dict())
        # Add default worker resources
        config_dict["default_worker"] = {
            "resources": PodResourcesConfig(
                cpu=K8SResourcesConfig(0.5, 1),
                tpu=K8SResourcesConfig(1, 1),
                gpu=K8SResourcesConfig(2, 4),
            ).to_dict()
        }
        config = PytorchConfig.from_dict(config_dict)
        assert_equal_dict(config_dict, config.to_dict())
        # Adding custom resources for worker 4
        config_dict["worker"] = [
            {
                "index": 4,
                "resources": PodResourcesConfig(
                    cpu=K8SResourcesConfig(0.5, 1), memory=K8SResourcesConfig(256, 400)
                ).to_dict(),
            }
        ]
        config = PytorchConfig.from_dict(config_dict)
        assert_equal_dict(config_dict, config.to_dict())

    def test_mxnet_config(self):
        """MXNet config with replica counts, default and per-index resources."""
        config_dict = {"n_workers": 10, "n_ps": 5}
        config = MXNetConfig.from_dict(config_dict)
        assert_equal_dict(config_dict, config.to_dict())
        # Add default worker resources
        config_dict["default_worker"] = {
            "resources": PodResourcesConfig(
                cpu=K8SResourcesConfig(0.5, 1),
                tpu=K8SResourcesConfig(1, 1),
                gpu=K8SResourcesConfig(2, 4),
            ).to_dict()
        }
        config = MXNetConfig.from_dict(config_dict)
        assert_equal_dict(config_dict, config.to_dict())
        # Add default ps resources
        config_dict["default_ps"] = {
            "resources": PodResourcesConfig(
                cpu=K8SResourcesConfig(0.5, 1), memory=K8SResourcesConfig(256, 400)
            ).to_dict()
        }
        config = MXNetConfig.from_dict(config_dict)
        assert_equal_dict(config_dict, config.to_dict())
        # Adding custom resources for worker 4
        config_dict["worker"] = [
            {
                "index": 4,
                "resources": PodResourcesConfig(
                    cpu=K8SResourcesConfig(0.5, 1), memory=K8SResourcesConfig(256, 400)
                ).to_dict(),
            }
        ]
        config = MXNetConfig.from_dict(config_dict)
        assert_equal_dict(config_dict, config.to_dict())
        # Adding custom resources for ps 4
        config_dict["ps"] = [
            {
                "index": 4,
                "resources": PodResourcesConfig(
                    cpu=K8SResourcesConfig(0.5, 1), memory=K8SResourcesConfig(256, 400)
                ).to_dict(),
            }
        ]
        config = MXNetConfig.from_dict(config_dict)
        # NOTE(review): dataset-row residue follows on the next line after the
        # assertion; the real code ends at assert_equal_dict(...).
        assert_equal_dict(config_dict, config.to_dict()) | tests/test_ops/test_environments/test_experiments.py | from __future__ import absolute_import, division, print_function
from unittest import TestCase
from marshmallow import ValidationError
from tests.utils import assert_equal_dict
from polyaxon_schemas.ops.environments.legacy import TFRunConfig
from polyaxon_schemas.ops.environments.resources import (
K8SResourcesConfig,
PodResourcesConfig,
)
from polyaxon_schemas.ops.experiment.environment import (
HorovodClusterConfig,
HorovodConfig,
MPIClusterConfig,
MXNetClusterConfig,
MXNetConfig,
PytorchClusterConfig,
PytorchConfig,
TensorflowClusterConfig,
TensorflowConfig,
)
class TestExperimentEnvironmentsConfigs(TestCase):
    """Round-trip (from_dict -> to_dict/to_light_dict) tests for the
    distributed-experiment environment configs.

    Each test mutates its config_dict step by step and re-validates after
    every mutation, so assertion order matters.

    Fix: stray dataset-row residue ("| 0.711531 | 0.376394") removed from the
    final assertion line; test logic is otherwise unchanged.
    """

    def test_tensorflow_cluster_config(self):
        """A TF cluster spec with worker and ps host lists round-trips."""
        config_dict = {
            "worker": [
                "worker0.example.com:2222",
                "worker1.example.com:2222",
                "worker2.example.com:2222",
            ],
            "ps": ["ps0.example.com:2222", "ps1.example.com:2222"],
        }
        config = TensorflowClusterConfig.from_dict(config_dict)
        assert_equal_dict(config_dict, config.to_dict())

    def test_horovod_cluster_config(self):
        """A Horovod cluster spec declares only workers."""
        config_dict = {
            "worker": [
                "worker0.example.com:2222",
                "worker1.example.com:2222",
                "worker2.example.com:2222",
            ]
        }
        config = HorovodClusterConfig.from_dict(config_dict)
        assert_equal_dict(config_dict, config.to_dict())

    def test_mpi_cluster_config(self):
        """An MPI cluster spec declares only workers."""
        config_dict = {
            "worker": [
                "worker0.example.com:2222",
                "worker1.example.com:2222",
                "worker2.example.com:2222",
            ]
        }
        config = MPIClusterConfig.from_dict(config_dict)
        assert_equal_dict(config_dict, config.to_dict())

    def test_pytorch_cluster_config(self):
        """A Pytorch cluster spec declares only workers."""
        config_dict = {
            "worker": [
                "worker0.example.com:2222",
                "worker1.example.com:2222",
                "worker2.example.com:2222",
            ]
        }
        config = PytorchClusterConfig.from_dict(config_dict)
        assert_equal_dict(config_dict, config.to_dict())

    def test_mxnet_cluster_config(self):
        """An MXNet cluster spec declares workers and servers."""
        config_dict = {
            "worker": [
                "worker0.example.com:2222",
                "worker1.example.com:2222",
                "worker2.example.com:2222",
            ],
            "server": ["server0.example.com:2222", "server1.example.com:2222"],
        }
        config = MXNetClusterConfig.from_dict(config_dict)
        assert_equal_dict(config_dict, config.to_dict())

    def test_tensorflow_config(self):
        """TF config with replica counts, default and per-index resources;
        a legacy run_config must be rejected."""
        config_dict = {"n_workers": 10, "n_ps": 5}
        config = TensorflowConfig.from_dict(config_dict)
        assert_equal_dict(config_dict, config.to_dict())
        # Add run config -- the legacy TFRunConfig is no longer accepted
        config_dict["run_config"] = TFRunConfig().to_dict()
        with self.assertRaises(ValidationError):
            TensorflowConfig.from_dict(config_dict)
        del config_dict["run_config"]
        # Add default worker resources
        config_dict["default_worker"] = {
            "resources": PodResourcesConfig(
                cpu=K8SResourcesConfig(0.5, 1),
                gpu=K8SResourcesConfig(2, 4),
                tpu=K8SResourcesConfig(2, 8),
            ).to_dict()
        }
        config = TensorflowConfig.from_dict(config_dict)
        assert_equal_dict(config_dict, config.to_light_dict())
        # Add default ps resources
        config_dict["default_ps"] = {
            "resources": PodResourcesConfig(
                cpu=K8SResourcesConfig(0.5, 1), memory=K8SResourcesConfig(256, 400)
            ).to_dict()
        }
        config = TensorflowConfig.from_dict(config_dict)
        assert_equal_dict(config_dict, config.to_light_dict())
        # Adding custom resources for worker 4
        config_dict["worker"] = [
            {
                "index": 4,
                "resources": PodResourcesConfig(
                    cpu=K8SResourcesConfig(0.5, 1), memory=K8SResourcesConfig(256, 400)
                ).to_dict(),
            }
        ]
        config = TensorflowConfig.from_dict(config_dict)
        assert_equal_dict(config_dict, config.to_light_dict())
        # Adding custom resources for ps 4
        config_dict["ps"] = [
            {
                "index": 4,
                "resources": PodResourcesConfig(
                    cpu=K8SResourcesConfig(0.5, 1), memory=K8SResourcesConfig(256, 400)
                ).to_dict(),
            }
        ]
        config = TensorflowConfig.from_dict(config_dict)
        assert_equal_dict(config_dict, config.to_light_dict())

    def test_horovod_config(self):
        """Horovod config with worker count and resources round-trips."""
        config_dict = {"n_workers": 10}
        config = HorovodConfig.from_dict(config_dict)
        assert_equal_dict(config_dict, config.to_light_dict())
        # Add default worker resources
        config_dict["default_worker"] = {
            "resources": PodResourcesConfig(
                cpu=K8SResourcesConfig(0.5, 1),
                tpu=K8SResourcesConfig(2, 8),
                gpu=K8SResourcesConfig(2, 4),
            ).to_dict()
        }
        config = HorovodConfig.from_dict(config_dict)
        assert_equal_dict(config_dict, config.to_dict())
        # Adding custom resources for worker 4
        config_dict["worker"] = [
            {
                "index": 4,
                "resources": PodResourcesConfig(
                    cpu=K8SResourcesConfig(0.5, 1), memory=K8SResourcesConfig(256, 400)
                ).to_dict(),
            }
        ]
        config = HorovodConfig.from_dict(config_dict)
        assert_equal_dict(config_dict, config.to_light_dict())

    def test_pytorch_config(self):
        """Pytorch config with worker count and resources round-trips."""
        config_dict = {"n_workers": 10}
        config = PytorchConfig.from_dict(config_dict)
        assert_equal_dict(config_dict, config.to_dict())
        # Add default worker resources
        config_dict["default_worker"] = {
            "resources": PodResourcesConfig(
                cpu=K8SResourcesConfig(0.5, 1),
                tpu=K8SResourcesConfig(1, 1),
                gpu=K8SResourcesConfig(2, 4),
            ).to_dict()
        }
        config = PytorchConfig.from_dict(config_dict)
        assert_equal_dict(config_dict, config.to_dict())
        # Adding custom resources for worker 4
        config_dict["worker"] = [
            {
                "index": 4,
                "resources": PodResourcesConfig(
                    cpu=K8SResourcesConfig(0.5, 1), memory=K8SResourcesConfig(256, 400)
                ).to_dict(),
            }
        ]
        config = PytorchConfig.from_dict(config_dict)
        assert_equal_dict(config_dict, config.to_dict())

    def test_mxnet_config(self):
        """MXNet config with replica counts, default and per-index resources."""
        config_dict = {"n_workers": 10, "n_ps": 5}
        config = MXNetConfig.from_dict(config_dict)
        assert_equal_dict(config_dict, config.to_dict())
        # Add default worker resources
        config_dict["default_worker"] = {
            "resources": PodResourcesConfig(
                cpu=K8SResourcesConfig(0.5, 1),
                tpu=K8SResourcesConfig(1, 1),
                gpu=K8SResourcesConfig(2, 4),
            ).to_dict()
        }
        config = MXNetConfig.from_dict(config_dict)
        assert_equal_dict(config_dict, config.to_dict())
        # Add default ps resources
        config_dict["default_ps"] = {
            "resources": PodResourcesConfig(
                cpu=K8SResourcesConfig(0.5, 1), memory=K8SResourcesConfig(256, 400)
            ).to_dict()
        }
        config = MXNetConfig.from_dict(config_dict)
        assert_equal_dict(config_dict, config.to_dict())
        # Adding custom resources for worker 4
        config_dict["worker"] = [
            {
                "index": 4,
                "resources": PodResourcesConfig(
                    cpu=K8SResourcesConfig(0.5, 1), memory=K8SResourcesConfig(256, 400)
                ).to_dict(),
            }
        ]
        config = MXNetConfig.from_dict(config_dict)
        assert_equal_dict(config_dict, config.to_dict())
        # Adding custom resources for ps 4
        config_dict["ps"] = [
            {
                "index": 4,
                "resources": PodResourcesConfig(
                    cpu=K8SResourcesConfig(0.5, 1), memory=K8SResourcesConfig(256, 400)
                ).to_dict(),
            }
        ]
        config = MXNetConfig.from_dict(config_dict)
        assert_equal_dict(config_dict, config.to_dict())
import subprocess
import signal
import os
class CliHandler:
def __init__(self):
self.process = {}
self.returncode = {}
def call(self, name, cmd, shell=True):
"""
this function is used to spawn new subprocess
if this function is called with same name more than one time
and while previous process is not finished, it will return the
previous process and will not spawn new subprocess
"""
if name in self.process:
return name
else:
p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=shell)
self.process[name] = p
self.returncode[name] = None
return name
def get(self, name):
"""
this will return subprocess object corrosponding
to given name as input
"""
if name in self.process:
return self.process[name]
def return_code(self, name):
if name in self.returncode:
return self.returncode[name]
def reset(self, name):
if name in self.process:
self.kill(name)
if name in self.returncode:
del self.returncode[name]
def list_process(self):
"""
return all registered process
"""
return self.process
def capture(self, name):
"""
once a subprocess is spawned, it's output
can be captured via this function as an iterator
"""
if name in self.process:
p = self.process[name]
while True:
# returns None while subprocess is running
try:
retcode = p.poll()
line = p.stdout.readline()
line = line.decode().strip()
if len(line) > 0 and name in self.process: yield line
if retcode is not None:
if name in self.process:
self.returncode[name] = retcode
self.kill(name, returncode=retcode)
break
except KeyboardInterrupt:
pass
def kill(self, name, returncode=130):
"""
kill the spawned subrocess by it's name
"""
if name in self.process:
p = self.process[name]
os.kill(p.pid, 0)
self.returncode[name] = returncode
del self.process[name]
return self.returncode[name]
def __repr__(self):
return str(self.__class__.__name__)+'({})'.format(self.process)
def __len__(self):
return len(self.process)
def __getitem__(self, position):
return list(self.process.items())[position] | clihandler/clihandler.py | import subprocess
import signal
import os
class CliHandler:
def __init__(self):
self.process = {}
self.returncode = {}
def call(self, name, cmd, shell=True):
"""
this function is used to spawn new subprocess
if this function is called with same name more than one time
and while previous process is not finished, it will return the
previous process and will not spawn new subprocess
"""
if name in self.process:
return name
else:
p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=shell)
self.process[name] = p
self.returncode[name] = None
return name
def get(self, name):
"""
this will return subprocess object corrosponding
to given name as input
"""
if name in self.process:
return self.process[name]
def return_code(self, name):
if name in self.returncode:
return self.returncode[name]
def reset(self, name):
if name in self.process:
self.kill(name)
if name in self.returncode:
del self.returncode[name]
def list_process(self):
"""
return all registered process
"""
return self.process
def capture(self, name):
"""
once a subprocess is spawned, it's output
can be captured via this function as an iterator
"""
if name in self.process:
p = self.process[name]
while True:
# returns None while subprocess is running
try:
retcode = p.poll()
line = p.stdout.readline()
line = line.decode().strip()
if len(line) > 0 and name in self.process: yield line
if retcode is not None:
if name in self.process:
self.returncode[name] = retcode
self.kill(name, returncode=retcode)
break
except KeyboardInterrupt:
pass
def kill(self, name, returncode=130):
"""
kill the spawned subrocess by it's name
"""
if name in self.process:
p = self.process[name]
os.kill(p.pid, 0)
self.returncode[name] = returncode
del self.process[name]
return self.returncode[name]
def __repr__(self):
return str(self.__class__.__name__)+'({})'.format(self.process)
def __len__(self):
return len(self.process)
def __getitem__(self, position):
return list(self.process.items())[position] | 0.416915 | 0.091139 |
import pytest
from astropy import units as u
import numpy as np
from xrtpy.response.channel import Channel
import pkg_resources
import sunpy
import sunpy.map
from sunpy.data import manager
import scipy.io
import sunpy.io.special
channel_names = [
"Al-mesh",
"Al-poly",
"C-poly",
"Ti-poly",
"Be-thin",
"Be-med",
"Al-med",
"Al-thick",
"Be-thick",
"Al-poly/Al-mesh",
"Al-poly/Ti-poly",
"Al-poly/Al-thick",
"Al-poly/Be-thick",
"C-poly/Ti-poly",
]
@pytest.mark.parametrize("channel_name", channel_names)
def test_channel_name(channel_name):
channel = Channel(channel_name)
assert channel.name == channel_name
filename = pkg_resources.resource_filename(
"xrtpy", "data/channels/xrt_channels_v0016.genx"
)
v6_genx = sunpy.io.special.genx.read_genx(filename)
v6_genx_s = v6_genx["SAVEGEN0"]
_channel_name_to_index_mapping = {
"Al-mesh": 0,
"Al-poly": 1,
"C-poly": 2,
"Ti-poly": 3,
"Be-thin": 4,
"Be-med": 5,
"Al-med": 6,
"Al-thick": 7,
"Be-thick": 8,
"Al-poly/Al-mesh": 9,
"Al-poly/Ti-poly": 10,
"Al-poly/Al-thick": 11,
"Al-poly/Be-thick": 12,
"C-poly/Ti-poly": 13,
}
@pytest.mark.parametrize("channel_name", channel_names)
def test_CCD_wavelength(channel_name):
channel_filter = Channel(channel_name)
ccd_wavelength_length = int(channel_filter.ccd.number_of_wavelengths)
ccd_wavelength = channel_filter.ccd.ccd_wavelength[:ccd_wavelength_length]
idl_ccd_array_length = int(
v6_genx_s[_channel_name_to_index_mapping[channel_name]]["CCD"]["LENGTH"]
)
idl_ccd_wavelength_auto = (
v6_genx_s[_channel_name_to_index_mapping[channel_name]]["CCD"]["WAVE"][
:idl_ccd_array_length
]
* u.angstrom
)
assert u.allclose(idl_ccd_wavelength_auto, ccd_wavelength)
idl_ccd_wavelength_manu = [
1.00000,
1.10000,
1.20000,
1.30000,
1.40000,
1.50000,
1.60000,
1.70000,
1.80000,
1.90000,
] * u.angstrom
assert u.allclose(idl_ccd_wavelength_manu, ccd_wavelength[0:10])
@pytest.mark.parametrize("channel_name", channel_names)
def test_CCD_quantum_efficiency(channel_name):
channel_filter = Channel(channel_name)
ccd_array_length = int(channel_filter.ccd.number_of_wavelengths)
ccd_quantum_efficiency = channel_filter.ccd.ccd_quantum_efficiency[
:ccd_array_length
]
idl_ccd_array_length = int(
v6_genx_s[_channel_name_to_index_mapping[channel_name]]["CCD"]["LENGTH"]
)
idl_ccd_quantum_efficiency_auto = v6_genx_s[
_channel_name_to_index_mapping[channel_name]
]["CCD"]["QE"][:idl_ccd_array_length]
assert u.allclose(idl_ccd_quantum_efficiency_auto, ccd_quantum_efficiency)
idl_ccd_quantum_efficiency_manu = [
0.0573069,
0.0751920,
0.0960381,
0.119867,
0.146638,
0.176252,
0.208541,
0.243277,
0.280167,
0.318879,
0.359036,
0.400219,
0.441984,
0.483898,
]
assert idl_ccd_quantum_efficiency_manu, ccd_quantum_efficiency[0:13]
@pytest.mark.parametrize("channel_name", channel_names)
def test_CCD_pixel_size(channel_name):
channel_filter = Channel(channel_name)
ccd_pixel_size = channel_filter.ccd.ccd_pixel_size
idl_ccd_quantum_efficiency_auto = (
v6_genx_s[_channel_name_to_index_mapping[channel_name]]["CCD"]["PIXEL_SIZE"]
* u.micron
)
assert u.allclose(idl_ccd_quantum_efficiency_auto, ccd_pixel_size)
@pytest.mark.parametrize("channel_name", channel_names)
def test_ccd_gain_left(channel_name):
channel_filter = Channel(channel_name)
ccd_gain_left = channel_filter.ccd.ccd_gain_left
idl_ccd_gain_left_auto = (
v6_genx_s[_channel_name_to_index_mapping[channel_name]]["CCD"]["GAIN_L"]
* u.electron
)
assert u.isclose(ccd_gain_left, idl_ccd_gain_left_auto)
@pytest.mark.parametrize("channel_name", channel_names)
def test_ccd_gain_right(channel_name):
channel_filter = Channel(channel_name)
ccd_gain_right = channel_filter.ccd.ccd_gain_right
idl_ccd_gain_right_auto = (
v6_genx_s[_channel_name_to_index_mapping[channel_name]]["CCD"]["GAIN_R"]
* u.electron
)
assert u.isclose(ccd_gain_right, idl_ccd_gain_right_auto)
@pytest.mark.parametrize("channel_name", channel_names)
def test_ccd_full_well(channel_name):
channel_filter = Channel(channel_name)
ccd_full_well = channel_filter.ccd.ccd_full_well
idl_ccd_full_well_auto = (
v6_genx_s[_channel_name_to_index_mapping[channel_name]]["CCD"]["FULL_WELL"]
* u.electron
)
assert u.isclose(ccd_full_well, idl_ccd_full_well_auto)
@pytest.mark.parametrize("channel_name", channel_names)
def test_ccd_ev_ore_electron(channel_name):
channel_filter = Channel(channel_name)
ccd_full_well = channel_filter.ccd.ccd_ev_ore_electron
idl_ccd_full_well_auto = v6_genx_s[_channel_name_to_index_mapping[channel_name]][
"CCD"
]["EV_PER_EL"] * (u.eV / u.electron)
assert u.isclose(ccd_full_well, idl_ccd_full_well_auto)
@pytest.mark.parametrize("channel_name", channel_names)
def test_ccd_name(channel_name):
channel_filter = Channel(channel_name)
ccd_name = channel_filter.ccd.ccd_name
idl_ccd_name_auto = v6_genx_s[_channel_name_to_index_mapping[channel_name]]["CCD"][
"LONG_NAME"
]
assert ccd_name == idl_ccd_name_auto
@pytest.mark.parametrize("channel_name", channel_names)
def test_entrancefilter_name(channel_name):
channel_filter = Channel(channel_name)
entrancefilter_name = channel_filter.entrancefilter.entrancefilter_name
IDL_entrancefilter_name_AUTO = v6_genx_s[
_channel_name_to_index_mapping[channel_name]
]["EN_FILTER"]["LONG_NAME"]
assert entrancefilter_name == IDL_entrancefilter_name_AUTO
@pytest.mark.parametrize("channel_name", channel_names)
def test_entrancefilter_material(channel_name):
channel_filter = Channel(channel_name)
entrancefilter_material = channel_filter.entrancefilter.entrancefilter_material
idl_entrancefilter_material_auto = v6_genx_s[
_channel_name_to_index_mapping[channel_name]
]["EN_FILTER"]["MATERIAL"]
if np.all(entrancefilter_material == idl_entrancefilter_material_auto):
pass
else:
raise ValueError("FAIL: test_entrancefilter_material")
@pytest.mark.parametrize("channel_name", channel_names)
def test_entrancefilter_thickness(channel_name):
channel_filter = Channel(channel_name)
entrancefilter_thickness = channel_filter.entrancefilter.entrancefilter_thickness
idl_entrancefilter_thick_auto = (
v6_genx_s[_channel_name_to_index_mapping[channel_name]]["EN_FILTER"]["THICK"]
* u.angstrom
)
assert u.allclose(entrancefilter_thickness, idl_entrancefilter_thick_auto)
@pytest.mark.parametrize("channel_name", channel_names)
def test_entrancefilter_density(channel_name):
channel_filter = Channel(channel_name)
entrancefilter_density = channel_filter.entrancefilter.entrancefilter_density
idl_entrancefilter_density_auto = v6_genx_s[
_channel_name_to_index_mapping[channel_name]
]["EN_FILTER"]["DENS"] * (u.g * u.cm ** -3)
assert u.allclose(entrancefilter_density, idl_entrancefilter_density_auto)
@pytest.mark.parametrize("channel_name", channel_names)
def test_entrancefilter_wavelength(channel_name):
channel_filter = Channel(channel_name)
entrancefilter_wavelength_length = int(
channel_filter.entrancefilter.number_of_wavelengths
)
entrancefilter_wavelength = channel_filter.entrancefilter.entrancefilter_wavelength[
:entrancefilter_wavelength_length
]
idl_entrancefilter_array_length = int(
v6_genx_s[_channel_name_to_index_mapping[channel_name]]["EN_FILTER"]["LENGTH"]
)
idl_entrancefilter_wavelength_auto = v6_genx_s[
_channel_name_to_index_mapping[channel_name]
]["EN_FILTER"]["WAVE"][:idl_entrancefilter_array_length] * u.Unit(
"Angstrom"
) # wavelength_CCD_unit
assert u.allclose(idl_entrancefilter_wavelength_auto, entrancefilter_wavelength)
idl_entrancefilter_wavelength_manu = [
1.00000,
1.00802,
1.01610,
1.02424,
1.03245,
1.04073,
1.04907,
1.05748,
1.06595,
1.07450,
] * u.angstrom
assert u.allclose(
idl_entrancefilter_wavelength_manu, entrancefilter_wavelength[0:10]
)
@pytest.mark.parametrize("channel_name", channel_names)
def test_entrancefilter_transmission(channel_name):
channel_filter = Channel(channel_name)
entrancefilter_transmission_length = int(
channel_filter.entrancefilter.number_of_wavelengths
)
entrancefilter_transmission = (
channel_filter.entrancefilter.entrancefilter_transmission[
:entrancefilter_transmission_length
]
)
idl_entrancefilter_array_length = int(
v6_genx_s[_channel_name_to_index_mapping[channel_name]]["EN_FILTER"]["LENGTH"]
)
idl_entrancefilter_transmission_auto = v6_genx_s[
_channel_name_to_index_mapping[channel_name]
]["EN_FILTER"]["TRANS"][:idl_entrancefilter_array_length]
assert u.allclose(idl_entrancefilter_transmission_auto, entrancefilter_transmission)
@pytest.mark.parametrize("channel_name", channel_names)
def test_entrancefilter_mesh_transmission(channel_name):
channel_filter = Channel(channel_name)
entrancefilter_mesh_transmission = (
channel_filter.entrancefilter.entrancefilter_mesh_transmission
)
idl_entrancefilter_mesh_transmission_auto = v6_genx_s[
_channel_name_to_index_mapping[channel_name]
]["EN_FILTER"]["MESH_TRANS"]
assert entrancefilter_mesh_transmission == idl_entrancefilter_mesh_transmission_auto
@pytest.mark.parametrize("channel_name", channel_names)
def test_entrancefilter_substrate(channel_name):
channel_filter = Channel(channel_name)
entrancefilter_substrate = channel_filter.entrancefilter.entrancefilter_substrate
idl_entrancefilter_substrate_auto = v6_genx_s[
_channel_name_to_index_mapping[channel_name]
]["EN_FILTER"]["SUBSTRATE"]
assert entrancefilter_substrate == idl_entrancefilter_substrate_auto
@pytest.mark.parametrize("channel_name", channel_names)
def test_filter1_name(channel_name):
channel_filter = Channel(channel_name)
filter_name = channel_filter.filter_1.name
idl_filter_name_auto = v6_genx_s[_channel_name_to_index_mapping[channel_name]][
"FP_FILTER1"
]["LONG_NAME"]
assert filter_name == idl_filter_name_auto
@pytest.mark.parametrize("channel_name", channel_names)
def test_filter1_material(channel_name):
channel_filter = Channel(channel_name)
filter_material = channel_filter.filter_1.material
idl_filter_material_auto = v6_genx_s[_channel_name_to_index_mapping[channel_name]][
"FP_FILTER1"
]["MATERIAL"]
assert np.all(filter_material == idl_filter_material_auto)
@pytest.mark.parametrize("channel_name", channel_names)
def test_filter1_thickness(channel_name):
channel_filter = Channel(channel_name)
filter_thickness = channel_filter.filter_1.thickness
idl_filter_thick_auto = (
v6_genx_s[_channel_name_to_index_mapping[channel_name]]["FP_FILTER1"]["THICK"]
* u.angstrom
)
assert np.all(filter_thickness == idl_filter_thick_auto)
@pytest.mark.parametrize("channel_name", channel_names)
def test_filter1_density(channel_name):
channel_filter = Channel(channel_name)
filter_density = channel_filter.filter_1.density
idl_filter_density_auto = v6_genx_s[_channel_name_to_index_mapping[channel_name]][
"FP_FILTER1"
]["DENS"] * (u.g * u.cm ** -3)
assert u.allclose(filter_density, idl_filter_density_auto)
@pytest.mark.parametrize("channel_name", channel_names)
def test_filter1_wavelength(channel_name):
channel_filter = Channel(channel_name)
filter_wavelength_length = int(channel_filter.filter_1.number_of_wavelengths)
filter_wavelength = channel_filter.filter_1.wavelength[:filter_wavelength_length]
idl_filter_array_length = int(
v6_genx_s[_channel_name_to_index_mapping[channel_name]]["FP_FILTER1"]["LENGTH"]
)
idl_filter_wavelength_auto = (
v6_genx_s[_channel_name_to_index_mapping[channel_name]]["FP_FILTER1"]["WAVE"][
:idl_filter_array_length
]
* u.angstrom
)
assert u.allclose(idl_filter_wavelength_auto, filter_wavelength)
idl_filter_wavelength_manu = [
1.00000,
1.00802,
1.01610,
1.02424,
1.03245,
1.04073,
1.04907,
1.05748,
1.06595,
1.07450,
] * u.angstrom
assert u.allclose(idl_filter_wavelength_manu, filter_wavelength[0:10])
@pytest.mark.parametrize("channel_name", channel_names)
def test_filter1_transmission(channel_name):
channel_filter = Channel(channel_name)
filter_transmission_length = int(channel_filter.filter_1.number_of_wavelengths)
filter_transmission = channel_filter.filter_1.transmission[
:filter_transmission_length
]
idl_filter_array_length = int(
v6_genx_s[_channel_name_to_index_mapping[channel_name]]["FP_FILTER1"]["LENGTH"]
)
idl_filter_transmission_auto = v6_genx_s[
_channel_name_to_index_mapping[channel_name]
]["FP_FILTER1"]["TRANS"][:idl_filter_array_length]
assert u.allclose(idl_filter_transmission_auto, filter_transmission)
@pytest.mark.parametrize("channel_name", channel_names)
def test_filter1_mesh_transmission(channel_name):
channel_filter = Channel(channel_name)
filter_mesh_transmission = channel_filter._filter_1.mesh_trans
idl_filter_mesh_transmission_auto = v6_genx_s[
_channel_name_to_index_mapping[channel_name]
]["FP_FILTER1"]["MESH_TRANS"]
assert filter_mesh_transmission == idl_filter_mesh_transmission_auto
@pytest.mark.parametrize("channel_name", channel_names)
def test_filter1_substrate(channel_name):
channel_filter = Channel(channel_name)
filter_substrate = channel_filter.filter_1.substrate
idl_filter_substrate_auto = v6_genx_s[_channel_name_to_index_mapping[channel_name]][
"FP_FILTER1"
]["SUBSTRATE"]
assert filter_substrate == idl_filter_substrate_auto
@pytest.mark.parametrize("channel_name", channel_names)
def test_filter2_name(channel_name):
channel_filter = Channel(channel_name)
filter_name = channel_filter.filter_2.name
IDL_filter_name_AUTO = v6_genx_s[_channel_name_to_index_mapping[channel_name]][
"FP_FILTER2"
]["LONG_NAME"]
assert filter_name == IDL_filter_name_AUTO
@pytest.mark.parametrize("channel_name", channel_names)
def test_filter2_material(channel_name):
channel_filter = Channel(channel_name)
filter_material = channel_filter.filter_2.material
idl_filter_material_auto = v6_genx_s[_channel_name_to_index_mapping[channel_name]][
"FP_FILTER2"
]["MATERIAL"]
assert np.all(filter_material == idl_filter_material_auto)
@pytest.mark.parametrize("channel_name", channel_names)
def test_filter2_thickness(channel_name):
channel_filter = Channel(channel_name)
filter_thickness = channel_filter.filter_2.thickness
idl_filter_thick_auto = (
v6_genx_s[_channel_name_to_index_mapping[channel_name]]["FP_FILTER2"]["THICK"]
* u.angstrom
)
assert u.allclose(filter_thickness, idl_filter_thick_auto)
@pytest.mark.parametrize("channel_name", channel_names)
def test_filter2_density(channel_name):
channel_filter = Channel(channel_name)
filter_density = channel_filter.filter_2.density
IDL_filter_density_AUTO = v6_genx_s[_channel_name_to_index_mapping[channel_name]][
"FP_FILTER2"
]["DENS"] * (u.g * u.cm ** -3)
np.allclose(filter_density, IDL_filter_density_AUTO)
@pytest.mark.parametrize("channel_name", channel_names)
def test_filter2_wavelength(channel_name):
channel_filter = Channel(channel_name)
filter_wavelength_length = int(channel_filter.filter_2.number_of_wavelengths)
filter_wavelength = channel_filter.filter_2.wavelength[:filter_wavelength_length]
idl_filter_array_length = int(
v6_genx_s[_channel_name_to_index_mapping[channel_name]]["FP_FILTER2"]["LENGTH"]
)
idl_filter_wavelength_auto = (
v6_genx_s[_channel_name_to_index_mapping[channel_name]]["FP_FILTER2"]["WAVE"][
:idl_filter_array_length
]
* u.angstrom
)
assert u.allclose(idl_filter_wavelength_auto, filter_wavelength)
idl_filter_wavelength_manu = [
1.00000,
1.00802,
1.01610,
1.02424,
1.03245,
1.04073,
1.04907,
1.05748,
1.06595,
1.07450,
] * u.angstrom
assert u.allclose(idl_filter_wavelength_manu, filter_wavelength[0:10])
@pytest.mark.parametrize("channel_name", channel_names)
def test_filter2_transmission(channel_name):
channel_filter = Channel(channel_name)
filter_transmission_length = int(channel_filter.filter_2.number_of_wavelengths)
filter_transmission = channel_filter.filter_2.transmission[
:filter_transmission_length
]
idl_filter_array_length = int(
v6_genx_s[_channel_name_to_index_mapping[channel_name]]["FP_FILTER2"]["LENGTH"]
)
idl_filter_transmission_auto = v6_genx_s[
_channel_name_to_index_mapping[channel_name]
]["FP_FILTER2"]["TRANS"][:idl_filter_array_length]
assert u.allclose(idl_filter_transmission_auto, filter_transmission)
@pytest.mark.parametrize("channel_name", channel_names)
def test_filter2_mesh_transmission(channel_name):
channel_filter = Channel(channel_name)
filter_mesh_transmission = channel_filter.filter_2.mesh_trans
idl_filter_mesh_transmission_auto = v6_genx_s[
_channel_name_to_index_mapping[channel_name]
]["FP_FILTER2"]["MESH_TRANS"]
assert filter_mesh_transmission == idl_filter_mesh_transmission_auto
@pytest.mark.parametrize("channel_name", channel_names)
def test_filter2_substrate(channel_name):
channel_filter = Channel(channel_name)
filter_substrate = channel_filter.filter_2.substrate
idl_filter_substrate_auto = v6_genx_s[_channel_name_to_index_mapping[channel_name]][
"FP_FILTER2"
]["SUBSTRATE"]
assert filter_substrate == idl_filter_substrate_auto
@pytest.mark.parametrize("channel_name", channel_names)
def test_geometry_name(channel_name):
channel_filter = Channel(channel_name)
geometry_name = channel_filter.geometry.name
IDL_geometry_name_AUTO = v6_genx_s[_channel_name_to_index_mapping[channel_name]][
"GEOM"
]["LONG_NAME"]
assert geometry_name == IDL_geometry_name_AUTO
@pytest.mark.parametrize("channel_name", channel_names)
def test_geometry_focal_len(channel_name):
channel_filter = Channel(channel_name)
geometry_focal_len = channel_filter.geometry.focal_len
IDL_geometry_focal_len_AUTO = (
v6_genx_s[_channel_name_to_index_mapping[channel_name]]["GEOM"]["FOC_LEN"]
* u.cm
)
assert u.isclose(geometry_focal_len, IDL_geometry_focal_len_AUTO)
@pytest.mark.parametrize("channel_name", channel_names)
def test_geometry_aperture_area(channel_name):
channel_filter = Channel(channel_name)
geometry_aperture_area = channel_filter.geometry.aperture_area
idl_geometry_aperture_area_auto = (
v6_genx_s[_channel_name_to_index_mapping[channel_name]]["GEOM"]["APERTURE_AREA"]
* u.cm ** 2
)
assert u.isclose(geometry_aperture_area, idl_geometry_aperture_area_auto)
@pytest.mark.parametrize("channel_name", channel_names)
def test_mirror1_name(channel_name):
channel_filter = Channel(channel_name)
mirror_name = channel_filter.mirror_1.name
IDL_mirror_name_AUTO = v6_genx_s[_channel_name_to_index_mapping[channel_name]][
"MIRROR1"
]["LONG_NAME"]
assert mirror_name == IDL_mirror_name_AUTO
@pytest.mark.parametrize("channel_name", channel_names)
def test_mirror1_material(channel_name):
channel_filter = Channel(channel_name)
mirror_material = channel_filter.mirror_1.material
IDL_mirror_material_AUTO = v6_genx_s[_channel_name_to_index_mapping[channel_name]][
"MIRROR1"
]["MATERIAL"]
assert mirror_material == IDL_mirror_material_AUTO
@pytest.mark.parametrize("channel_name", channel_names)
def test_mirror1_density(channel_name):
channel_filter = Channel(channel_name)
mirror_density = channel_filter.mirror_1.density
idl_mirror_density_auto = v6_genx_s[_channel_name_to_index_mapping[channel_name]][
"MIRROR1"
]["DENS"] * (u.g * u.cm ** -3)
assert u.isclose(mirror_density, idl_mirror_density_auto)
@pytest.mark.parametrize("channel_name", channel_names)
def test_mirro1_graze_angle(channel_name):
channel_filter = Channel(channel_name)
mirror_graze_angle = channel_filter.mirror_1.graze_angle
idl_mirror_graze_angle_auto = (
v6_genx_s[_channel_name_to_index_mapping[channel_name]]["MIRROR1"][
"GRAZE_ANGLE"
]
* u.deg
)
assert u.isclose(mirror_graze_angle, idl_mirror_graze_angle_auto)
idl_mirror_graze_angle_manu = [0.910000] * u.deg
assert u.isclose(idl_mirror_graze_angle_manu, mirror_graze_angle)
@pytest.mark.parametrize("channel_name", channel_names)
def test_mirror1_wavelength(channel_name):
channel_filter = Channel(channel_name)
mirror_number_of_length = int(channel_filter.mirror_1.number_of_wavelengths)
mirror_wavelength = channel_filter.mirror_1.wavelength[:mirror_number_of_length]
idl_mirror_array_length = int(
v6_genx_s[_channel_name_to_index_mapping[channel_name]]["MIRROR1"]["LENGTH"]
)
idl_mirror_wavelength_auto = v6_genx_s[
_channel_name_to_index_mapping[channel_name]
]["MIRROR1"]["WAVE"][:idl_mirror_array_length] * u.Unit("Angstrom")
assert u.allclose(idl_mirror_wavelength_auto, mirror_wavelength)
idl_mirror_wavelength_manu = [
1.00000,
1.10000,
1.20000,
1.30000,
1.40000,
1.50000,
1.60000,
1.70000,
1.80000,
1.90000,
] * u.angstrom
assert u.allclose(idl_mirror_wavelength_manu, mirror_wavelength[0:10])
@pytest.mark.parametrize("channel_name", channel_names)
def test_mirror1_reflection(channel_name):
channel_filter = Channel(channel_name)
mirror_number_of_length = int(channel_filter.mirror_1.number_of_wavelengths)
mirror_reflection = channel_filter.mirror_1.reflection[:mirror_number_of_length]
idl_mirror_array_length = int(
v6_genx_s[_channel_name_to_index_mapping[channel_name]]["MIRROR1"]["LENGTH"]
)
idl_mirror_wavelength_auto = (
v6_genx_s[_channel_name_to_index_mapping[channel_name]]["MIRROR1"]["REFL"][
:idl_mirror_array_length
]
* u.angstrom
)
assert u.allclose(idl_mirror_wavelength_auto, mirror_reflection)
@pytest.mark.parametrize("channel_name", channel_names)
def test_mirror2_name(channel_name):
channel_filter = Channel(channel_name)
mirror_name = channel_filter.mirror_1.name
idl_mirror_name_auto = v6_genx_s[_channel_name_to_index_mapping[channel_name]][
"MIRROR2"
]["LONG_NAME"]
assert mirror_name == idl_mirror_name_auto
@pytest.mark.parametrize("channel_name", channel_names)
def test_mirror2_material(channel_name):
channel_filter = Channel(channel_name)
mirror_material = channel_filter.mirror_1.material
idl_mirror_material_auto = v6_genx_s[_channel_name_to_index_mapping[channel_name]][
"MIRROR2"
]["MATERIAL"]
assert mirror_material == idl_mirror_material_auto
@pytest.mark.parametrize("channel_name", channel_names)
def test_mirror2_density(channel_name):
channel_filter = Channel(channel_name)
mirror_density = channel_filter.mirror_1.density
idl_mirror_density_auto = v6_genx_s[_channel_name_to_index_mapping[channel_name]][
"MIRROR2"
]["DENS"] * (u.g * u.cm ** -3)
assert u.isclose(mirror_density, idl_mirror_density_auto)
@pytest.mark.parametrize("channel_name", channel_names)
def test_mirror2_graze_angle(channel_name):
channel_filter = Channel(channel_name)
mirror_graze_angle = channel_filter.mirror_1.graze_angle
idl_mirror_graze_angle_auto = (
v6_genx_s[_channel_name_to_index_mapping[channel_name]]["MIRROR2"][
"GRAZE_ANGLE"
]
* u.deg
)
assert u.isclose(mirror_graze_angle, idl_mirror_graze_angle_auto)
idl_mirror_graze_angle_manu = [0.910000] * u.deg
assert u.isclose(idl_mirror_graze_angle_manu, mirror_graze_angle)
@pytest.mark.parametrize("channel_name", channel_names)
def test_mirror2_wavelength(channel_name):
channel_filter = Channel(channel_name)
mirror_number_of_length = int(channel_filter.mirror_1.number_of_wavelengths)
mirror_wavelength = channel_filter.mirror_1.wavelength[:mirror_number_of_length]
idl_mirror_array_length = int(
v6_genx_s[_channel_name_to_index_mapping[channel_name]]["MIRROR2"]["LENGTH"]
)
idl_mirror_wavelength_auto = (
v6_genx_s[_channel_name_to_index_mapping[channel_name]]["MIRROR2"]["WAVE"][
:idl_mirror_array_length
]
* u.angstrom
)
assert u.allclose(idl_mirror_wavelength_auto, mirror_wavelength)
idl_mirror_wavelength_manu = [
1.00000,
1.10000,
1.20000,
1.30000,
1.40000,
1.50000,
1.60000,
1.70000,
1.80000,
1.90000,
] * u.angstrom
assert u.allclose(idl_mirror_wavelength_manu, mirror_wavelength[0:10])
@pytest.mark.parametrize("channel_name", channel_names)
def test_mirror2_reflection(channel_name):
channel_filter = Channel(channel_name)
mirror_number_of_length = int(channel_filter.mirror_1.number_of_wavelengths)
mirror_reflection = channel_filter.mirror_1.reflection[:mirror_number_of_length]
idl_mirror_array_length = int(
v6_genx_s[_channel_name_to_index_mapping[channel_name]]["MIRROR2"]["LENGTH"]
)
idl_mirror_wavelength_auto = (
v6_genx_s[_channel_name_to_index_mapping[channel_name]]["MIRROR2"]["REFL"][
:idl_mirror_array_length
]
* u.angstrom
)
assert u.allclose(idl_mirror_wavelength_auto, mirror_reflection)
@pytest.mark.parametrize("channel_name", channel_names)
def test_channel_name(channel_name):
channel_filter = Channel(channel_name)
name = channel_filter.name
IDL_mirror_name_AUTO = v6_genx_s[_channel_name_to_index_mapping[channel_name]][
"NAME"
]
assert name == IDL_mirror_name_AUTO
@pytest.mark.parametrize("channel_name", channel_names)
def test_channel_wavelength(channel_name):
channel_filter = Channel(channel_name)
wavelength_length = int(channel_filter.number_of_wavelengths)
wavelength = channel_filter.wavelength[:wavelength_length]
idl_array_length = int(
v6_genx_s[_channel_name_to_index_mapping[channel_name]]["LENGTH"]
)
idl_wavelength_auto = (
v6_genx_s[_channel_name_to_index_mapping[channel_name]]["WAVE"][
:idl_array_length
]
* u.angstrom
)
assert u.allclose(idl_wavelength_auto, wavelength)
idl_mirror_wavelength_manu = [
9.00000,
9.10000,
9.20000,
9.30000,
9.40000,
9.50000,
9.60000,
9.70000,
9.80000,
9.90000,
] * u.angstrom
assert u.allclose(idl_mirror_wavelength_manu, wavelength[80:90])
@pytest.mark.parametrize("channel_name", channel_names)
def test_channel_transmission(channel_name):
channel_filter = Channel(channel_name)
transmission_length = int(channel_filter.number_of_wavelengths)
transmission = channel_filter.transmission[:transmission_length]
idl_array_length = int(
v6_genx_s[_channel_name_to_index_mapping[channel_name]]["LENGTH"]
)
idl_transmission_auto = v6_genx_s[_channel_name_to_index_mapping[channel_name]][
"TRANS"
][:idl_array_length]
assert u.allclose(idl_transmission_auto, transmission)
@pytest.mark.parametrize("channel_name", channel_names)
def test_channel_number_of_wavelengths(channel_name):
channel_filter = Channel(channel_name)
channel_number_of_wavelengths = channel_filter.number_of_wavelengths
idl_array_length = v6_genx_s[_channel_name_to_index_mapping[channel_name]]["LENGTH"]
assert channel_number_of_wavelengths == idl_array_length
@pytest.mark.parametrize("channel_name", channel_names)
def test_channel_observatory(channel_name):
channel_filter = Channel(channel_name)
observatory = channel_filter.observatory
idl_observatory = v6_genx_s[_channel_name_to_index_mapping[channel_name]][
"OBSERVATORY"
]
assert observatory == idl_observatory
@pytest.mark.parametrize("channel_name", channel_names)
def test_channel_instrument(channel_name):
channel_filter = Channel(channel_name)
instrument = channel_filter.instrument
idl_instrument = v6_genx_s[_channel_name_to_index_mapping[channel_name]][
"INSTRUMENT"
]
assert instrument == idl_instrument | xrtpy/response/tests/test_channel.py | import pytest
from astropy import units as u
import numpy as np
from xrtpy.response.channel import Channel
import pkg_resources
import sunpy
import sunpy.map
from sunpy.data import manager
import scipy.io
import sunpy.io.special
channel_names = [
"Al-mesh",
"Al-poly",
"C-poly",
"Ti-poly",
"Be-thin",
"Be-med",
"Al-med",
"Al-thick",
"Be-thick",
"Al-poly/Al-mesh",
"Al-poly/Ti-poly",
"Al-poly/Al-thick",
"Al-poly/Be-thick",
"C-poly/Ti-poly",
]
@pytest.mark.parametrize("channel_name", channel_names)
def test_channel_name(channel_name):
channel = Channel(channel_name)
assert channel.name == channel_name
# Load the reference IDL response tables bundled with xrtpy; SAVEGEN0 holds
# one record per filter channel, indexed by _channel_name_to_index_mapping.
filename = pkg_resources.resource_filename(
    "xrtpy", "data/channels/xrt_channels_v0016.genx"
)
v6_genx = sunpy.io.special.genx.read_genx(filename)
v6_genx_s = v6_genx["SAVEGEN0"]
# Index of each channel's record inside the genx SAVEGEN0 list; the order
# mirrors ``channel_names`` above.
_channel_name_to_index_mapping = dict(
    zip(
        (
            "Al-mesh",
            "Al-poly",
            "C-poly",
            "Ti-poly",
            "Be-thin",
            "Be-med",
            "Al-med",
            "Al-thick",
            "Be-thick",
            "Al-poly/Al-mesh",
            "Al-poly/Ti-poly",
            "Al-poly/Al-thick",
            "Al-poly/Be-thick",
            "C-poly/Ti-poly",
        ),
        range(14),
    )
)
@pytest.mark.parametrize("channel_name", channel_names)
def test_CCD_wavelength(channel_name):
channel_filter = Channel(channel_name)
ccd_wavelength_length = int(channel_filter.ccd.number_of_wavelengths)
ccd_wavelength = channel_filter.ccd.ccd_wavelength[:ccd_wavelength_length]
idl_ccd_array_length = int(
v6_genx_s[_channel_name_to_index_mapping[channel_name]]["CCD"]["LENGTH"]
)
idl_ccd_wavelength_auto = (
v6_genx_s[_channel_name_to_index_mapping[channel_name]]["CCD"]["WAVE"][
:idl_ccd_array_length
]
* u.angstrom
)
assert u.allclose(idl_ccd_wavelength_auto, ccd_wavelength)
idl_ccd_wavelength_manu = [
1.00000,
1.10000,
1.20000,
1.30000,
1.40000,
1.50000,
1.60000,
1.70000,
1.80000,
1.90000,
] * u.angstrom
assert u.allclose(idl_ccd_wavelength_manu, ccd_wavelength[0:10])
@pytest.mark.parametrize("channel_name", channel_names)
def test_CCD_quantum_efficiency(channel_name):
channel_filter = Channel(channel_name)
ccd_array_length = int(channel_filter.ccd.number_of_wavelengths)
ccd_quantum_efficiency = channel_filter.ccd.ccd_quantum_efficiency[
:ccd_array_length
]
idl_ccd_array_length = int(
v6_genx_s[_channel_name_to_index_mapping[channel_name]]["CCD"]["LENGTH"]
)
idl_ccd_quantum_efficiency_auto = v6_genx_s[
_channel_name_to_index_mapping[channel_name]
]["CCD"]["QE"][:idl_ccd_array_length]
assert u.allclose(idl_ccd_quantum_efficiency_auto, ccd_quantum_efficiency)
idl_ccd_quantum_efficiency_manu = [
0.0573069,
0.0751920,
0.0960381,
0.119867,
0.146638,
0.176252,
0.208541,
0.243277,
0.280167,
0.318879,
0.359036,
0.400219,
0.441984,
0.483898,
]
assert idl_ccd_quantum_efficiency_manu, ccd_quantum_efficiency[0:13]
@pytest.mark.parametrize("channel_name", channel_names)
def test_CCD_pixel_size(channel_name):
channel_filter = Channel(channel_name)
ccd_pixel_size = channel_filter.ccd.ccd_pixel_size
idl_ccd_quantum_efficiency_auto = (
v6_genx_s[_channel_name_to_index_mapping[channel_name]]["CCD"]["PIXEL_SIZE"]
* u.micron
)
assert u.allclose(idl_ccd_quantum_efficiency_auto, ccd_pixel_size)
@pytest.mark.parametrize("channel_name", channel_names)
def test_ccd_gain_left(channel_name):
channel_filter = Channel(channel_name)
ccd_gain_left = channel_filter.ccd.ccd_gain_left
idl_ccd_gain_left_auto = (
v6_genx_s[_channel_name_to_index_mapping[channel_name]]["CCD"]["GAIN_L"]
* u.electron
)
assert u.isclose(ccd_gain_left, idl_ccd_gain_left_auto)
@pytest.mark.parametrize("channel_name", channel_names)
def test_ccd_gain_right(channel_name):
channel_filter = Channel(channel_name)
ccd_gain_right = channel_filter.ccd.ccd_gain_right
idl_ccd_gain_right_auto = (
v6_genx_s[_channel_name_to_index_mapping[channel_name]]["CCD"]["GAIN_R"]
* u.electron
)
assert u.isclose(ccd_gain_right, idl_ccd_gain_right_auto)
@pytest.mark.parametrize("channel_name", channel_names)
def test_ccd_full_well(channel_name):
channel_filter = Channel(channel_name)
ccd_full_well = channel_filter.ccd.ccd_full_well
idl_ccd_full_well_auto = (
v6_genx_s[_channel_name_to_index_mapping[channel_name]]["CCD"]["FULL_WELL"]
* u.electron
)
assert u.isclose(ccd_full_well, idl_ccd_full_well_auto)
@pytest.mark.parametrize("channel_name", channel_names)
def test_ccd_ev_ore_electron(channel_name):
channel_filter = Channel(channel_name)
ccd_full_well = channel_filter.ccd.ccd_ev_ore_electron
idl_ccd_full_well_auto = v6_genx_s[_channel_name_to_index_mapping[channel_name]][
"CCD"
]["EV_PER_EL"] * (u.eV / u.electron)
assert u.isclose(ccd_full_well, idl_ccd_full_well_auto)
@pytest.mark.parametrize("channel_name", channel_names)
def test_ccd_name(channel_name):
channel_filter = Channel(channel_name)
ccd_name = channel_filter.ccd.ccd_name
idl_ccd_name_auto = v6_genx_s[_channel_name_to_index_mapping[channel_name]]["CCD"][
"LONG_NAME"
]
assert ccd_name == idl_ccd_name_auto
@pytest.mark.parametrize("channel_name", channel_names)
def test_entrancefilter_name(channel_name):
channel_filter = Channel(channel_name)
entrancefilter_name = channel_filter.entrancefilter.entrancefilter_name
IDL_entrancefilter_name_AUTO = v6_genx_s[
_channel_name_to_index_mapping[channel_name]
]["EN_FILTER"]["LONG_NAME"]
assert entrancefilter_name == IDL_entrancefilter_name_AUTO
@pytest.mark.parametrize("channel_name", channel_names)
def test_entrancefilter_material(channel_name):
channel_filter = Channel(channel_name)
entrancefilter_material = channel_filter.entrancefilter.entrancefilter_material
idl_entrancefilter_material_auto = v6_genx_s[
_channel_name_to_index_mapping[channel_name]
]["EN_FILTER"]["MATERIAL"]
if np.all(entrancefilter_material == idl_entrancefilter_material_auto):
pass
else:
raise ValueError("FAIL: test_entrancefilter_material")
@pytest.mark.parametrize("channel_name", channel_names)
def test_entrancefilter_thickness(channel_name):
channel_filter = Channel(channel_name)
entrancefilter_thickness = channel_filter.entrancefilter.entrancefilter_thickness
idl_entrancefilter_thick_auto = (
v6_genx_s[_channel_name_to_index_mapping[channel_name]]["EN_FILTER"]["THICK"]
* u.angstrom
)
assert u.allclose(entrancefilter_thickness, idl_entrancefilter_thick_auto)
@pytest.mark.parametrize("channel_name", channel_names)
def test_entrancefilter_density(channel_name):
channel_filter = Channel(channel_name)
entrancefilter_density = channel_filter.entrancefilter.entrancefilter_density
idl_entrancefilter_density_auto = v6_genx_s[
_channel_name_to_index_mapping[channel_name]
]["EN_FILTER"]["DENS"] * (u.g * u.cm ** -3)
assert u.allclose(entrancefilter_density, idl_entrancefilter_density_auto)
@pytest.mark.parametrize("channel_name", channel_names)
def test_entrancefilter_wavelength(channel_name):
channel_filter = Channel(channel_name)
entrancefilter_wavelength_length = int(
channel_filter.entrancefilter.number_of_wavelengths
)
entrancefilter_wavelength = channel_filter.entrancefilter.entrancefilter_wavelength[
:entrancefilter_wavelength_length
]
idl_entrancefilter_array_length = int(
v6_genx_s[_channel_name_to_index_mapping[channel_name]]["EN_FILTER"]["LENGTH"]
)
idl_entrancefilter_wavelength_auto = v6_genx_s[
_channel_name_to_index_mapping[channel_name]
]["EN_FILTER"]["WAVE"][:idl_entrancefilter_array_length] * u.Unit(
"Angstrom"
) # wavelength_CCD_unit
assert u.allclose(idl_entrancefilter_wavelength_auto, entrancefilter_wavelength)
idl_entrancefilter_wavelength_manu = [
1.00000,
1.00802,
1.01610,
1.02424,
1.03245,
1.04073,
1.04907,
1.05748,
1.06595,
1.07450,
] * u.angstrom
assert u.allclose(
idl_entrancefilter_wavelength_manu, entrancefilter_wavelength[0:10]
)
@pytest.mark.parametrize("channel_name", channel_names)
def test_entrancefilter_transmission(channel_name):
channel_filter = Channel(channel_name)
entrancefilter_transmission_length = int(
channel_filter.entrancefilter.number_of_wavelengths
)
entrancefilter_transmission = (
channel_filter.entrancefilter.entrancefilter_transmission[
:entrancefilter_transmission_length
]
)
idl_entrancefilter_array_length = int(
v6_genx_s[_channel_name_to_index_mapping[channel_name]]["EN_FILTER"]["LENGTH"]
)
idl_entrancefilter_transmission_auto = v6_genx_s[
_channel_name_to_index_mapping[channel_name]
]["EN_FILTER"]["TRANS"][:idl_entrancefilter_array_length]
assert u.allclose(idl_entrancefilter_transmission_auto, entrancefilter_transmission)
@pytest.mark.parametrize("channel_name", channel_names)
def test_entrancefilter_mesh_transmission(channel_name):
channel_filter = Channel(channel_name)
entrancefilter_mesh_transmission = (
channel_filter.entrancefilter.entrancefilter_mesh_transmission
)
idl_entrancefilter_mesh_transmission_auto = v6_genx_s[
_channel_name_to_index_mapping[channel_name]
]["EN_FILTER"]["MESH_TRANS"]
assert entrancefilter_mesh_transmission == idl_entrancefilter_mesh_transmission_auto
@pytest.mark.parametrize("channel_name", channel_names)
def test_entrancefilter_substrate(channel_name):
channel_filter = Channel(channel_name)
entrancefilter_substrate = channel_filter.entrancefilter.entrancefilter_substrate
idl_entrancefilter_substrate_auto = v6_genx_s[
_channel_name_to_index_mapping[channel_name]
]["EN_FILTER"]["SUBSTRATE"]
assert entrancefilter_substrate == idl_entrancefilter_substrate_auto
@pytest.mark.parametrize("channel_name", channel_names)
def test_filter1_name(channel_name):
channel_filter = Channel(channel_name)
filter_name = channel_filter.filter_1.name
idl_filter_name_auto = v6_genx_s[_channel_name_to_index_mapping[channel_name]][
"FP_FILTER1"
]["LONG_NAME"]
assert filter_name == idl_filter_name_auto
@pytest.mark.parametrize("channel_name", channel_names)
def test_filter1_material(channel_name):
channel_filter = Channel(channel_name)
filter_material = channel_filter.filter_1.material
idl_filter_material_auto = v6_genx_s[_channel_name_to_index_mapping[channel_name]][
"FP_FILTER1"
]["MATERIAL"]
assert np.all(filter_material == idl_filter_material_auto)
@pytest.mark.parametrize("channel_name", channel_names)
def test_filter1_thickness(channel_name):
channel_filter = Channel(channel_name)
filter_thickness = channel_filter.filter_1.thickness
idl_filter_thick_auto = (
v6_genx_s[_channel_name_to_index_mapping[channel_name]]["FP_FILTER1"]["THICK"]
* u.angstrom
)
assert np.all(filter_thickness == idl_filter_thick_auto)
@pytest.mark.parametrize("channel_name", channel_names)
def test_filter1_density(channel_name):
channel_filter = Channel(channel_name)
filter_density = channel_filter.filter_1.density
idl_filter_density_auto = v6_genx_s[_channel_name_to_index_mapping[channel_name]][
"FP_FILTER1"
]["DENS"] * (u.g * u.cm ** -3)
assert u.allclose(filter_density, idl_filter_density_auto)
@pytest.mark.parametrize("channel_name", channel_names)
def test_filter1_wavelength(channel_name):
channel_filter = Channel(channel_name)
filter_wavelength_length = int(channel_filter.filter_1.number_of_wavelengths)
filter_wavelength = channel_filter.filter_1.wavelength[:filter_wavelength_length]
idl_filter_array_length = int(
v6_genx_s[_channel_name_to_index_mapping[channel_name]]["FP_FILTER1"]["LENGTH"]
)
idl_filter_wavelength_auto = (
v6_genx_s[_channel_name_to_index_mapping[channel_name]]["FP_FILTER1"]["WAVE"][
:idl_filter_array_length
]
* u.angstrom
)
assert u.allclose(idl_filter_wavelength_auto, filter_wavelength)
idl_filter_wavelength_manu = [
1.00000,
1.00802,
1.01610,
1.02424,
1.03245,
1.04073,
1.04907,
1.05748,
1.06595,
1.07450,
] * u.angstrom
assert u.allclose(idl_filter_wavelength_manu, filter_wavelength[0:10])
@pytest.mark.parametrize("channel_name", channel_names)
def test_filter1_transmission(channel_name):
channel_filter = Channel(channel_name)
filter_transmission_length = int(channel_filter.filter_1.number_of_wavelengths)
filter_transmission = channel_filter.filter_1.transmission[
:filter_transmission_length
]
idl_filter_array_length = int(
v6_genx_s[_channel_name_to_index_mapping[channel_name]]["FP_FILTER1"]["LENGTH"]
)
idl_filter_transmission_auto = v6_genx_s[
_channel_name_to_index_mapping[channel_name]
]["FP_FILTER1"]["TRANS"][:idl_filter_array_length]
assert u.allclose(idl_filter_transmission_auto, filter_transmission)
@pytest.mark.parametrize("channel_name", channel_names)
def test_filter1_mesh_transmission(channel_name):
channel_filter = Channel(channel_name)
filter_mesh_transmission = channel_filter._filter_1.mesh_trans
idl_filter_mesh_transmission_auto = v6_genx_s[
_channel_name_to_index_mapping[channel_name]
]["FP_FILTER1"]["MESH_TRANS"]
assert filter_mesh_transmission == idl_filter_mesh_transmission_auto
@pytest.mark.parametrize("channel_name", channel_names)
def test_filter1_substrate(channel_name):
channel_filter = Channel(channel_name)
filter_substrate = channel_filter.filter_1.substrate
idl_filter_substrate_auto = v6_genx_s[_channel_name_to_index_mapping[channel_name]][
"FP_FILTER1"
]["SUBSTRATE"]
assert filter_substrate == idl_filter_substrate_auto
@pytest.mark.parametrize("channel_name", channel_names)
def test_filter2_name(channel_name):
channel_filter = Channel(channel_name)
filter_name = channel_filter.filter_2.name
IDL_filter_name_AUTO = v6_genx_s[_channel_name_to_index_mapping[channel_name]][
"FP_FILTER2"
]["LONG_NAME"]
assert filter_name == IDL_filter_name_AUTO
@pytest.mark.parametrize("channel_name", channel_names)
def test_filter2_material(channel_name):
channel_filter = Channel(channel_name)
filter_material = channel_filter.filter_2.material
idl_filter_material_auto = v6_genx_s[_channel_name_to_index_mapping[channel_name]][
"FP_FILTER2"
]["MATERIAL"]
assert np.all(filter_material == idl_filter_material_auto)
@pytest.mark.parametrize("channel_name", channel_names)
def test_filter2_thickness(channel_name):
channel_filter = Channel(channel_name)
filter_thickness = channel_filter.filter_2.thickness
idl_filter_thick_auto = (
v6_genx_s[_channel_name_to_index_mapping[channel_name]]["FP_FILTER2"]["THICK"]
* u.angstrom
)
assert u.allclose(filter_thickness, idl_filter_thick_auto)
@pytest.mark.parametrize("channel_name", channel_names)
def test_filter2_density(channel_name):
channel_filter = Channel(channel_name)
filter_density = channel_filter.filter_2.density
IDL_filter_density_AUTO = v6_genx_s[_channel_name_to_index_mapping[channel_name]][
"FP_FILTER2"
]["DENS"] * (u.g * u.cm ** -3)
np.allclose(filter_density, IDL_filter_density_AUTO)
@pytest.mark.parametrize("channel_name", channel_names)
def test_filter2_wavelength(channel_name):
channel_filter = Channel(channel_name)
filter_wavelength_length = int(channel_filter.filter_2.number_of_wavelengths)
filter_wavelength = channel_filter.filter_2.wavelength[:filter_wavelength_length]
idl_filter_array_length = int(
v6_genx_s[_channel_name_to_index_mapping[channel_name]]["FP_FILTER2"]["LENGTH"]
)
idl_filter_wavelength_auto = (
v6_genx_s[_channel_name_to_index_mapping[channel_name]]["FP_FILTER2"]["WAVE"][
:idl_filter_array_length
]
* u.angstrom
)
assert u.allclose(idl_filter_wavelength_auto, filter_wavelength)
idl_filter_wavelength_manu = [
1.00000,
1.00802,
1.01610,
1.02424,
1.03245,
1.04073,
1.04907,
1.05748,
1.06595,
1.07450,
] * u.angstrom
assert u.allclose(idl_filter_wavelength_manu, filter_wavelength[0:10])
@pytest.mark.parametrize("channel_name", channel_names)
def test_filter2_transmission(channel_name):
channel_filter = Channel(channel_name)
filter_transmission_length = int(channel_filter.filter_2.number_of_wavelengths)
filter_transmission = channel_filter.filter_2.transmission[
:filter_transmission_length
]
idl_filter_array_length = int(
v6_genx_s[_channel_name_to_index_mapping[channel_name]]["FP_FILTER2"]["LENGTH"]
)
idl_filter_transmission_auto = v6_genx_s[
_channel_name_to_index_mapping[channel_name]
]["FP_FILTER2"]["TRANS"][:idl_filter_array_length]
assert u.allclose(idl_filter_transmission_auto, filter_transmission)
@pytest.mark.parametrize("channel_name", channel_names)
def test_filter2_mesh_transmission(channel_name):
channel_filter = Channel(channel_name)
filter_mesh_transmission = channel_filter.filter_2.mesh_trans
idl_filter_mesh_transmission_auto = v6_genx_s[
_channel_name_to_index_mapping[channel_name]
]["FP_FILTER2"]["MESH_TRANS"]
assert filter_mesh_transmission == idl_filter_mesh_transmission_auto
@pytest.mark.parametrize("channel_name", channel_names)
def test_filter2_substrate(channel_name):
channel_filter = Channel(channel_name)
filter_substrate = channel_filter.filter_2.substrate
idl_filter_substrate_auto = v6_genx_s[_channel_name_to_index_mapping[channel_name]][
"FP_FILTER2"
]["SUBSTRATE"]
assert filter_substrate == idl_filter_substrate_auto
@pytest.mark.parametrize("channel_name", channel_names)
def test_geometry_name(channel_name):
channel_filter = Channel(channel_name)
geometry_name = channel_filter.geometry.name
IDL_geometry_name_AUTO = v6_genx_s[_channel_name_to_index_mapping[channel_name]][
"GEOM"
]["LONG_NAME"]
assert geometry_name == IDL_geometry_name_AUTO
@pytest.mark.parametrize("channel_name", channel_names)
def test_geometry_focal_len(channel_name):
channel_filter = Channel(channel_name)
geometry_focal_len = channel_filter.geometry.focal_len
IDL_geometry_focal_len_AUTO = (
v6_genx_s[_channel_name_to_index_mapping[channel_name]]["GEOM"]["FOC_LEN"]
* u.cm
)
assert u.isclose(geometry_focal_len, IDL_geometry_focal_len_AUTO)
@pytest.mark.parametrize("channel_name", channel_names)
def test_geometry_aperture_area(channel_name):
channel_filter = Channel(channel_name)
geometry_aperture_area = channel_filter.geometry.aperture_area
idl_geometry_aperture_area_auto = (
v6_genx_s[_channel_name_to_index_mapping[channel_name]]["GEOM"]["APERTURE_AREA"]
* u.cm ** 2
)
assert u.isclose(geometry_aperture_area, idl_geometry_aperture_area_auto)
@pytest.mark.parametrize("channel_name", channel_names)
def test_mirror1_name(channel_name):
channel_filter = Channel(channel_name)
mirror_name = channel_filter.mirror_1.name
IDL_mirror_name_AUTO = v6_genx_s[_channel_name_to_index_mapping[channel_name]][
"MIRROR1"
]["LONG_NAME"]
assert mirror_name == IDL_mirror_name_AUTO
@pytest.mark.parametrize("channel_name", channel_names)
def test_mirror1_material(channel_name):
channel_filter = Channel(channel_name)
mirror_material = channel_filter.mirror_1.material
IDL_mirror_material_AUTO = v6_genx_s[_channel_name_to_index_mapping[channel_name]][
"MIRROR1"
]["MATERIAL"]
assert mirror_material == IDL_mirror_material_AUTO
@pytest.mark.parametrize("channel_name", channel_names)
def test_mirror1_density(channel_name):
channel_filter = Channel(channel_name)
mirror_density = channel_filter.mirror_1.density
idl_mirror_density_auto = v6_genx_s[_channel_name_to_index_mapping[channel_name]][
"MIRROR1"
]["DENS"] * (u.g * u.cm ** -3)
assert u.isclose(mirror_density, idl_mirror_density_auto)
@pytest.mark.parametrize("channel_name", channel_names)
def test_mirro1_graze_angle(channel_name):
channel_filter = Channel(channel_name)
mirror_graze_angle = channel_filter.mirror_1.graze_angle
idl_mirror_graze_angle_auto = (
v6_genx_s[_channel_name_to_index_mapping[channel_name]]["MIRROR1"][
"GRAZE_ANGLE"
]
* u.deg
)
assert u.isclose(mirror_graze_angle, idl_mirror_graze_angle_auto)
idl_mirror_graze_angle_manu = [0.910000] * u.deg
assert u.isclose(idl_mirror_graze_angle_manu, mirror_graze_angle)
@pytest.mark.parametrize("channel_name", channel_names)
def test_mirror1_wavelength(channel_name):
channel_filter = Channel(channel_name)
mirror_number_of_length = int(channel_filter.mirror_1.number_of_wavelengths)
mirror_wavelength = channel_filter.mirror_1.wavelength[:mirror_number_of_length]
idl_mirror_array_length = int(
v6_genx_s[_channel_name_to_index_mapping[channel_name]]["MIRROR1"]["LENGTH"]
)
idl_mirror_wavelength_auto = v6_genx_s[
_channel_name_to_index_mapping[channel_name]
]["MIRROR1"]["WAVE"][:idl_mirror_array_length] * u.Unit("Angstrom")
assert u.allclose(idl_mirror_wavelength_auto, mirror_wavelength)
idl_mirror_wavelength_manu = [
1.00000,
1.10000,
1.20000,
1.30000,
1.40000,
1.50000,
1.60000,
1.70000,
1.80000,
1.90000,
] * u.angstrom
assert u.allclose(idl_mirror_wavelength_manu, mirror_wavelength[0:10])
@pytest.mark.parametrize("channel_name", channel_names)
def test_mirror1_reflection(channel_name):
channel_filter = Channel(channel_name)
mirror_number_of_length = int(channel_filter.mirror_1.number_of_wavelengths)
mirror_reflection = channel_filter.mirror_1.reflection[:mirror_number_of_length]
idl_mirror_array_length = int(
v6_genx_s[_channel_name_to_index_mapping[channel_name]]["MIRROR1"]["LENGTH"]
)
idl_mirror_wavelength_auto = (
v6_genx_s[_channel_name_to_index_mapping[channel_name]]["MIRROR1"]["REFL"][
:idl_mirror_array_length
]
* u.angstrom
)
assert u.allclose(idl_mirror_wavelength_auto, mirror_reflection)
@pytest.mark.parametrize("channel_name", channel_names)
def test_mirror2_name(channel_name):
channel_filter = Channel(channel_name)
mirror_name = channel_filter.mirror_1.name
idl_mirror_name_auto = v6_genx_s[_channel_name_to_index_mapping[channel_name]][
"MIRROR2"
]["LONG_NAME"]
assert mirror_name == idl_mirror_name_auto
@pytest.mark.parametrize("channel_name", channel_names)
def test_mirror2_material(channel_name):
channel_filter = Channel(channel_name)
mirror_material = channel_filter.mirror_1.material
idl_mirror_material_auto = v6_genx_s[_channel_name_to_index_mapping[channel_name]][
"MIRROR2"
]["MATERIAL"]
assert mirror_material == idl_mirror_material_auto
@pytest.mark.parametrize("channel_name", channel_names)
def test_mirror2_density(channel_name):
channel_filter = Channel(channel_name)
mirror_density = channel_filter.mirror_1.density
idl_mirror_density_auto = v6_genx_s[_channel_name_to_index_mapping[channel_name]][
"MIRROR2"
]["DENS"] * (u.g * u.cm ** -3)
assert u.isclose(mirror_density, idl_mirror_density_auto)
@pytest.mark.parametrize("channel_name", channel_names)
def test_mirror2_graze_angle(channel_name):
channel_filter = Channel(channel_name)
mirror_graze_angle = channel_filter.mirror_1.graze_angle
idl_mirror_graze_angle_auto = (
v6_genx_s[_channel_name_to_index_mapping[channel_name]]["MIRROR2"][
"GRAZE_ANGLE"
]
* u.deg
)
assert u.isclose(mirror_graze_angle, idl_mirror_graze_angle_auto)
idl_mirror_graze_angle_manu = [0.910000] * u.deg
assert u.isclose(idl_mirror_graze_angle_manu, mirror_graze_angle)
@pytest.mark.parametrize("channel_name", channel_names)
def test_mirror2_wavelength(channel_name):
channel_filter = Channel(channel_name)
mirror_number_of_length = int(channel_filter.mirror_1.number_of_wavelengths)
mirror_wavelength = channel_filter.mirror_1.wavelength[:mirror_number_of_length]
idl_mirror_array_length = int(
v6_genx_s[_channel_name_to_index_mapping[channel_name]]["MIRROR2"]["LENGTH"]
)
idl_mirror_wavelength_auto = (
v6_genx_s[_channel_name_to_index_mapping[channel_name]]["MIRROR2"]["WAVE"][
:idl_mirror_array_length
]
* u.angstrom
)
assert u.allclose(idl_mirror_wavelength_auto, mirror_wavelength)
idl_mirror_wavelength_manu = [
1.00000,
1.10000,
1.20000,
1.30000,
1.40000,
1.50000,
1.60000,
1.70000,
1.80000,
1.90000,
] * u.angstrom
assert u.allclose(idl_mirror_wavelength_manu, mirror_wavelength[0:10])
@pytest.mark.parametrize("channel_name", channel_names)
def test_mirror2_reflection(channel_name):
channel_filter = Channel(channel_name)
mirror_number_of_length = int(channel_filter.mirror_1.number_of_wavelengths)
mirror_reflection = channel_filter.mirror_1.reflection[:mirror_number_of_length]
idl_mirror_array_length = int(
v6_genx_s[_channel_name_to_index_mapping[channel_name]]["MIRROR2"]["LENGTH"]
)
idl_mirror_wavelength_auto = (
v6_genx_s[_channel_name_to_index_mapping[channel_name]]["MIRROR2"]["REFL"][
:idl_mirror_array_length
]
* u.angstrom
)
assert u.allclose(idl_mirror_wavelength_auto, mirror_reflection)
@pytest.mark.parametrize("channel_name", channel_names)
def test_channel_name(channel_name):
channel_filter = Channel(channel_name)
name = channel_filter.name
IDL_mirror_name_AUTO = v6_genx_s[_channel_name_to_index_mapping[channel_name]][
"NAME"
]
assert name == IDL_mirror_name_AUTO
@pytest.mark.parametrize("channel_name", channel_names)
def test_channel_wavelength(channel_name):
channel_filter = Channel(channel_name)
wavelength_length = int(channel_filter.number_of_wavelengths)
wavelength = channel_filter.wavelength[:wavelength_length]
idl_array_length = int(
v6_genx_s[_channel_name_to_index_mapping[channel_name]]["LENGTH"]
)
idl_wavelength_auto = (
v6_genx_s[_channel_name_to_index_mapping[channel_name]]["WAVE"][
:idl_array_length
]
* u.angstrom
)
assert u.allclose(idl_wavelength_auto, wavelength)
idl_mirror_wavelength_manu = [
9.00000,
9.10000,
9.20000,
9.30000,
9.40000,
9.50000,
9.60000,
9.70000,
9.80000,
9.90000,
] * u.angstrom
assert u.allclose(idl_mirror_wavelength_manu, wavelength[80:90])
@pytest.mark.parametrize("channel_name", channel_names)
def test_channel_transmission(channel_name):
channel_filter = Channel(channel_name)
transmission_length = int(channel_filter.number_of_wavelengths)
transmission = channel_filter.transmission[:transmission_length]
idl_array_length = int(
v6_genx_s[_channel_name_to_index_mapping[channel_name]]["LENGTH"]
)
idl_transmission_auto = v6_genx_s[_channel_name_to_index_mapping[channel_name]][
"TRANS"
][:idl_array_length]
assert u.allclose(idl_transmission_auto, transmission)
@pytest.mark.parametrize("channel_name", channel_names)
def test_channel_number_of_wavelengths(channel_name):
channel_filter = Channel(channel_name)
channel_number_of_wavelengths = channel_filter.number_of_wavelengths
idl_array_length = v6_genx_s[_channel_name_to_index_mapping[channel_name]]["LENGTH"]
assert channel_number_of_wavelengths == idl_array_length
@pytest.mark.parametrize("channel_name", channel_names)
def test_channel_observatory(channel_name):
channel_filter = Channel(channel_name)
observatory = channel_filter.observatory
idl_observatory = v6_genx_s[_channel_name_to_index_mapping[channel_name]][
"OBSERVATORY"
]
assert observatory == idl_observatory
@pytest.mark.parametrize("channel_name", channel_names)
def test_channel_instrument(channel_name):
channel_filter = Channel(channel_name)
instrument = channel_filter.instrument
idl_instrument = v6_genx_s[_channel_name_to_index_mapping[channel_name]][
"INSTRUMENT"
]
assert instrument == idl_instrument | 0.638159 | 0.447098 |
# template file: justice_py_sdk_codegen/__main__.py
"""Auto-generated package that contains models used by the Justice Matchmaking Service."""
__version__ = "2.15.1"
__author__ = "AccelByte"
__email__ = "<EMAIL>"
# pylint: disable=line-too-long
from ._matchmaking import add_user_into_session_in_channel
from ._matchmaking import add_user_into_session_in_channel_async
from ._matchmaking import bulk_get_sessions
from ._matchmaking import bulk_get_sessions_async
from ._matchmaking import create_channel_handler
from ._matchmaking import create_channel_handler_async
from ._matchmaking import delete_channel_handler
from ._matchmaking import delete_channel_handler_async
from ._matchmaking import delete_session_in_channel
from ._matchmaking import delete_session_in_channel_async
from ._matchmaking import delete_user_from_session_in_channel
from ._matchmaking import delete_user_from_session_in_channel_async
from ._matchmaking import dequeue_session_handler
from ._matchmaking import dequeue_session_handler_async
from ._matchmaking import export_channels
from ._matchmaking import export_channels_async
from ._matchmaking import get_all_channels_handler
from ._matchmaking import get_all_channels_handler_async
from ._matchmaking import get_all_party_in_all_channel
from ._matchmaking import get_all_party_in_all_channel_async
from ._matchmaking import get_all_party_in_channel
from ._matchmaking import get_all_party_in_channel_async
from ._matchmaking import get_all_sessions_in_channel
from ._matchmaking import get_all_sessions_in_channel_async
from ._matchmaking import get_session_history_detailed
from ._matchmaking import get_session_history_detailed_async
from ._matchmaking import get_single_matchmaking_channel
from ._matchmaking import get_single_matchmaking_channel_async
from ._matchmaking import import_channels
from ._matchmaking import import_channels_async
from ._matchmaking import public_get_all_matchmaking_channel
from ._matchmaking import public_get_all_matchmaking_channel_async
from ._matchmaking import public_get_single_matchmaking_channel
from ._matchmaking import public_get_single_matchmaking_channel_async
from ._matchmaking import query_session_handler
from ._matchmaking import query_session_handler_async
from ._matchmaking import queue_session_handler
from ._matchmaking import queue_session_handler_async
from ._matchmaking import rebalance
from ._matchmaking import rebalance_async
from ._matchmaking import search_sessions
from ._matchmaking import search_sessions_async
from ._matchmaking import search_sessions_v2
from ._matchmaking import search_sessions_v2_async
from ._matchmaking import store_match_results
from ._matchmaking import store_match_results_async
from ._matchmaking import update_matchmaking_channel
from ._matchmaking import update_matchmaking_channel_async
from ._matchmaking_operations import get_healthcheck_info
from ._matchmaking_operations import get_healthcheck_info_async
from ._matchmaking_operations import handler_v3_healthz
from ._matchmaking_operations import handler_v3_healthz_async
from ._matchmaking_operations import public_get_messages
from ._matchmaking_operations import public_get_messages_async
from ._matchmaking_operations import version_check_handler
from ._matchmaking_operations import version_check_handler_async
from ._social_matchmaking import update_play_time_weight
from ._social_matchmaking import update_play_time_weight_async
# template file: justice_py_sdk_codegen/__main__.py
"""Auto-generated package that contains models used by the Justice Matchmaking Service."""
__version__ = "2.15.1"
__author__ = "AccelByte"
__email__ = "<EMAIL>"
# pylint: disable=line-too-long
from ._matchmaking import add_user_into_session_in_channel
from ._matchmaking import add_user_into_session_in_channel_async
from ._matchmaking import bulk_get_sessions
from ._matchmaking import bulk_get_sessions_async
from ._matchmaking import create_channel_handler
from ._matchmaking import create_channel_handler_async
from ._matchmaking import delete_channel_handler
from ._matchmaking import delete_channel_handler_async
from ._matchmaking import delete_session_in_channel
from ._matchmaking import delete_session_in_channel_async
from ._matchmaking import delete_user_from_session_in_channel
from ._matchmaking import delete_user_from_session_in_channel_async
from ._matchmaking import dequeue_session_handler
from ._matchmaking import dequeue_session_handler_async
from ._matchmaking import export_channels
from ._matchmaking import export_channels_async
from ._matchmaking import get_all_channels_handler
from ._matchmaking import get_all_channels_handler_async
from ._matchmaking import get_all_party_in_all_channel
from ._matchmaking import get_all_party_in_all_channel_async
from ._matchmaking import get_all_party_in_channel
from ._matchmaking import get_all_party_in_channel_async
from ._matchmaking import get_all_sessions_in_channel
from ._matchmaking import get_all_sessions_in_channel_async
from ._matchmaking import get_session_history_detailed
from ._matchmaking import get_session_history_detailed_async
from ._matchmaking import get_single_matchmaking_channel
from ._matchmaking import get_single_matchmaking_channel_async
from ._matchmaking import import_channels
from ._matchmaking import import_channels_async
from ._matchmaking import public_get_all_matchmaking_channel
from ._matchmaking import public_get_all_matchmaking_channel_async
from ._matchmaking import public_get_single_matchmaking_channel
from ._matchmaking import public_get_single_matchmaking_channel_async
from ._matchmaking import query_session_handler
from ._matchmaking import query_session_handler_async
from ._matchmaking import queue_session_handler
from ._matchmaking import queue_session_handler_async
from ._matchmaking import rebalance
from ._matchmaking import rebalance_async
from ._matchmaking import search_sessions
from ._matchmaking import search_sessions_async
from ._matchmaking import search_sessions_v2
from ._matchmaking import search_sessions_v2_async
from ._matchmaking import store_match_results
from ._matchmaking import store_match_results_async
from ._matchmaking import update_matchmaking_channel
from ._matchmaking import update_matchmaking_channel_async
from ._matchmaking_operations import get_healthcheck_info
from ._matchmaking_operations import get_healthcheck_info_async
from ._matchmaking_operations import handler_v3_healthz
from ._matchmaking_operations import handler_v3_healthz_async
from ._matchmaking_operations import public_get_messages
from ._matchmaking_operations import public_get_messages_async
from ._matchmaking_operations import version_check_handler
from ._matchmaking_operations import version_check_handler_async
from ._social_matchmaking import update_play_time_weight
from ._social_matchmaking import update_play_time_weight_async | 0.508788 | 0.037047 |
from unittest import TestCase
from datetime import date
from app.domain.model import (
Currency,
Category,
Account,
Operation
)
class TestAccount(TestCase):
    """Unit tests for the Account domain model."""

    def test_hashes_must_be_identical(self):
        # Account identity is derived from its id string, so hashes must match.
        account = Account("abc", Currency.EUR, [])
        self.assertEqual(hash(account), hash("abc"))

    def test_add_operation_must_append_to_account(self):
        my_account = Account("uuid", Currency.EUR, [])
        t1 = Operation("my operation", date(2022, 1, 26), -12345.90, Currency.EUR, Category.HOUSING)
        my_account.add_operation(t1)
        self.assertEqual(my_account.operations, [t1])

    def test_add_operation_must_raise_exception(self):
        # Raw strings instead of enum members must be rejected by the account.
        my_account = Account("uuid", Currency.USD, [])
        with self.assertRaises(ValueError):
            new_op = Operation("my operation", date(2022, 1, 26), -12345.90, 'EUR', 'HOUSING')
            my_account.add_operation(new_op)

    def test_compute_balance_must_return_value(self):
        t1 = Operation("My operation one", date.today(), 12.36, Currency.EUR)
        t2 = Operation("My operation two", date.today(), 29.78, Currency.EUR)
        my_account = Account("uuid", Currency.EUR, [t1, t2])
        self.assertEqual(my_account.compute_balance(), t1.value + t2.value)

    def test_compute_balance_category_must_return_value(self):
        t1 = Operation("My operation one", date.today(), 1290.36, Currency.EUR, Category.SALARY)
        t2 = Operation("My operation two", date.today(), 29.78, Currency.EUR, Category.HOBBIES_SPORT)
        t3 = Operation("My operation three", date.today(), 4.99, Currency.EUR, Category.HOBBIES_SPORT)
        my_account = Account("uuid", Currency.EUR, [t1, t2, t3])
        self.assertEqual(my_account.compute_category_balance(Category.SALARY), t1.value)
        self.assertEqual(my_account.compute_category_balance(Category.HOBBIES_SPORT), t2.value + t3.value)
        # BUG FIX: Currency.EUR was previously passed as assertEqual's third
        # positional argument, where unittest silently treats it as the
        # failure *msg*, not as part of the comparison.
        self.assertEqual(my_account.compute_category_balance(Category.HOUSING), 0.0)
from datetime import date
from app.domain.model import (
Currency,
Category,
Account,
Operation
)
class TestAccount(TestCase):
    """Unit tests for the Account domain model."""

    def test_hashes_must_be_identical(self):
        # Account identity is derived from its id string, so hashes must match.
        account = Account("abc", Currency.EUR, [])
        self.assertEqual(hash(account), hash("abc"))

    def test_add_operation_must_append_to_account(self):
        my_account = Account("uuid", Currency.EUR, [])
        t1 = Operation("my operation", date(2022, 1, 26), -12345.90, Currency.EUR, Category.HOUSING)
        my_account.add_operation(t1)
        self.assertEqual(my_account.operations, [t1])

    def test_add_operation_must_raise_exception(self):
        # Raw strings instead of enum members must be rejected by the account.
        my_account = Account("uuid", Currency.USD, [])
        with self.assertRaises(ValueError):
            new_op = Operation("my operation", date(2022, 1, 26), -12345.90, 'EUR', 'HOUSING')
            my_account.add_operation(new_op)

    def test_compute_balance_must_return_value(self):
        t1 = Operation("My operation one", date.today(), 12.36, Currency.EUR)
        t2 = Operation("My operation two", date.today(), 29.78, Currency.EUR)
        my_account = Account("uuid", Currency.EUR, [t1, t2])
        self.assertEqual(my_account.compute_balance(), t1.value + t2.value)

    def test_compute_balance_category_must_return_value(self):
        t1 = Operation("My operation one", date.today(), 1290.36, Currency.EUR, Category.SALARY)
        t2 = Operation("My operation two", date.today(), 29.78, Currency.EUR, Category.HOBBIES_SPORT)
        t3 = Operation("My operation three", date.today(), 4.99, Currency.EUR, Category.HOBBIES_SPORT)
        my_account = Account("uuid", Currency.EUR, [t1, t2, t3])
        self.assertEqual(my_account.compute_category_balance(Category.SALARY), t1.value)
        self.assertEqual(my_account.compute_category_balance(Category.HOBBIES_SPORT), t2.value + t3.value)
        # BUG FIX: Currency.EUR was previously passed as assertEqual's third
        # positional argument, where unittest silently treats it as the
        # failure *msg*, not as part of the comparison.
        self.assertEqual(my_account.compute_category_balance(Category.HOUSING), 0.0)
"""Implements user interface."""
import os
import argparse
from six.moves import input, configparser
from . import core
def _exp_path(path):
    """Return *path* with a leading ``~`` expanded and made absolute."""
    expanded = os.path.expanduser(path)
    return os.path.abspath(expanded)
if __name__ == '__main__':
    # Load the cached hanger location (if any) from ~/.jetpack so it can be
    # offered as the default for the -s option.
    config = configparser.RawConfigParser()
    dot = os.path.join(os.path.expanduser('~'), '.jetpack')
    config.read(dot)
    hanger = None
    hanger_str = ''
    try:
        hanger = config.get('path', 'hanger')
        hanger_str = ' (default: {})'.format(hanger)
    except configparser.NoSectionError:
        # First run: create the section so it can be written back below.
        config.add_section('path')
    except configparser.NoOptionError:
        pass
    # Build the command-line interface.
    description = 'A utility for building jetpack templates.'
    epilog = 'hanger cached in ~/.jetpack'
    parser = argparse.ArgumentParser('jetpack', description=description,
                                     epilog=epilog)
    parser.add_argument('pack',
                        help='pack name')
    parser.add_argument('-s', metavar='hanger', dest='hanger',
                        help='hanger directory{}'.format(hanger_str))
    parser.add_argument('-d', metavar='destination', dest='dest',
                        help='destination directory (default: current ' \
                             'directory)')
    kwargs = vars(parser.parse_args())
    # Resolve the hanger path: CLI flag > cached config > interactive prompt.
    update_config = True
    if kwargs.get('hanger'):  # hanger set in terminal
        kwargs['hanger'] = _exp_path(kwargs.get('hanger'))
    elif hanger:  # hanger set in config
        kwargs['hanger'] = _exp_path(hanger)
        print('hanger: {} (default)'.format(kwargs['hanger']))
        update_config = False
    else:  # hanger set interactively
        kwargs['hanger'] = _exp_path(input('hanger: '))
    # Persist the (possibly new) hanger path back to ~/.jetpack.
    if update_config:
        config.set('path', 'hanger', kwargs.get('hanger'))
        with open(dot, 'w') as f:
            config.write(f)
    # Destination defaults to the current working directory.
    if kwargs.get('dest'):
        kwargs['dest'] = _exp_path(kwargs.get('dest'))
    else:
        kwargs['dest'] = _exp_path(os.curdir)
    # Remaining template metadata is collected interactively.
    kwargs['name'] = input('name: ')
    kwargs['description'] = input('description: ')
    core.launch(**kwargs)
import os
import argparse
from six.moves import input, configparser
from . import core
def _exp_path(path):
    """Return *path* with a leading ``~`` expanded and made absolute."""
    expanded = os.path.expanduser(path)
    return os.path.abspath(expanded)
if __name__ == '__main__':
    # Load the cached hanger location (if any) from ~/.jetpack so it can be
    # offered as the default for the -s option.
    config = configparser.RawConfigParser()
    dot = os.path.join(os.path.expanduser('~'), '.jetpack')
    config.read(dot)
    hanger = None
    hanger_str = ''
    try:
        hanger = config.get('path', 'hanger')
        hanger_str = ' (default: {})'.format(hanger)
    except configparser.NoSectionError:
        # First run: create the section so it can be written back below.
        config.add_section('path')
    except configparser.NoOptionError:
        pass
    # Build the command-line interface.
    description = 'A utility for building jetpack templates.'
    epilog = 'hanger cached in ~/.jetpack'
    parser = argparse.ArgumentParser('jetpack', description=description,
                                     epilog=epilog)
    parser.add_argument('pack',
                        help='pack name')
    parser.add_argument('-s', metavar='hanger', dest='hanger',
                        help='hanger directory{}'.format(hanger_str))
    parser.add_argument('-d', metavar='destination', dest='dest',
                        help='destination directory (default: current ' \
                             'directory)')
    kwargs = vars(parser.parse_args())
    # Resolve the hanger path: CLI flag > cached config > interactive prompt.
    update_config = True
    if kwargs.get('hanger'):  # hanger set in terminal
        kwargs['hanger'] = _exp_path(kwargs.get('hanger'))
    elif hanger:  # hanger set in config
        kwargs['hanger'] = _exp_path(hanger)
        print('hanger: {} (default)'.format(kwargs['hanger']))
        update_config = False
    else:  # hanger set interactively
        kwargs['hanger'] = _exp_path(input('hanger: '))
    # Persist the (possibly new) hanger path back to ~/.jetpack.
    if update_config:
        config.set('path', 'hanger', kwargs.get('hanger'))
        with open(dot, 'w') as f:
            config.write(f)
    # Destination defaults to the current working directory.
    if kwargs.get('dest'):
        kwargs['dest'] = _exp_path(kwargs.get('dest'))
    else:
        kwargs['dest'] = _exp_path(os.curdir)
    # Remaining template metadata is collected interactively.
    kwargs['name'] = input('name: ')
    kwargs['description'] = input('description: ')
    core.launch(**kwargs)
__version__ = "0.1.0"
import os
from datetime import date, datetime, timedelta
import connexion
from flask.templating import render_template
from flask_sqlalchemy import SQLAlchemy
from flask_marshmallow import Marshmallow
# @connex_app.route("/api/v1.0/precipitation")
def precipitation():
    """Return every measurement row as a list of JSON-ready dicts.

    Handler for the ``/api/v1.0/precipitation`` endpoint wired up through
    openapi.yaml.
    """
    # Idempotent: only creates tables that do not already exist.
    db.create_all()
    # Cleanup: removed leftover debug print() calls and renamed the local
    # variable that shadowed this function's own name.
    measurements = Measurement.query.all()
    return MeasurementSchema(many=True).dump(measurements)
# @connex_app.route("/api/v1.0/stations")
def stations():
return
# @connex_app.route("/api/v1.0/tobs")
def temp_monthly():
return
# @connex_app.route("/api/v1.0/temp/<start>")
# @connex_app.route("/api/v1.0/temp/<start>/<end>")
def stats():
return
# Create the connexion application instance
connex_app = connexion.FlaskApp(__name__)
# Read the openapi.yaml file to configure the endpoints
connex_app.add_api("openapi.yaml")
# Get the underlying Flask app instance
app = connex_app.app
# basedir = os.path.abspath(os.path.dirname(__file__))
# # Build the Sqlite ULR for SQLAlchemy
# sqlite_url = "sqlite:////" + os.path.join(basedir, "hawaii.db")
# Configure the SQLAlchemy part of the app instance.
# NOTE(review): the relative sqlite URL depends on the working directory
# containing notebook/hawaii.db — confirm this holds in deployment.
app.config["SQLALCHEMY_DATABASE_URI"] = "sqlite:///notebook/hawaii.db"
# Echoes every SQL statement (debug aid).
app.config["SQLALCHEMY_ECHO"] = True
app.config["SQLALCHEMY_TRACK_MODIFICATIONS"] = False
# Flask-SQLAlchemy makes using the database outside of a Flask context difficult.
# I ran into this issue when I wanted to still use my SQLAlchemy scripts in FastAPI.
# Create the SQLAlchemy db instance
db = SQLAlchemy(app)


class Measurement(db.Model):
    """ORM mapping of the ``measurement`` table."""
    __tablename__ = "measurement"
    id = db.Column(db.Integer, primary_key=True)
    station = db.Column(db.String)
    date = db.Column(db.String)  # stored as text, not a DATE column
    prcp = db.Column(db.Float)
    tobs = db.Column(db.Float)


class Station(db.Model):
    """ORM mapping of the ``station`` table."""
    __tablename__ = "station"
    id = db.Column(db.Integer, primary_key=True)
    station = db.Column(db.String)
    name = db.Column(db.String)
    latitude = db.Column(db.Float)
    longitude = db.Column(db.Float)
    elevation = db.Column(db.Float)


# Initialize Marshmallow
ma = Marshmallow(app)


class MeasurementSchema(ma.SQLAlchemySchema):
    """Marshmallow (de)serialization schema for Measurement."""
    class Meta:
        model = Measurement
        sqla_session = db.session
    id = ma.auto_field()
    station = ma.auto_field()
    date = ma.auto_field()
    prcp = ma.auto_field()
    tobs = ma.auto_field()


class StationSchema(ma.SQLAlchemySchema):
    """Marshmallow (de)serialization schema for Station."""
    class Meta:
        model = Station
        sqla_session = db.session
    id = ma.auto_field()
    station = ma.auto_field()
    name = ma.auto_field()
    latitude = ma.auto_field()
    longitude = ma.auto_field()
    elevation = ma.auto_field()


db.init_app(app)


# Create a URL route in our application for "/"
@connex_app.route("/")
def index():
    """
    This function just responds to the browser URL
    localhost:5000/
    :return: the rendered template "index.html"
    """
    return render_template("index.html")


if __name__ == "__main__":
    connex_app.run(debug=True)
import os
from datetime import date, datetime, timedelta
import connexion
from flask.templating import render_template
from flask_sqlalchemy import SQLAlchemy
from flask_marshmallow import Marshmallow
# @connex_app.route("/api/v1.0/precipitation")
def precipitation():
    """Return every measurement row as a list of JSON-ready dicts.

    Handler for the ``/api/v1.0/precipitation`` endpoint wired up through
    openapi.yaml.
    """
    # Idempotent: only creates tables that do not already exist.
    db.create_all()
    # Cleanup: removed leftover debug print() calls and renamed the local
    # variable that shadowed this function's own name.
    measurements = Measurement.query.all()
    return MeasurementSchema(many=True).dump(measurements)
# @connex_app.route("/api/v1.0/stations")
def stations():
return
# @connex_app.route("/api/v1.0/tobs")
def temp_monthly():
return
# @connex_app.route("/api/v1.0/temp/<start>")
# @connex_app.route("/api/v1.0/temp/<start>/<end>")
def stats():
return
# Create the connexion application instance
connex_app = connexion.FlaskApp(__name__)
# Read the openapi.yaml file to configure the endpoints
connex_app.add_api("openapi.yaml")
# Get the underlying Flask app instance
app = connex_app.app
# basedir = os.path.abspath(os.path.dirname(__file__))
# # Build the Sqlite ULR for SQLAlchemy
# sqlite_url = "sqlite:////" + os.path.join(basedir, "hawaii.db")
# Configure the SQLAlchemy part of the app instance.
# NOTE(review): the relative sqlite URL depends on the working directory
# containing notebook/hawaii.db — confirm this holds in deployment.
app.config["SQLALCHEMY_DATABASE_URI"] = "sqlite:///notebook/hawaii.db"
# Echoes every SQL statement (debug aid).
app.config["SQLALCHEMY_ECHO"] = True
app.config["SQLALCHEMY_TRACK_MODIFICATIONS"] = False
# Flask-SQLAlchemy makes using the database outside of a Flask context difficult.
# I ran into this issue when I wanted to still use my SQLAlchemy scripts in FastAPI.
# Create the SQLAlchemy db instance
db = SQLAlchemy(app)


class Measurement(db.Model):
    """ORM mapping of the ``measurement`` table."""
    __tablename__ = "measurement"
    id = db.Column(db.Integer, primary_key=True)
    station = db.Column(db.String)
    date = db.Column(db.String)  # stored as text, not a DATE column
    prcp = db.Column(db.Float)
    tobs = db.Column(db.Float)


class Station(db.Model):
    """ORM mapping of the ``station`` table."""
    __tablename__ = "station"
    id = db.Column(db.Integer, primary_key=True)
    station = db.Column(db.String)
    name = db.Column(db.String)
    latitude = db.Column(db.Float)
    longitude = db.Column(db.Float)
    elevation = db.Column(db.Float)


# Initialize Marshmallow
ma = Marshmallow(app)


class MeasurementSchema(ma.SQLAlchemySchema):
    """Marshmallow (de)serialization schema for Measurement."""
    class Meta:
        model = Measurement
        sqla_session = db.session
    id = ma.auto_field()
    station = ma.auto_field()
    date = ma.auto_field()
    prcp = ma.auto_field()
    tobs = ma.auto_field()


class StationSchema(ma.SQLAlchemySchema):
    """Marshmallow (de)serialization schema for Station."""
    class Meta:
        model = Station
        sqla_session = db.session
    id = ma.auto_field()
    station = ma.auto_field()
    name = ma.auto_field()
    latitude = ma.auto_field()
    longitude = ma.auto_field()
    elevation = ma.auto_field()


db.init_app(app)


# Create a URL route in our application for "/"
@connex_app.route("/")
def index():
    """
    This function just responds to the browser URL
    localhost:5000/
    :return: the rendered template "index.html"
    """
    return render_template("index.html")


if __name__ == "__main__":
    connex_app.run(debug=True)
"""Function implementation"""
import datetime
import logging
from resilient_lib import validate_fields, RequestsCommon
from fn_create_webex_meeting.lib.cisco_api import WebexAPI
# Module-level logger.  BUG FIX: this setup block was duplicated, which
# attached two StreamHandlers and emitted every log record twice.
log = logging.getLogger(__name__)
log.setLevel(logging.INFO)
log.addHandler(logging.StreamHandler())

PACKAGE_NAME = "fn_create_webex_meeting"
def selftest_function(opts):
    """
    Exercise package connectivity by scheduling a short Webex meeting one
    day in the future.

    :param opts: full app config dict; the ``fn_create_webex_meeting``
        section supplies credentials and site settings.
    :return: dict with ``state`` ("success"/"failure") and ``reason``.
    """
    options = opts.get(PACKAGE_NAME, {})
    required_fields = ["webex_email", "webex_password", "webex_site_url", "webex_timezone"]
    validate_fields(required_fields, options)

    # Build the option dict consumed by WebexAPI.  Previously this rebinding
    # shadowed the incoming ``opts`` parameter, which made the code confusing.
    # NOTE(review): RequestsCommon receives this (still empty) dict, exactly
    # as the original code did — confirm that is intended rather than the
    # full app config.
    webex_opts = dict()
    webex_opts["rc"] = RequestsCommon(webex_opts, options)
    webex_opts["webex_site_url"] = options.get("webex_site_url")
    webex_opts["email"] = options.get("webex_email")
    webex_opts["password"] = options.get("webex_password")
    webex_opts["sitename"] = options.get("webex_site")
    webex_opts["timezone"] = options.get("webex_timezone")
    webex_opts["meeting_password"] = "<PASSWORD>#"
    webex_opts["meeting_name"] = "SelfTest Meeting"
    webex_opts["meeting_agenda"] = "Agenda"

    # Meeting window: starts one day from now, lasts 10 minutes, expressed
    # as epoch milliseconds.
    # BUG FIX: use an aware UTC datetime; naive utcnow().timestamp() is
    # interpreted as *local* time and skews the epoch by the UTC offset.
    now = datetime.datetime.now(datetime.timezone.utc)
    meeting_start = now + datetime.timedelta(days=1)
    meeting_end = meeting_start + datetime.timedelta(minutes=10)
    webex_meeting_start_time = int(meeting_start.timestamp() * 1000)
    webex_meeting_end_time = int(meeting_end.timestamp() * 1000)

    try:
        webex = WebexAPI(webex_opts, webex_meeting_start_time, webex_meeting_end_time)
        response = webex.create_meeting()
        if response.get("status") == "SUCCESS":
            return {"state": "success",
                    "reason": "success"}
        return {"state": "failure",
                "reason": response.get("fail_reason")}
    except Exception as err:
        # BUG FIX: return the message rather than the exception object so
        # the result stays serializable.
        return {"state": "failure",
                "reason": str(err)}
import datetime
import logging
from resilient_lib import validate_fields, RequestsCommon
from fn_create_webex_meeting.lib.cisco_api import WebexAPI
# Module-level logger.  BUG FIX: this setup block was duplicated, which
# attached two StreamHandlers and emitted every log record twice.
log = logging.getLogger(__name__)
log.setLevel(logging.INFO)
log.addHandler(logging.StreamHandler())

PACKAGE_NAME = "fn_create_webex_meeting"
def selftest_function(opts):
    """
    Exercise package connectivity by scheduling a short Webex meeting one
    day in the future.

    :param opts: full app config dict; the ``fn_create_webex_meeting``
        section supplies credentials and site settings.
    :return: dict with ``state`` ("success"/"failure") and ``reason``.
    """
    options = opts.get(PACKAGE_NAME, {})
    required_fields = ["webex_email", "webex_password", "webex_site_url", "webex_timezone"]
    validate_fields(required_fields, options)

    # Build the option dict consumed by WebexAPI.  Previously this rebinding
    # shadowed the incoming ``opts`` parameter, which made the code confusing.
    # NOTE(review): RequestsCommon receives this (still empty) dict, exactly
    # as the original code did — confirm that is intended rather than the
    # full app config.
    webex_opts = dict()
    webex_opts["rc"] = RequestsCommon(webex_opts, options)
    webex_opts["webex_site_url"] = options.get("webex_site_url")
    webex_opts["email"] = options.get("webex_email")
    webex_opts["password"] = options.get("webex_password")
    webex_opts["sitename"] = options.get("webex_site")
    webex_opts["timezone"] = options.get("webex_timezone")
    webex_opts["meeting_password"] = "<PASSWORD>#"
    webex_opts["meeting_name"] = "SelfTest Meeting"
    webex_opts["meeting_agenda"] = "Agenda"

    # Meeting window: starts one day from now, lasts 10 minutes, expressed
    # as epoch milliseconds.
    # BUG FIX: use an aware UTC datetime; naive utcnow().timestamp() is
    # interpreted as *local* time and skews the epoch by the UTC offset.
    now = datetime.datetime.now(datetime.timezone.utc)
    meeting_start = now + datetime.timedelta(days=1)
    meeting_end = meeting_start + datetime.timedelta(minutes=10)
    webex_meeting_start_time = int(meeting_start.timestamp() * 1000)
    webex_meeting_end_time = int(meeting_end.timestamp() * 1000)

    try:
        webex = WebexAPI(webex_opts, webex_meeting_start_time, webex_meeting_end_time)
        response = webex.create_meeting()
        if response.get("status") == "SUCCESS":
            return {"state": "success",
                    "reason": "success"}
        return {"state": "failure",
                "reason": response.get("fail_reason")}
    except Exception as err:
        # BUG FIX: return the message rather than the exception object so
        # the result stays serializable.
        return {"state": "failure",
                "reason": str(err)}
from django.db import models
# Create your models here.
class MainModel(models.Model):
    """Model backed by the ``main`` table: a title with a longer subtitle."""
    title = models.CharField('title', db_column='title', max_length=32, null=False, blank=False)
    subtitle = models.CharField('subtitle', db_column='subtitle', max_length=128, null=False, blank=False)

    class Meta:
        managed = True
        db_table = 'main'
        verbose_name = 'Main'
        verbose_name_plural = 'MainItems'

    def __str__(self):
        # Human-readable representation.
        return self.title
class PersonModel(models.Model):
    """Model backed by the ``person`` table: a name and an optional age."""
    name = models.CharField('name', db_column='name', max_length=32, null=False, blank=False)
    age = models.IntegerField('age', db_column='age', null=True, blank=True, default=0)

    class Meta:
        managed = True
        db_table = 'person'
        verbose_name = 'Person'
        verbose_name_plural = 'People'

    def __str__(self):
        # Human-readable representation.
        return self.name
class BookModel(models.Model):
    """Model backed by the ``book`` table: a single name field."""
    name = models.CharField('name', db_column='name', max_length=64, null=False, blank=False)

    class Meta:
        managed = True
        db_table = 'book'
        verbose_name = 'Book'
        verbose_name_plural = 'Books'

    def __str__(self):
        # Human-readable representation.
        return self.name
class Prop1(models.Model):
    """Key/value pair stored in the ``prop1`` table."""
    key = models.CharField('key', db_column='key', max_length=32, null=False, blank=False)
    value = models.CharField('value', db_column='value', max_length=32, null=False, blank=False)

    class Meta:
        managed = True
        db_table = 'prop1'
        verbose_name = 'Prop1'
        verbose_name_plural = 'Prop1'

    def __str__(self):
        # Human-readable representation.
        return self.key
class Prop2(models.Model):
    """Key/value pair stored in the ``prop2`` table."""
    key = models.CharField('key', db_column='key', max_length=32, null=False, blank=False)
    value = models.CharField('value', db_column='value', max_length=32, null=False, blank=False)

    class Meta:
        managed = True
        db_table = 'prop2'
        verbose_name = 'Prop2'
        verbose_name_plural = 'Prop2'

    def __str__(self):
        # Human-readable representation.
        return self.key
class Config(models.Model):
    """Key/value configuration entry stored in the ``config`` table."""
    key = models.CharField('key', db_column='key', max_length=32, null=False, blank=False)
    value = models.CharField('value', db_column='value', max_length=32, null=False, blank=False)

    class Meta:
        managed = True
        db_table = 'config'
        verbose_name = 'Config'
        verbose_name_plural = 'Configs'

    def __str__(self):
        # Human-readable representation.
        return self.key
# Create your models here.
class MainModel(models.Model):
    """Model backed by the ``main`` table: a title with a longer subtitle."""
    title = models.CharField('title', db_column='title', max_length=32, null=False, blank=False)
    subtitle = models.CharField('subtitle', db_column='subtitle', max_length=128, null=False, blank=False)

    class Meta:
        managed = True
        db_table = 'main'
        verbose_name = 'Main'
        verbose_name_plural = 'MainItems'

    def __str__(self):
        # Human-readable representation.
        return self.title
class PersonModel(models.Model):
    """Model backed by the ``person`` table: a name and an optional age."""
    name = models.CharField('name', db_column='name', max_length=32, null=False, blank=False)
    age = models.IntegerField('age', db_column='age', null=True, blank=True, default=0)

    class Meta:
        managed = True
        db_table = 'person'
        verbose_name = 'Person'
        verbose_name_plural = 'People'

    def __str__(self):
        # Human-readable representation.
        return self.name
class BookModel(models.Model):
    """Model backed by the ``book`` table: a single name field."""
    name = models.CharField('name', db_column='name', max_length=64, null=False, blank=False)

    class Meta:
        managed = True
        db_table = 'book'
        verbose_name = 'Book'
        verbose_name_plural = 'Books'

    def __str__(self):
        # Human-readable representation.
        return self.name
class Prop1(models.Model):
    """Key/value pair stored in the ``prop1`` table."""
    key = models.CharField('key', db_column='key', max_length=32, null=False, blank=False)
    value = models.CharField('value', db_column='value', max_length=32, null=False, blank=False)

    class Meta:
        managed = True
        db_table = 'prop1'
        verbose_name = 'Prop1'
        verbose_name_plural = 'Prop1'

    def __str__(self):
        # Human-readable representation.
        return self.key
class Prop2(models.Model):
    """Key/value pair stored in the ``prop2`` table."""
    key = models.CharField('key', db_column='key', max_length=32, null=False, blank=False)
    value = models.CharField('value', db_column='value', max_length=32, null=False, blank=False)

    class Meta:
        managed = True
        db_table = 'prop2'
        verbose_name = 'Prop2'
        verbose_name_plural = 'Prop2'

    def __str__(self):
        # Human-readable representation.
        return self.key
class Config(models.Model):
    """Key/value configuration entry stored in the ``config`` table."""
    key = models.CharField('key', db_column='key', max_length=32, null=False, blank=False)
    value = models.CharField('value', db_column='value', max_length=32, null=False, blank=False)

    class Meta:
        managed = True
        db_table = 'config'
        verbose_name = 'Config'
        verbose_name_plural = 'Configs'

    def __str__(self):
        # Human-readable representation.
        return self.key
from functools import lru_cache
from django.core.exceptions import ObjectDoesNotExist
from django.db import models
from django.db.models.constants import LOOKUP_SEP
from treebeard.mp_tree import MP_Node
from wagtail.core import hooks
from wagtail.core.models import Page
from .field_adapters import adapter_registry
from .models import get_base_model
def _get_subclasses_recurse(model):
    """
    Collect select_related-style lookup strings for every concrete subclass
    of *model*, following parent-link OneToOne relations recursively.
    Adapted from django-model-utils' managers module.
    """
    one_to_one_rels = [
        f for f in model._meta.get_fields() if isinstance(f, models.OneToOneRel)
    ]
    parent_links = [
        rel for rel in one_to_one_rels
        if isinstance(rel.field, models.OneToOneField)
        and issubclass(rel.field.model, model)
        and model is not rel.field.model
        and rel.parent_link
    ]
    lookups = []
    for rel in parent_links:
        accessor = rel.get_accessor_name()
        # Deeper descendants first, then the direct child accessor itself.
        for child_lookup in _get_subclasses_recurse(rel.field.model):
            lookups.append(accessor + LOOKUP_SEP + child_lookup)
        lookups.append(accessor)
    return lookups
def _get_sub_obj_recurse(obj, lookup):
    """
    Follow a LOOKUP_SEP-separated accessor chain from *obj* down to its most
    specific subclass instance; return None when a hop does not exist.
    Taken from django-model-utils' managers module.
    """
    accessor, _, remainder = lookup.partition(LOOKUP_SEP)
    try:
        node = getattr(obj, accessor)
    except ObjectDoesNotExist:
        return None
    if not remainder:
        return node
    return _get_sub_obj_recurse(node, remainder)
def get_subclass_instances(instances, subclasses):
    """
    Resolve each instance to its most specific subclass instance: the first
    truthy result among the *subclasses* lookups, falling back to the
    instance itself when no lookup matches.
    """
    def most_specific(instance):
        candidates = (_get_sub_obj_recurse(instance, lookup) for lookup in subclasses)
        return next((candidate for candidate in candidates if candidate), None) or instance

    return [most_specific(instance) for instance in instances]
class ModelSerializer:
    """Serializes instances of one model through per-field adapter objects."""

    ignored_fields = []

    def __init__(self, model):
        self.model = model
        self.base_model = get_base_model(model)

        candidate_adapters = []
        managed_field_names = []
        for field in self.model._meta.get_fields():
            if field.name in self.ignored_fields:
                continue
            # Primary keys (including MTI parent pointers) are never adapted.
            if getattr(field, 'primary_key', False):
                continue
            field_adapter = adapter_registry.get_field_adapter(field)
            if field_adapter:
                managed_field_names.extend(field_adapter.get_managed_fields())
                candidate_adapters.append(field_adapter)

        # Drop adapters whose field is already managed by another adapter.
        self.field_adapters = [
            candidate for candidate in candidate_adapters
            if candidate.name not in managed_field_names
        ]

    def get_objects_by_ids(self, ids):
        """
        Return model instances for the given IDs, resolved to their most
        specific subclass when multi-table inheritance is in use, ready for
        serialize() and get_object_references().
        """
        queryset = self.model.objects.filter(pk__in=ids)
        return get_subclass_instances(queryset, _get_subclasses_recurse(self.model))

    def serialize_fields(self, instance):
        """Map each adapted field name to its serialized value."""
        return {adapter.name: adapter.serialize(instance) for adapter in self.field_adapters}

    def serialize(self, instance):
        """Produce the exportable dict for one instance."""
        return {
            'model': self.model._meta.label_lower,
            'pk': instance.pk,
            'fields': self.serialize_fields(instance),
        }

    def get_object_references(self, instance):
        """Collect (base_model, pk) pairs referenced by this instance."""
        # The instance's own primary key is always a reference.
        references = {(self.base_model, instance.pk)}
        for adapter in self.field_adapters:
            references.update(adapter.get_object_references(instance))
        return references

    def get_objects_to_serialize(self, instance):
        """Collect related objects that must be serialized alongside."""
        related = set()
        for adapter in self.field_adapters:
            related.update(adapter.get_objects_to_serialize(instance))
        return related
class TreeModelSerializer(ModelSerializer):
    """Serializer for treebeard MP_Node models: tracks the parent link instead of raw tree fields."""

    ignored_fields = ['path', 'depth', 'numchild']

    def serialize(self, instance):
        """Add a parent_id entry (None for the tree root) to the base payload."""
        data = super().serialize(instance)
        data['parent_id'] = None if instance.is_root() else instance.get_parent().pk
        return data

    def get_object_references(self, instance):
        """Also reference the parent node when the instance is not the root."""
        references = super().get_object_references(instance)
        if not instance.is_root():
            references.add((self.base_model, instance.get_parent().pk))
        return references
class PageSerializer(TreeModelSerializer):
    """Serializer for Wagtail Page instances."""
    # Wagtail bookkeeping/state fields excluded from the transfer payload.
    ignored_fields = TreeModelSerializer.ignored_fields + [
        'url_path', 'content_type', 'draft_title', 'has_unpublished_changes', 'owner',
        'go_live_at', 'expire_at', 'expired', 'locked', 'first_published_at', 'last_published_at',
        'latest_revision_created_at', 'live_revision',
    ]

    def get_objects_by_ids(self, ids):
        # serialize method needs the instance in its specific form
        return self.model.objects.filter(pk__in=ids).specific()
class SerializerRegistry:
    """Maps model classes to serializer classes, honoring hook registrations."""

    BASE_SERIALIZERS_BY_MODEL_CLASS = {
        models.Model: ModelSerializer,
        MP_Node: TreeModelSerializer,
        Page: PageSerializer,
    }

    def __init__(self):
        self._scanned_for_serializers = False
        self.serializers_by_model_class = {}
        # Per-registry memo of model -> serializer instance.
        # BUG FIX: replaces @lru_cache on get_model_serializer, which keys
        # on `self` and keeps the registry and every cached model class
        # alive for the lifetime of the process (ruff B019).
        self._serializer_cache = {}

    def _scan_for_serializers(self):
        """Merge hook-registered serializers over the built-in defaults (run once)."""
        serializers = dict(self.BASE_SERIALIZERS_BY_MODEL_CLASS)
        for fn in hooks.get_hooks('register_custom_serializers'):
            serializers.update(fn())
        self.serializers_by_model_class = serializers
        self._scanned_for_serializers = True

    def get_model_serializer(self, model):
        """
        Return (and cache) a serializer for the most specific class in
        *model*'s MRO with a registered serializer; None when nothing matches.
        """
        if model in self._serializer_cache:
            return self._serializer_cache[model]
        if not self._scanned_for_serializers:
            self._scan_for_serializers()
        serializer = None
        for cls in model.__mro__:
            if cls in self.serializers_by_model_class:
                serializer = self.serializers_by_model_class[cls](model)
                break
        self._serializer_cache[model] = serializer
        return serializer


# Module-level singleton used throughout the package.
serializer_registry = SerializerRegistry()
from django.core.exceptions import ObjectDoesNotExist
from django.db import models
from django.db.models.constants import LOOKUP_SEP
from treebeard.mp_tree import MP_Node
from wagtail.core import hooks
from wagtail.core.models import Page
from .field_adapters import adapter_registry
from .models import get_base_model
def _get_subclasses_recurse(model):
    """
    Collect select_related-style lookup strings for every concrete subclass
    of *model*, following parent-link OneToOne relations recursively.
    Adapted from django-model-utils' managers module.
    """
    one_to_one_rels = [
        f for f in model._meta.get_fields() if isinstance(f, models.OneToOneRel)
    ]
    parent_links = [
        rel for rel in one_to_one_rels
        if isinstance(rel.field, models.OneToOneField)
        and issubclass(rel.field.model, model)
        and model is not rel.field.model
        and rel.parent_link
    ]
    lookups = []
    for rel in parent_links:
        accessor = rel.get_accessor_name()
        # Deeper descendants first, then the direct child accessor itself.
        for child_lookup in _get_subclasses_recurse(rel.field.model):
            lookups.append(accessor + LOOKUP_SEP + child_lookup)
        lookups.append(accessor)
    return lookups
def _get_sub_obj_recurse(obj, lookup):
    """
    Follow a LOOKUP_SEP-separated accessor chain from *obj* down to its most
    specific subclass instance; return None when a hop does not exist.
    Taken from django-model-utils' managers module.
    """
    accessor, _, remainder = lookup.partition(LOOKUP_SEP)
    try:
        node = getattr(obj, accessor)
    except ObjectDoesNotExist:
        return None
    if not remainder:
        return node
    return _get_sub_obj_recurse(node, remainder)
def get_subclass_instances(instances, subclasses):
    """
    Resolve each instance to its most specific subclass instance: the first
    truthy result among the *subclasses* lookups, falling back to the
    instance itself when no lookup matches.
    """
    def most_specific(instance):
        candidates = (_get_sub_obj_recurse(instance, lookup) for lookup in subclasses)
        return next((candidate for candidate in candidates if candidate), None) or instance

    return [most_specific(instance) for instance in instances]
class ModelSerializer:
    """Serializes instances of one model through per-field adapter objects."""

    ignored_fields = []

    def __init__(self, model):
        self.model = model
        self.base_model = get_base_model(model)

        candidate_adapters = []
        managed_field_names = []
        for field in self.model._meta.get_fields():
            if field.name in self.ignored_fields:
                continue
            # Primary keys (including MTI parent pointers) are never adapted.
            if getattr(field, 'primary_key', False):
                continue
            field_adapter = adapter_registry.get_field_adapter(field)
            if field_adapter:
                managed_field_names.extend(field_adapter.get_managed_fields())
                candidate_adapters.append(field_adapter)

        # Drop adapters whose field is already managed by another adapter.
        self.field_adapters = [
            candidate for candidate in candidate_adapters
            if candidate.name not in managed_field_names
        ]

    def get_objects_by_ids(self, ids):
        """
        Return model instances for the given IDs, resolved to their most
        specific subclass when multi-table inheritance is in use, ready for
        serialize() and get_object_references().
        """
        queryset = self.model.objects.filter(pk__in=ids)
        return get_subclass_instances(queryset, _get_subclasses_recurse(self.model))

    def serialize_fields(self, instance):
        """Map each adapted field name to its serialized value."""
        return {adapter.name: adapter.serialize(instance) for adapter in self.field_adapters}

    def serialize(self, instance):
        """Produce the exportable dict for one instance."""
        return {
            'model': self.model._meta.label_lower,
            'pk': instance.pk,
            'fields': self.serialize_fields(instance),
        }

    def get_object_references(self, instance):
        """Collect (base_model, pk) pairs referenced by this instance."""
        # The instance's own primary key is always a reference.
        references = {(self.base_model, instance.pk)}
        for adapter in self.field_adapters:
            references.update(adapter.get_object_references(instance))
        return references

    def get_objects_to_serialize(self, instance):
        """Collect related objects that must be serialized alongside."""
        related = set()
        for adapter in self.field_adapters:
            related.update(adapter.get_objects_to_serialize(instance))
        return related
class TreeModelSerializer(ModelSerializer):
    """Serializer for treebeard MP_Node models: tracks the parent link instead of raw tree fields."""

    ignored_fields = ['path', 'depth', 'numchild']

    def serialize(self, instance):
        """Add a parent_id entry (None for the tree root) to the base payload."""
        data = super().serialize(instance)
        data['parent_id'] = None if instance.is_root() else instance.get_parent().pk
        return data

    def get_object_references(self, instance):
        """Also reference the parent node when the instance is not the root."""
        references = super().get_object_references(instance)
        if not instance.is_root():
            references.add((self.base_model, instance.get_parent().pk))
        return references
class PageSerializer(TreeModelSerializer):
    """Serializer for Wagtail Page instances."""
    # Wagtail bookkeeping/state fields excluded from the transfer payload.
    ignored_fields = TreeModelSerializer.ignored_fields + [
        'url_path', 'content_type', 'draft_title', 'has_unpublished_changes', 'owner',
        'go_live_at', 'expire_at', 'expired', 'locked', 'first_published_at', 'last_published_at',
        'latest_revision_created_at', 'live_revision',
    ]

    def get_objects_by_ids(self, ids):
        # serialize method needs the instance in its specific form
        return self.model.objects.filter(pk__in=ids).specific()
class SerializerRegistry:
    """Maps model classes to serializer classes, honoring hook registrations."""

    BASE_SERIALIZERS_BY_MODEL_CLASS = {
        models.Model: ModelSerializer,
        MP_Node: TreeModelSerializer,
        Page: PageSerializer,
    }

    def __init__(self):
        self._scanned_for_serializers = False
        self.serializers_by_model_class = {}
        # Per-registry memo of model -> serializer instance.
        # BUG FIX: replaces @lru_cache on get_model_serializer, which keys
        # on `self` and keeps the registry and every cached model class
        # alive for the lifetime of the process (ruff B019).
        self._serializer_cache = {}

    def _scan_for_serializers(self):
        """Merge hook-registered serializers over the built-in defaults (run once)."""
        serializers = dict(self.BASE_SERIALIZERS_BY_MODEL_CLASS)
        for fn in hooks.get_hooks('register_custom_serializers'):
            serializers.update(fn())
        self.serializers_by_model_class = serializers
        self._scanned_for_serializers = True

    def get_model_serializer(self, model):
        """
        Return (and cache) a serializer for the most specific class in
        *model*'s MRO with a registered serializer; None when nothing matches.
        """
        if model in self._serializer_cache:
            return self._serializer_cache[model]
        if not self._scanned_for_serializers:
            self._scan_for_serializers()
        serializer = None
        for cls in model.__mro__:
            if cls in self.serializers_by_model_class:
                serializer = self.serializers_by_model_class[cls](model)
                break
        self._serializer_cache[model] = serializer
        return serializer


# Module-level singleton used throughout the package.
serializer_registry = SerializerRegistry()
import os, sys
import numpy as np
import cv2
import matplotlib.pyplot as plt
from cv2_custom.marking import cv2_draw_label
from cv2_custom.transformation import scale_image
from PyQt5.QtWidgets import (QApplication, QWidget, QPushButton, QLineEdit, QInputDialog)
from config import *
import argparse
# CLI: camera id, run id, and the frame range over which the points
# selected in this session will later be tracked.
parser = argparse.ArgumentParser()
parser.add_argument('--camid', type=int, default=1)
parser.add_argument('--runid', type=int, default=1)
parser.add_argument('--start_frame', type=int, default=0)
parser.add_argument('--end_frame', type=int, default=-1)
args = parser.parse_args()
CAMID = args.camid
RUNID = args.runid
# BR2 Configuration
# Marker layout: NUM_RING rings with NUM_POINT points each, tagged
# '<RING_CHAR><ring>-<point>' (e.g. 'R3-5').
NUM_RING = 5
NUM_POINT = 9
RING_CHAR = 'R'
# Path
# POSTPROCESSING_PATH / PREPROCESSED_FOOTAGE_VIDEO_PATH / TRACKING_FILE come
# from the star import of config.
path = POSTPROCESSING_PATH
video_path = PREPROCESSED_FOOTAGE_VIDEO_PATH.format(CAMID, RUNID)
video_name = os.path.basename(video_path)
initial_point_file = TRACKING_FILE.format(CAMID, RUNID) # Initial point shared by experiment (save path)
# Set Colors
# Fixed seed so marker colours are reproducible across runs.
np.random.seed(100)
color = np.random.randint(0,235,(100,3)).astype(int)
# Capture Video
cap = cv2.VideoCapture(os.path.join(path, video_name))
length = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
# start_frame == -1 means "use the last frame of the video".
if args.start_frame == -1:
setattr(args, 'start_frame', length-1)
#args.start_frame = length-1
cap.set(cv2.CAP_PROP_POS_FRAMES, args.start_frame)
ret, curr_frame = cap.read()
# end_frame == -1 means "track to the end of the video".
if args.end_frame == -1:
args.end_frame = length
assert args.start_frame < length
# A Qt application object is required for the QInputDialog tag prompts.
app = QApplication(sys.argv)
tags = []
points = []
# Mouse Handle
# prev_tag remembers the last tag entered so it can be suggested / reused.
prev_tag = ''
def mouse_event_click_point(event, x, y, flags, param):
    """OpenCV mouse callback: record a clicked point and ask for its tag.

    Left-click records (x, y) directly; right-click opens the zoomed
    second-layer picker (zoomed_inquiry) for finer selection.  Ctrl+click
    reuses the previous tag without opening the input dialog.  The point is
    appended to param['points'] (shape (1, 2) int32) and the tag to
    param['tags'].
    """
    global prev_tag
    points = param['points']
    tags = param['tags']
    bypass_inquiry = (flags & cv2.EVENT_FLAG_CTRLKEY)
    if event == cv2.EVENT_LBUTTONDOWN:
        point = np.array([x, y], dtype=np.int32).reshape([1, 2])
    elif event == cv2.EVENT_RBUTTONDOWN:
        # Second zoom-layer selection
        uv = zoomed_inquiry(param['frame'], np.array([x, y]))
        point = uv.astype(np.int32).reshape([1, 2])
    else:
        return
    points.append(point)
    # Ask for a tag in a separate window
    if bypass_inquiry:
        tag = prev_tag
    else:
        _ok = False
        while not _ok:
            tag, _ok = QInputDialog.getText(QWidget(), 'Tag', 'Input Tag', text=prev_tag)
    # Suggest the next ring tag ('R<ring>-<point>' -> 'R<ring+1>-<point>') as
    # the default for the following click.  The original indexed tag[0] and
    # tag.split('-')[1] unguarded, raising IndexError/ValueError for an empty
    # or malformed tag; fall back to the tag itself in that case.
    try:
        if tag.startswith('R'):
            ring, point_id = tag[1:].split('-')
            prev_tag = 'R' + str(int(ring) + 1) + '-' + str(int(point_id))
        else:
            prev_tag = tag
    except (ValueError, IndexError):
        prev_tag = tag
    tags.append(tag)
    print('added: ')
    print(point, tag)
def zoomed_inquiry(current_frame, uv, scale=5.0, disp_h=80, disp_w=80):
    """Open a magnified view around *uv* and let the user refine the point.

    A disp_w x disp_h region centred on *uv* is scaled up by *scale* and
    shown in its own window.  Left-click moves the candidate point,
    right-click resets it to the original click position, 'a' accepts the
    candidate, 'd' cancels (keeps the original position).

    Returns the chosen (x, y) as an int ndarray in full-frame coordinates.
    """
    x, y = uv
    x = int(x)
    y = int(y)
    # Region-of-interest preview on the full frame.
    window_name_roi = 'roi'
    cv2.namedWindow(window_name_roi)
    disp_img_roi = current_frame.copy()
    disp_img_roi = cv2.rectangle(disp_img_roi, (x - disp_w // 2, y - disp_h // 2),
                                 (x + disp_w // 2, y + disp_h // 2), (0, 0, 255), thickness=3)
    cv2.imshow(window_name_roi, disp_img_roi)
    # Pad so the crop stays valid at the frame border, then magnify the ROI.
    img = current_frame.copy()
    padded_img = cv2.copyMakeBorder(img, disp_h // 2, disp_h // 2, disp_w // 2, disp_w // 2,
                                    cv2.BORDER_CONSTANT, value=[0, 0, 0])
    scaled_img = scale_image(padded_img[y:y + disp_h, x:x + disp_w], scale)
    # _uv is the candidate point in zoomed-window pixel coordinates
    # (initialised to the window centre, i.e. the original click).
    _x = int(disp_w * scale / 2)
    _y = int(disp_h * scale / 2)
    _uv = np.array([_x, _y])
    original_uv = _uv.copy()

    def onMouse(event, x, y, flags, param):
        uv = param['uv']
        original_uv = param['original_uv']
        if event == cv2.EVENT_LBUTTONDOWN:
            uv[0] = x
            uv[1] = y
        elif event == cv2.EVENT_RBUTTONDOWN:
            # Reset the candidate back to the original click position.
            uv[:] = original_uv

    # Inquiry loop
    inquiry_on = True
    window_name = 'select reappeared point'
    cv2.namedWindow(window_name)
    cv2.setMouseCallback(window_name, onMouse, param={'uv': _uv, 'original_uv': original_uv})
    while inquiry_on:
        disp_img = scaled_img.copy()
        # Draw a red cross through the current candidate point.
        disp_img[_uv[1]:_uv[1] + 1, :] = np.array([0, 0, 235])
        disp_img[:, _uv[0]:_uv[0] + 1] = np.array([0, 0, 235])
        cv2.imshow(window_name, disp_img)
        key = cv2.waitKey(1) & 0xFF
        if key == ord('d'):  # Cancel: keep the original click position.
            inquiry_on = False
            # BUG FIX: the original assigned to a *local* name ('uv =
            # original_uv'), which had no effect on the returned coordinates;
            # reset the candidate in place instead.
            _uv[:] = original_uv
        elif key == ord('a'):  # Accept the current candidate.
            inquiry_on = False
    cv2.destroyWindow(window_name)
    cv2.destroyWindow(window_name_roi)
    # Map from zoomed-window coordinates back to full-frame coordinates.
    x = int(_uv[0] / scale) + x - disp_w // 2
    y = int(_uv[1] / scale) + y - disp_h // 2
    return np.array([x, y], dtype=int)
# Draw
def frame_label(frame, points, tags):
    """Draw each tag label at its point's location on *frame* (in place)."""
    for point_arr, tag in zip(points, tags):
        # Each entry is a (1, 2) array; row 0 holds the (x, y) coordinates.
        x, y = point_arr[0]
        cv2_draw_label(frame, int(x), int(y), tag, fontScale=0.8)
# First-layer Selection
# Interactive loop: click to add points (see mouse_event_click_point);
# keys: 'c' = finish, 'd' = delete last point, 'p' = print current state.
cv2.namedWindow(video_name)
cv2.setMouseCallback(video_name, mouse_event_click_point, param={'frame':curr_frame, 'points':points, 'tags':tags})
while True:
disp_img = curr_frame.copy()
if len(points) > 0:
frame_label(disp_img, points, tags)
cv2.imshow(video_name, disp_img)
key = cv2.waitKey(1)&0xFF
if key == ord("c"):
print('done')
break
elif key == ord("d"):
if len(points) > 0:
points.pop(-1)
tags.pop(-1)
print('deleted')
elif key == ord("p"):
print('check')
print(points)
print(tags)
cv2.destroyAllWindows()
# Load existing points and tags
# The tracking file is shared by the experiment: reuse it if present,
# otherwise initialise the full (frames x markers x 2) point array.
if os.path.exists(initial_point_file):
data = np.load(initial_point_file, allow_pickle=True)
all_tags = data['tags'].tolist()
all_points = data['points']
inquiry = data['inquiry'].tolist() # list((list(tag), stime, etime))
else:
all_tags = []
for ring_id in range(1, NUM_RING+1):
for point_id in range(NUM_POINT):
all_tags.append(f'{RING_CHAR}{ring_id}-{point_id}')
all_points = np.zeros([length, len(all_tags), 2], dtype=np.float32)
inquiry = []
# Add Flow Queue
# Store each selected point at its tag's slot for the start frame, and queue
# one inquiry covering [sframe, eframe) for this session's tags.
sframe = args.start_frame
eframe = args.end_frame
for tag, point in zip(tags, points):
idx = all_tags.index(tag)
all_points[sframe, idx, :] = point
# NOTE(review): this appends the live 'tags' list object itself (a mutable
# alias), matching the list((list(tag), stime, etime)) schema above --
# confirm a copy is not required.
inquiry.append((tags, sframe, eframe))
# Save points
np.savez( initial_point_file, points=all_points, tags=all_tags,
inquiry=inquiry, history=[]) | add_initial_flow_point.py | import os, sys
import numpy as np
import cv2
import matplotlib.pyplot as plt
from cv2_custom.marking import cv2_draw_label
from cv2_custom.transformation import scale_image
from PyQt5.QtWidgets import (QApplication, QWidget, QPushButton, QLineEdit, QInputDialog)
from config import *
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--camid', type=int, default=1)
parser.add_argument('--runid', type=int, default=1)
parser.add_argument('--start_frame', type=int, default=0)
parser.add_argument('--end_frame', type=int, default=-1)
args = parser.parse_args()
CAMID = args.camid
RUNID = args.runid
# BR2 Configuration
NUM_RING = 5
NUM_POINT = 9
RING_CHAR = 'R'
# Path
path = POSTPROCESSING_PATH
video_path = PREPROCESSED_FOOTAGE_VIDEO_PATH.format(CAMID, RUNID)
video_name = os.path.basename(video_path)
initial_point_file = TRACKING_FILE.format(CAMID, RUNID) # Initial point shared by experiment (save path)
# Set Colors
np.random.seed(100)
color = np.random.randint(0,235,(100,3)).astype(int)
# Capture Video
cap = cv2.VideoCapture(os.path.join(path, video_name))
length = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
if args.start_frame == -1:
setattr(args, 'start_frame', length-1)
#args.start_frame = length-1
cap.set(cv2.CAP_PROP_POS_FRAMES, args.start_frame)
ret, curr_frame = cap.read()
if args.end_frame == -1:
args.end_frame = length
assert args.start_frame < length
app = QApplication(sys.argv)
tags = []
points = []
# Mouse Handle
prev_tag = ''
def mouse_event_click_point(event, x, y, flags, param):
global prev_tag
points = param['points']
tags = param['tags']
bypass_inquiry = (flags & cv2.EVENT_FLAG_CTRLKEY)
if event == cv2.EVENT_LBUTTONDOWN:
point = np.array([x,y], dtype=np.int32).reshape([1,2])
elif event == cv2.EVENT_RBUTTONDOWN:
# Second zoom-layer selection
uv = zoomed_inquiry(param['frame'], np.array([x,y]))
point = uv.astype(np.int32).reshape([1,2])
else:
return
points.append(point)
# Ask for a tag in a separate window
if bypass_inquiry:
tag = prev_tag
else:
_ok = False
while not _ok:
tag, _ok = QInputDialog.getText(QWidget(), 'Tag', 'Input Tag', text=prev_tag)
if tag[0] == 'R':
prev_tag = tag[0] + str(int(tag.split('-')[0][1:])+1) + '-' + str(int(tag.split('-')[1]))
else:
prev_tag = tag
tags.append(tag)
print('added: ')
print(point, tag)
def zoomed_inquiry(current_frame, uv, scale=5.0, disp_h=80, disp_w=80):
    """Open a magnified view around *uv* and let the user refine the point.

    A disp_w x disp_h region centred on *uv* is scaled up by *scale* and
    shown in its own window.  Left-click moves the candidate point,
    right-click resets it to the original click position, 'a' accepts the
    candidate, 'd' cancels (keeps the original position).

    Returns the chosen (x, y) as an int ndarray in full-frame coordinates.
    """
    x, y = uv
    x = int(x)
    y = int(y)
    # Region-of-interest preview on the full frame.
    window_name_roi = 'roi'
    cv2.namedWindow(window_name_roi)
    disp_img_roi = current_frame.copy()
    disp_img_roi = cv2.rectangle(disp_img_roi, (x - disp_w // 2, y - disp_h // 2),
                                 (x + disp_w // 2, y + disp_h // 2), (0, 0, 255), thickness=3)
    cv2.imshow(window_name_roi, disp_img_roi)
    # Pad so the crop stays valid at the frame border, then magnify the ROI.
    img = current_frame.copy()
    padded_img = cv2.copyMakeBorder(img, disp_h // 2, disp_h // 2, disp_w // 2, disp_w // 2,
                                    cv2.BORDER_CONSTANT, value=[0, 0, 0])
    scaled_img = scale_image(padded_img[y:y + disp_h, x:x + disp_w], scale)
    # _uv is the candidate point in zoomed-window pixel coordinates
    # (initialised to the window centre, i.e. the original click).
    _x = int(disp_w * scale / 2)
    _y = int(disp_h * scale / 2)
    _uv = np.array([_x, _y])
    original_uv = _uv.copy()

    def onMouse(event, x, y, flags, param):
        uv = param['uv']
        original_uv = param['original_uv']
        if event == cv2.EVENT_LBUTTONDOWN:
            uv[0] = x
            uv[1] = y
        elif event == cv2.EVENT_RBUTTONDOWN:
            # Reset the candidate back to the original click position.
            uv[:] = original_uv

    # Inquiry loop
    inquiry_on = True
    window_name = 'select reappeared point'
    cv2.namedWindow(window_name)
    cv2.setMouseCallback(window_name, onMouse, param={'uv': _uv, 'original_uv': original_uv})
    while inquiry_on:
        disp_img = scaled_img.copy()
        # Draw a red cross through the current candidate point.
        disp_img[_uv[1]:_uv[1] + 1, :] = np.array([0, 0, 235])
        disp_img[:, _uv[0]:_uv[0] + 1] = np.array([0, 0, 235])
        cv2.imshow(window_name, disp_img)
        key = cv2.waitKey(1) & 0xFF
        if key == ord('d'):  # Cancel: keep the original click position.
            inquiry_on = False
            # BUG FIX: the original assigned to a *local* name ('uv =
            # original_uv'), which had no effect on the returned coordinates;
            # reset the candidate in place instead.
            _uv[:] = original_uv
        elif key == ord('a'):  # Accept the current candidate.
            inquiry_on = False
    cv2.destroyWindow(window_name)
    cv2.destroyWindow(window_name_roi)
    # Map from zoomed-window coordinates back to full-frame coordinates.
    x = int(_uv[0] / scale) + x - disp_w // 2
    y = int(_uv[1] / scale) + y - disp_h // 2
    return np.array([x, y], dtype=int)
# Draw
def frame_label(frame, points, tags):
for inx in range(len(points)):
point = tuple(points[inx][0])
tag = tags[inx]
cv2_draw_label(frame, int(point[0]), int(point[1]), tag, fontScale=0.8)
# First-layer Selection
cv2.namedWindow(video_name)
cv2.setMouseCallback(video_name, mouse_event_click_point, param={'frame':curr_frame, 'points':points, 'tags':tags})
while True:
disp_img = curr_frame.copy()
if len(points) > 0:
frame_label(disp_img, points, tags)
cv2.imshow(video_name, disp_img)
key = cv2.waitKey(1)&0xFF
if key == ord("c"):
print('done')
break
elif key == ord("d"):
if len(points) > 0:
points.pop(-1)
tags.pop(-1)
print('deleted')
elif key == ord("p"):
print('check')
print(points)
print(tags)
cv2.destroyAllWindows()
# Load existing points and tags
if os.path.exists(initial_point_file):
data = np.load(initial_point_file, allow_pickle=True)
all_tags = data['tags'].tolist()
all_points = data['points']
inquiry = data['inquiry'].tolist() # list((list(tag), stime, etime))
else:
all_tags = []
for ring_id in range(1, NUM_RING+1):
for point_id in range(NUM_POINT):
all_tags.append(f'{RING_CHAR}{ring_id}-{point_id}')
all_points = np.zeros([length, len(all_tags), 2], dtype=np.float32)
inquiry = []
# Add Flow Queue
sframe = args.start_frame
eframe = args.end_frame
for tag, point in zip(tags, points):
idx = all_tags.index(tag)
all_points[sframe, idx, :] = point
inquiry.append((tags, sframe, eframe))
# Save points
np.savez( initial_point_file, points=all_points, tags=all_tags,
inquiry=inquiry, history=[]) | 0.292393 | 0.151529 |
import random
import sys
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import serialization, hashes
from cryptography.hazmat.primitives.asymmetric import padding
from cryptography.hazmat.primitives.asymmetric import rsa
PEOPLE_FILE = 'people.txt'
KEY_SIZE = 2048
def encode_int(x):
    """Encode 0 <= x <= 9999 as four ASCII digit bytes, least-significant first.

    Returns bytes (not str) so the header can be concatenated with the PEM
    key and ciphertext in write_ticket -- the original returned str, which
    raises TypeError when concatenated with bytes on Python 3.
    """
    return '{0}{1}{2}{3}'.format(x % 10, x // 10 % 10, x // 100 % 10, x // 1000 % 10).encode('ascii')
def decode_int(b):
    """Decode four little-endian digit characters (bytes or str) to an int.

    Accepts bytes, as returned by f.read(4) on a binary stream: the original
    indexed the bytes directly and got ASCII code points (e.g. 49 for '1'),
    producing wrong lengths on Python 3.
    """
    if isinstance(b, (bytes, bytearray)):
        b = b.decode('ascii')
    return int(b[0]) + int(b[1]) * 10 + int(b[2]) * 100 + int(b[3]) * 1000
def list_people(people_file):
    """Read one participant name per line from *people_file*.

    Trailing newlines are stripped; all other whitespace is preserved.
    """
    names = []
    with open(people_file, 'r') as f:
        for line in f:
            if line.endswith('\n'):
                line = line[:-1]
            names.append(line)
    return names
def open_ticket(ticket):
    """Open *ticket*, try each ciphertext with the embedded private key and
    print the assigned person (or a failure message if none decrypts).

    Ticket layout: 4 ASCII digits (little-endian PEM length), the holder's
    PEM-encoded private key, then the concatenated RSA-OAEP ciphertexts of
    every participant's name.
    """
    with open(ticket, 'rb') as f:
        cert_length = decode_int(f.read(4))
        my_key = serialization.load_pem_private_key(
            f.read(cert_length),
            password=None,
            backend=default_backend()
        )
        encrypted_people_bytes = f.read()
    # Split the remaining payload into fixed-size RSA blocks.
    block_size = KEY_SIZE // 8
    encrypted_people = [encrypted_people_bytes[i:i + block_size]
                        for i in range(0, len(encrypted_people_bytes), block_size)]
    owner = ticket.split('.')[0]
    for encrypted_person in encrypted_people:
        try:
            plaintext = my_key.decrypt(
                encrypted_person,
                padding.OAEP(
                    mgf=padding.MGF1(algorithm=hashes.SHA256()),
                    algorithm=hashes.SHA256(),
                    label=None
                )
            )
        except ValueError:
            # OAEP fails for blocks encrypted with someone else's key.
            continue
        # Decode for display: the original printed the raw bytes repr (b'...').
        print('{0}, your person is {1}'.format(owner, plaintext.decode('utf-8')))
        return
    # No block decrypted with this key (the original's 'found' flag was dead
    # code -- it was never set True, and success returns from the loop).
    print('{0}, you got nobody :(\n(Could not find a valid person)'.format(owner))
def cmp_pb_keys(key1, key2):
    """Return True when two private keys correspond to the same public key."""
    def _pub_pem(key):
        return key.public_key().public_bytes(
            encoding=serialization.Encoding.PEM,
            format=serialization.PublicFormat.SubjectPublicKeyInfo,
        )
    return _pub_pem(key1) == _pub_pem(key2)
def write_ticket(person, cert, people):
    """Write '<person>.ticket': 4-digit length header + private-key PEM +
    concatenated ciphertexts.

    *cert* and *people* are bytes.  The header is coerced to bytes so the
    concatenation works on Python 3 whether encode_int returns str or bytes
    (the original crashed with TypeError when it returned str).
    """
    header = encode_int(len(cert))
    if isinstance(header, str):
        header = header.encode('ascii')
    with open('{0}.ticket'.format(person), 'wb') as f:
        f.write(header + cert + people)
def generate_tickets(people=None):
    """Run one Secret-Santa round: generate an RSA key pair per person,
    encrypt every name under its own public key, shuffle the private keys
    into a derangement (nobody gets the key for their own name), and write
    one '<name>.ticket' file per person.

    Parameters
    ----------
    people : list of str, optional
        Participant names; defaults to the contents of PEOPLE_FILE.
    """
    if people is None:
        people = list_people(PEOPLE_FILE)
    # Normalise names once.  The original stripped the newline on a loop-local
    # copy but still used the raw (possibly newline-suffixed) name for the
    # ticket file name.
    people = [name[:-1] if name.endswith('\n') else name for name in people]
    private_keys = []
    crypto_text = []
    for name in people:
        private_key = rsa.generate_private_key(
            public_exponent=65537,
            key_size=KEY_SIZE,
            backend=default_backend()
        )
        crypto_text.append(private_key.public_key().encrypt(
            # RSA-OAEP operates on bytes; the original passed str, which
            # raises TypeError on Python 3.
            name.encode('utf-8'),
            padding.OAEP(
                mgf=padding.MGF1(algorithm=hashes.SHA256()),
                algorithm=hashes.SHA256(),
                label=None
            )
        ))
        private_keys.append(private_key)
    # Reshuffle until no one would receive the key that decrypts their own
    # name.  NOTE: with a single participant no derangement exists and this
    # loops forever -- unchanged from the original.
    init_order = list(private_keys)
    ok = False
    while not ok:
        random.shuffle(private_keys)
        ok = True
        for i in range(len(init_order)):
            if cmp_pb_keys(init_order[i], private_keys[i]):
                ok = False
    for i in range(len(private_keys)):
        pem = private_keys[i].private_bytes(
            encoding=serialization.Encoding.PEM,
            format=serialization.PrivateFormat.PKCS8,
            encryption_algorithm=serialization.NoEncryption()
        )
        write_ticket(people[i], pem, b''.join(crypto_text))
if __name__ == '__main__':
# CLI: 'gen [names...]' builds tickets, 'open <name>' reveals an
# assignment, 'test' runs a three-person round trip.
if sys.argv[1] == 'gen':
# NOTE(review): '> 3' means a single explicit name ('gen alice') falls
# through to the people.txt default and is silently ignored -- looks
# like it should be '> 2'; confirm before changing.
if len(sys.argv) > 3:
generate_tickets(sys.argv[2:])
else:
generate_tickets()
elif sys.argv[1] == 'open':
open_ticket('{0}.ticket'.format(sys.argv[2]))
elif sys.argv[1] == 'test':
generate_tickets(['test1', 'test2', 'test3'])
for x in range(1,4):
open_ticket('test{0}.ticket'.format(x)) | secretize.py | import random
import sys
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import serialization, hashes
from cryptography.hazmat.primitives.asymmetric import padding
from cryptography.hazmat.primitives.asymmetric import rsa
PEOPLE_FILE = 'people.txt'
KEY_SIZE = 2048
def encode_int(x):
    """Encode 0 <= x <= 9999 as four ASCII digit bytes, least-significant first.

    Returns bytes (not str) so the header can be concatenated with the PEM
    key and ciphertext in write_ticket -- the original returned str, which
    raises TypeError when concatenated with bytes on Python 3.
    """
    return '{0}{1}{2}{3}'.format(x % 10, x // 10 % 10, x // 100 % 10, x // 1000 % 10).encode('ascii')
def decode_int(b):
    """Decode four little-endian digit characters (bytes or str) to an int.

    Accepts bytes, as returned by f.read(4) on a binary stream: the original
    indexed the bytes directly and got ASCII code points (e.g. 49 for '1'),
    producing wrong lengths on Python 3.
    """
    if isinstance(b, (bytes, bytearray)):
        b = b.decode('ascii')
    return int(b[0]) + int(b[1]) * 10 + int(b[2]) * 100 + int(b[3]) * 1000
def list_people(people_file):
people = []
with open(people_file, 'r') as f:
people = f.readlines()
return [person[:-1] if person[-1] == '\n' else person for person in people]
def open_ticket(ticket):
my_key = None
encrypted_people = []
with open(ticket, 'rb') as f:
cert_length = decode_int(f.read(4))
my_key = serialization.load_pem_private_key(
f.read(cert_length),
password=None,
backend=default_backend()
)
encrypted_people_bytes = f.read()
encrypted_people = [encrypted_people_bytes[i:i + KEY_SIZE // 8] for i in
range(0, len(encrypted_people_bytes), KEY_SIZE // 8)]
found = False
for encrypted_person in encrypted_people:
try:
plaintext = my_key.decrypt(
encrypted_person,
padding.OAEP(
mgf=padding.MGF1(algorithm=hashes.SHA256()),
algorithm=hashes.SHA256(),
label=None
)
)
print('{0}, your person is {1}'.format(ticket.split('.')[0], plaintext))
return
except ValueError:
continue
if found is False:
print('{0}, you got nobody :(\n(Could not find a valid person)'.format(ticket.split('.')[0]))
def cmp_pb_keys(key1, key2):
return key1.public_key().public_bytes(encoding=serialization.Encoding.PEM, format=serialization.PublicFormat.SubjectPublicKeyInfo) == \
key2.public_key().public_bytes(encoding=serialization.Encoding.PEM, format=serialization.PublicFormat.SubjectPublicKeyInfo)
def write_ticket(person, cert, people):
with open('{0}.ticket'.format(person), 'wb') as f:
f.write(encode_int(len(cert)) + cert + people)
def generate_tickets(people=None):
if people is None:
people = list_people(PEOPLE_FILE)
private_keys = []
crypto_text = []
for line in people:
if line.endswith('\n'):
line = line[:-1]
private_key = rsa.generate_private_key(
public_exponent=65537,
key_size=KEY_SIZE,
backend=default_backend()
)
crypto_text.append(private_key.public_key().encrypt(
line,
padding.OAEP(
mgf=padding.MGF1(algorithm=hashes.SHA256()),
algorithm=hashes.SHA256(),
label=None
)
))
private_keys.append(private_key)
init_order = [prv_key for prv_key in private_keys]
ok = False
while ok is False:
random.shuffle(private_keys)
ok = True
for x in range(len(init_order)):
if cmp_pb_keys(init_order[x], private_keys[x]):
#print('[DEBUG] Not ok, {0} gets himself'.format(people[x]))
ok = False
for x in range(len(private_keys)):
pem = private_keys[x].private_bytes(
encoding = serialization.Encoding.PEM,
format = serialization.PrivateFormat.PKCS8,
encryption_algorithm = serialization.NoEncryption()
)
write_ticket(people[x], pem, b''.join(crypto_text))
if __name__ == '__main__':
if sys.argv[1] == 'gen':
if len(sys.argv) > 3:
generate_tickets(sys.argv[2:])
else:
generate_tickets()
elif sys.argv[1] == 'open':
open_ticket('{0}.ticket'.format(sys.argv[2]))
elif sys.argv[1] == 'test':
generate_tickets(['test1', 'test2', 'test3'])
for x in range(1,4):
open_ticket('test{0}.ticket'.format(x)) | 0.191139 | 0.195671 |
from uuid import UUID, uuid4
import anvil.users
from anvil.tables import app_tables, in_transaction, order_by
from anvil_extras import logging
from ..historic.events import Change, Creation
from ..historic.exceptions import (
AuthorizationError,
DuplicationError,
InvalidUIDError,
NonExistentError,
ResurrectionError,
)
__version__ = "0.0.1"
LOGGER = logging.Logger("anvil_labs.historic.persistence")
def _default_identifier():
    """Return the logged-in anvil user's email, or None when nobody is logged
    in or the Users service is not added to the app."""
    try:
        return anvil.users.get_user()["email"]
    except Exception as exc:
        # get_user() returns None when logged out (TypeError on subscript);
        # anvil raises a ServiceNotAdded error when the Users service is
        # missing.  Anything else is unexpected and propagates.
        if isinstance(exc, TypeError) or repr(exc).startswith("ServiceNotAdded"):
            return None
        raise
class _Authorization:
"""A class to check whether authorization exists for an operation on an object
Attributes
----------
policy: callable
which must take an event instance as its argument and return a bool.
identifier : callable
which must return a string.
"""
def __init__(self):
self.policy = None
self.identifier = None
def check(self, event):
if self.policy is None:
return True
return self.policy(event)
def user_id(self):
if self.identifier is None:
self.identifier = _default_identifier
return self.identifier()
authorization = _Authorization()
def _is_valid_uid(uid):
try:
UUID(uid, version=4)
return True
except ValueError:
return False
def _previous_event(object_id):
"""Find the most recent event record for a given object_id
Parameters
----------
object_id : str
Returns
-------
app_tables.events row or None when no events exist for the object
Raises
------
ResurrectionError
If the object's most recent event is a termination.
"""
result = None
try:
# Highest event_id first; [0] picks the newest record for this object.
result = app_tables.events.search(
order_by("event_id", ascending=False), object_id=object_id
)[0]
except IndexError:
# No events recorded yet for this object.
pass
# Terminated objects must not receive further events.
if result is not None and result["event_type"] == "termination":
raise ResurrectionError(
f"Object {object_id} was terminated at {result['occurred_at']} "
f"(event {result['event_id']})",
)
return result
def _state_diff(state, previous_state):
"""A dict to show the new, changed and removed attributes between two states
Parameters
----------
state : dict
previous_state : dict
Returns
-------
dict
with keys 'new', 'changed' and 'removed' for each of those with content
"""
new = {k: v for k, v in state.items() if k not in previous_state}
changed = {
k: {"from": previous_state[k], "to": v}
for k, v in state.items()
if k in previous_state and v != previous_state[k]
}
removed = {k: v for k, v in previous_state.items() if k not in state}
diff = {"new": new, "changed": changed, "removed": removed}
result = {k: v for k, v in diff.items() if len(v) > 0}
return result if len(result) > 0 else None
def _record_event(event, record_duplicates, user_id):
"""Write a single event record to the data table
Parameters
----------
event : Event
record_duplicates : bool
Whether to record events whose state is unchanged from previously
user_id : str
Identifier of the user responsible for the event.
Returns
-------
str
The uid (object_id) of the affected object.
"""
# Creations get a fresh uuid4 when none is supplied; explicit uids are
# validated.
if isinstance(event, Creation):
if event.affected.uid is None:
event.affected.uid = str(uuid4())
elif not _is_valid_uid(event.affected.uid):
raise InvalidUIDError(f"Invalid UID {event.affected.uid}")
object_id = event.affected.uid
state = None
diff = None
# Objects may customise their persisted form via __persist__; otherwise the
# instance __dict__ is stored.
try:
state = event.affected.__persist__()
except AttributeError:
state = event.affected.__dict__
previous_event = _previous_event(object_id)
# previous_event is None for brand-new objects (TypeError on subscript).
try:
previous_event_id = previous_event["event_id"]
except TypeError:
previous_event_id = None
if isinstance(event, Creation) and previous_event is not None:
raise DuplicationError(
f"Object {object_id} already exists (event {previous_event_id})"
)
if isinstance(event, Change) and previous_event is None:
raise NonExistentError(
f"Object {object_id} does not exist and so cannot be updated"
)
if isinstance(event, Change):
diff = _state_diff(state, previous_event["state"])
# Skip no-op changes unless duplicates are explicitly requested.
# NOTE(review): diff is also None for non-Change events (Creation,
# termination), so with record_duplicates False those are skipped too --
# presumably only unchanged Change events should hit this early return;
# confirm against callers before altering.
if diff is None and not record_duplicates:
return object_id
# Manual sequence row provides monotonically increasing event ids.
sequence = app_tables.sequences.get(name="events") or app_tables.sequences.add_row(
name="events", value=1
)
app_tables.events.add_row(
event_id=sequence["value"],
recorded_at=event.recorded_at,
object_id=object_id,
object_type=type(event.affected).__name__,
event_type=type(event).__name__.lower(),
occurred_at=event.occurred_at,
state=state,
predecessor=previous_event_id,
state_diff=diff,
user_id=user_id,
)
sequence["value"] += 1
return object_id
@in_transaction
def save_event_records(events, log_level, record_duplicates, return_identifiers):
"""Save event records for a batch of events
Runs inside a single database transaction: either every event in the
batch is recorded or none are.
Parameters
----------
events : Event or list of Event
log_level : int
Level applied to the module logger for this call.
record_duplicates : bool
Whether to record events whose state is unchanged from previously
return_identifiers : bool
Returns
-------
list
either empty or with the uids of the saved objects depending on
return_identifiers
Raises
------
AuthorizationError
If the configured policy rejects any event in the batch.
"""
LOGGER.level = log_level
result = []
# Accept a single event as well as a list.
if not isinstance(events, list):
events = [events]
user_id = authorization.user_id()
LOGGER.debug(f"Saving payload of {len(events)} events")
try:
for event in events:
if not authorization.check(event):
raise AuthorizationError(
f"You do not have {type(event).__name__} permission for this "
f"{type(event.affected).__name__} "
f"object (id: {event.affected.uid}])"
)
LOGGER.debug(
f"Attempting {type(event).__name__} of {type(event.affected).__name__} "
f"object (id: {event.affected.uid})"
)
uid = _record_event(event, record_duplicates, user_id)
if return_identifiers:
result.append(uid)
LOGGER.debug(f"{len(events)} Events saved")
return result
except Exception as e:
# Any failure rolls the whole transaction back (via in_transaction);
# log a summary and re-raise the original error for the caller.
LOGGER.error(
"An error occurred whilst attempting to save these events. "
"No changes were committed to the db."
)
raise e | server_code/historic_server/persistence.py | from uuid import UUID, uuid4
import anvil.users
from anvil.tables import app_tables, in_transaction, order_by
from anvil_extras import logging
from ..historic.events import Change, Creation
from ..historic.exceptions import (
AuthorizationError,
DuplicationError,
InvalidUIDError,
NonExistentError,
ResurrectionError,
)
__version__ = "0.0.1"
LOGGER = logging.Logger("anvil_labs.historic.persistence")
def _default_identifier():
try:
user = anvil.users.get_user()["email"]
except Exception as e:
if isinstance(e, TypeError) or repr(e).startswith("ServiceNotAdded"):
user = None
else:
raise e
return user
class _Authorization:
"""A class to check whether authorization exists for an operation on an object
Attributes
----------
policy: callable
which must take an event instance as its argument and return a bool.
identifier : callable
which must return a string.
"""
def __init__(self):
self.policy = None
self.identifier = None
def check(self, event):
if self.policy is None:
return True
return self.policy(event)
def user_id(self):
if self.identifier is None:
self.identifier = _default_identifier
return self.identifier()
authorization = _Authorization()
def _is_valid_uid(uid):
try:
UUID(uid, version=4)
return True
except ValueError:
return False
def _previous_event(object_id):
"""Find the most recent event record for a given object_id
Parameters
----------
object_id : str
Returns
-------
app_tables.events row
"""
result = None
try:
result = app_tables.events.search(
order_by("event_id", ascending=False), object_id=object_id
)[0]
except IndexError:
pass
if result is not None and result["event_type"] == "termination":
raise ResurrectionError(
f"Object {object_id} was terminated at {result['occurred_at']} "
f"(event {result['event_id']})",
)
return result
def _state_diff(state, previous_state):
"""A dict to show the new, changed and removed attributes between two states
Parameters
----------
state : dict
previous_state : dict
Returns
-------
dict
with keys 'new', 'changed' and 'removed' for each of those with content
"""
new = {k: v for k, v in state.items() if k not in previous_state}
changed = {
k: {"from": previous_state[k], "to": v}
for k, v in state.items()
if k in previous_state and v != previous_state[k]
}
removed = {k: v for k, v in previous_state.items() if k not in state}
diff = {"new": new, "changed": changed, "removed": removed}
result = {k: v for k, v in diff.items() if len(v) > 0}
return result if len(result) > 0 else None
def _record_event(event, record_duplicates, user_id):
"""Write a single event record to the data table
Parameters
----------
event : Event
prevent_duplication : bool
Whether to disallow records where the state is unchanged from previously
"""
if isinstance(event, Creation):
if event.affected.uid is None:
event.affected.uid = str(uuid4())
elif not _is_valid_uid(event.affected.uid):
raise InvalidUIDError(f"Invalid UID {event.affected.uid}")
object_id = event.affected.uid
state = None
diff = None
try:
state = event.affected.__persist__()
except AttributeError:
state = event.affected.__dict__
previous_event = _previous_event(object_id)
try:
previous_event_id = previous_event["event_id"]
except TypeError:
previous_event_id = None
if isinstance(event, Creation) and previous_event is not None:
raise DuplicationError(
f"Object {object_id} already exists (event {previous_event_id})"
)
if isinstance(event, Change) and previous_event is None:
raise NonExistentError(
f"Object {object_id} does not exist and so cannot be updated"
)
if isinstance(event, Change):
diff = _state_diff(state, previous_event["state"])
if diff is None and not record_duplicates:
return object_id
sequence = app_tables.sequences.get(name="events") or app_tables.sequences.add_row(
name="events", value=1
)
app_tables.events.add_row(
event_id=sequence["value"],
recorded_at=event.recorded_at,
object_id=object_id,
object_type=type(event.affected).__name__,
event_type=type(event).__name__.lower(),
occurred_at=event.occurred_at,
state=state,
predecessor=previous_event_id,
state_diff=diff,
user_id=user_id,
)
sequence["value"] += 1
return object_id
@in_transaction
def save_event_records(events, log_level, record_duplicates, return_identifiers):
"""Save event records for a batch of events
Parameters
----------
payload : list
of Event instances
prevent_duplication : bool
Whether to disallow records where the state is unchanged from previously
return_identifiers : bool
Returns
-------
list
either empty or with the uids of the saved objects depending on
return_identifiers
"""
LOGGER.level = log_level
result = []
if not isinstance(events, list):
events = [events]
user_id = authorization.user_id()
LOGGER.debug(f"Saving payload of {len(events)} events")
try:
for event in events:
if not authorization.check(event):
raise AuthorizationError(
f"You do not have {type(event).__name__} permission for this "
f"{type(event.affected).__name__} "
f"object (id: {event.affected.uid}])"
)
LOGGER.debug(
f"Attempting {type(event).__name__} of {type(event.affected).__name__} "
f"object (id: {event.affected.uid})"
)
uid = _record_event(event, record_duplicates, user_id)
if return_identifiers:
result.append(uid)
LOGGER.debug(f"{len(events)} Events saved")
return result
except Exception as e:
LOGGER.error(
"An error occurred whilst attempting to save these events. "
"No changes were committed to the db."
)
raise e | 0.692434 | 0.218253 |
from game_states import GameStates
from game_messages import Message
from result_handlers.dead_entity_rh import handle_dead_entity_result
from result_handlers.equip_rh import handle_equip_result
from result_handlers.targeting_rh import handle_targeting_result
from result_handlers.xp_rh import handle_xp_result
from result_consumer.available_results import get_available_results
def consume_results(player_turn_result,message_log,entities,player,game_state,targeting_item, previous_game_state):
"""Dispatch each result of the player's turn to its handler.

Normalises the turn result via get_available_results, then applies, in
order: message display, entity death, item pickup / consumption / drop,
equipping, targeting start / cancel, and XP gain.  Picking up, consuming
or dropping an item ends the player's turn (switches to ENEMY_TURN).

NOTE(review): the tuple returned by the caller-side line includes
game_state twice and re-reads available_results['message'] after the
handlers have run -- presumably mirroring the caller's unpacking; confirm
before restructuring.
"""
available_results = get_available_results(player_turn_result)
if available_results['message']:
message_log.add_message(available_results['message'])
if available_results['dead_entity']:
# Death handler may replace the message and switch to PLAYER_DEAD.
available_results['message'], game_state, message_log = handle_dead_entity_result(available_results['dead_entity'], player,
game_state, message_log)
if available_results['item_added']:
# Picked-up item leaves the map; player's turn ends.
entities.remove(available_results['item_added'])
game_state = GameStates.ENEMY_TURN
if available_results['item_consumed']:
game_state = GameStates.ENEMY_TURN
if available_results['item_dropped']:
# Dropped item re-enters the map; player's turn ends.
entities.append(available_results['item_dropped'])
game_state = GameStates.ENEMY_TURN
if available_results['equip']:
message_log, game_state, player = handle_equip_result(player, available_results['equip'], message_log)
if available_results['targeting']:
targeting_item, previous_game_state, game_state, message_log = handle_targeting_result(
available_results['targeting'], message_log)
if available_results['targeting_cancelled']:
# Escape from targeting restores the pre-targeting state.
game_state = previous_game_state
message_log.add_message(Message('Targeting cancelled'))
if available_results['xp']:
player, message_log, previous_game_state, game_state = handle_xp_result(player, available_results['xp'],
message_log, game_state,
previous_game_state)
return available_results,message_log,available_results['message'],game_state,entities,player,game_state,targeting_item, previous_game_state | result_consumer/result_consumer.py | from game_states import GameStates
from game_messages import Message
from result_handlers.dead_entity_rh import handle_dead_entity_result
from result_handlers.equip_rh import handle_equip_result
from result_handlers.targeting_rh import handle_targeting_result
from result_handlers.xp_rh import handle_xp_result
from result_consumer.available_results import get_available_results
def consume_results(player_turn_result,message_log,entities,player,game_state,targeting_item, previous_game_state):
available_results = get_available_results(player_turn_result)
if available_results['message']:
message_log.add_message(available_results['message'])
if available_results['dead_entity']:
available_results['message'], game_state, message_log = handle_dead_entity_result(available_results['dead_entity'], player,
game_state, message_log)
if available_results['item_added']:
entities.remove(available_results['item_added'])
game_state = GameStates.ENEMY_TURN
if available_results['item_consumed']:
game_state = GameStates.ENEMY_TURN
if available_results['item_dropped']:
entities.append(available_results['item_dropped'])
game_state = GameStates.ENEMY_TURN
if available_results['equip']:
message_log, game_state, player = handle_equip_result(player, available_results['equip'], message_log)
if available_results['targeting']:
targeting_item, previous_game_state, game_state, message_log = handle_targeting_result(
available_results['targeting'], message_log)
if available_results['targeting_cancelled']:
game_state = previous_game_state
message_log.add_message(Message('Targeting cancelled'))
if available_results['xp']:
player, message_log, previous_game_state, game_state = handle_xp_result(player, available_results['xp'],
message_log, game_state,
previous_game_state)
return available_results,message_log,available_results['message'],game_state,entities,player,game_state,targeting_item, previous_game_state | 0.378459 | 0.130673 |
import pathlib
import threading
import time
import pyaudio
import tkinter as tk
import tkinter.ttk as ttk
from tkinter.messagebox import showerror, showinfo, showwarning
from classes.scrollimage import ScrollableImage
from classes.audio import AudioHandler
from classes.staffgen import StaffGenerator
from classes.model import Model
import speech_recognition as s_r
PROJECT_PATH = pathlib.Path(__file__).parent
PROJECT_UI = PROJECT_PATH / "first_layout.ui"
class FirstLayoutApp:
    """Tk GUI that records guitar audio, renders detected notes on a staff
    and converts them to ASCII tab notation.

    A background thread (note_listener) polls the AudioHandler for newly
    detected notes while recording is active.
    """

    def __init__(self, master=None):
        # Build the (Pygubu-generated) widget tree first.
        self.mic_map = None  # display name -> pyaudio device index
        self.build_ui(master=master)
        # Tk variables backing the microphone combobox and threshold slider.
        self.selected_mic = tk.StringVar()
        self.selected_mic_level = tk.DoubleVar()
        # Populate the microphone list.
        self.set_microphones()
        self.mic_options.configure(textvariable=self.selected_mic)
        self.mic_level.configure(variable=self.selected_mic_level)
        self.listen_for_notes = False  # polled by the listener thread
        self.stop_thread = False       # set True on shutdown to end the thread
        self.note_listener_thread = threading.Thread(target=self.note_listener)
        self.note_listener_thread.start()
        self.notes = []
        self.note_writer = None
        self.staff_gen = StaffGenerator()
        self.test_threshold.configure(command=self.save_pdf)
        self.notes_played = 0  # last note count seen from the audio handler
        self.audio = None      # AudioHandler instance, created by start_audio

    def build_ui(self, master):
        """ A function that contains the code from Pygubu to build the application - Simply copy & paste """
        self.mainwindow = tk.Tk() if master is None else tk.Toplevel(master)
        self.mainwindow.protocol('WM_DELETE_WINDOW', self.terminate)
        self.dashboard = ttk.Frame(self.mainwindow)
        self.mic_select = ttk.Frame(self.dashboard)
        self.mic_label = ttk.Label(self.mic_select)
        self.mic_label.configure(text='Select Microphone')
        self.mic_label.place(anchor='nw', relwidth='0.60', relx='0.03', rely='0.0', x='0', y='0')
        self.mic_options = ttk.Combobox(self.mic_select)
        self.mic_options.place(anchor='nw', relx='0.03', rely='0.30', x='0', y='0')
        self.mic_level = tk.Scale(self.mic_select)
        self.mic_level.configure(digits='0', from_='0', orient='horizontal', repeatdelay='0')
        self.mic_level.configure(showvalue='true', sliderrelief='flat', to='100')
        self.mic_level.place(anchor='nw', relheight='0.61', relwidth='0.3', relx='0.53', rely='0.36', x='0', y='0')
        self.refresh = tk.Button(self.mic_select)
        self.refresh.configure(text='refresh')
        self.refresh.place(anchor='nw', relx='0.03', rely='0.6', x='0', y='0')
        self.refresh.configure(command=self.set_microphones)
        self.threshold_label = tk.Label(self.mic_select)
        self.threshold_label.configure(text='threshold')
        self.threshold_label.place(anchor='nw', relx='0.58', x='0', y='0')
        self.test_threshold = tk.Button(self.mic_select)
        self.test_threshold.configure(text='save')
        self.test_threshold.place(anchor='nw', relx='0.86', rely='0.34', x='0', y='0')
        self.mic_select.configure(borderwidth='2', relief='raised')
        self.mic_select.place(anchor='nw', relheight='1.0', relwidth='0.50', x='0', y='0')
        self.control = tk.Frame(self.dashboard)
        self.start = tk.Button(self.control)
        self.start.configure(text='start')
        self.start.place(anchor='nw', relheight='0.42', relwidth='0.25', relx='0.05', rely='0.25', x='0', y='0')
        self.start.configure(command=self.start_audio)
        self.stop = tk.Button(self.control)
        self.stop.configure(compound='top', text='stop')
        self.stop.place(anchor='nw', relheight='0.42', relwidth='0.25', relx='0.35', rely='0.25', x='0', y='0')
        self.stop.configure(command=self.stop_audio)
        self.record = tk.Button(self.control)
        self.record.configure(text='tabify')
        self.record.place(anchor='nw', relheight='0.42', relwidth='0.25', relx='0.65', rely='0.25', x='0', y='0')
        self.record.configure(command=self.tabify)
        self.control.configure(height='200', width='200')
        self.control.place(anchor='nw', relheight='1', relwidth='0.5', relx='0.5', x='0', y='0')
        self.dashboard.configure(relief='ridge')
        self.dashboard.place(anchor='nw', relheight='0.15', relwidth='1.0', relx='0.0', rely='0.0', x='0', y='0')
        self.display = ttk.Frame(self.mainwindow)
        self.display.configure(height='200', width='200')
        self.display.place(anchor='nw', relheight='0.85', relwidth='1.0', rely='0.15', x='0', y='0')
        self.staff_display = ttk.Frame(self.display)
        self.staff_display.configure(height='100', width='200')
        self.staff_display.place(anchor='nw', relheight='0.85', relwidth='1.0', rely='0.15', x='0', y='0')
        self.note_display = tk.Text(self.display)
        self.note_display.configure(height='10', width='50')
        _text_ = "Execute tabify to fill this with tab notation"
        self.note_display.insert('0.0', _text_)
        self.note_display.place(anchor='nw', relheight='0.5', relwidth='1.0', relx='0.0', rely='0.5', x='0', y='0')
        self.mainwindow.configure(height='480', width='640')
        self.mainwindow.minsize(640, 480)

    def run(self):
        """Create the note-classification model and enter the Tk main loop."""
        self.model = Model()  # NOTE: tabify requires run() to have been called
        self.mainwindow.mainloop()

    def terminate(self):
        """Shut down cleanly: stop the listener thread and destroy the UI."""
        print("Terminating the application...")
        self.listen_for_notes = False
        self.stop_thread = True
        self.mainwindow.destroy()
        if self.note_writer is not None:
            self.note_writer.terminate()
        # exit() relies on the site module being loaded; raise SystemExit directly.
        raise SystemExit(0)

    def update_staff(self):
        """Reload demo.png (rendered by StaffGenerator) into the scroll view."""
        # NOTE(review): Tk drops PhotoImages that are no longer referenced --
        # assumes ScrollableImage keeps a reference to the image it is given;
        # confirm, otherwise the staff may render blank.
        img = tk.PhotoImage(file="demo.png")
        self.image_window = ScrollableImage(self.staff_display, image=img,
                                            scrollbarwidth=2)
        self.image_window.configure(height='200', width='200')
        self.image_window.place(anchor='nw', relheight='1.0', relwidth='1.0', rely='0', x='0', y='0')

    def display_staff(self, notes):
        """Render *notes* to a staff image and refresh the display when done."""
        self.staff_gen.main(notes, on_exit=self.update_staff)

    def note_listener(self):
        """Background loop that redraws the staff when new notes appear."""
        while True:
            if self.stop_thread:  # shutdown flag set by terminate()
                break
            if self.listen_for_notes:
                if self.audio is None:
                    time.sleep(2)
                    continue
                if self.notes_played < self.audio.notes_played:
                    self.notes_played = self.audio.notes_played
                    # BUGFIX: the original wrapped this call in
                    # threading.Thread(target=self.display_staff(...)), which
                    # invokes display_staff immediately and never starts the
                    # Thread object.  The call was always synchronous in
                    # practice; make that explicit and drop the dead Thread.
                    self.display_staff(self.audio.detected_notes)
                else:
                    # BUGFIX: the original had no sleep here and busy-spun at
                    # full CPU while recording with no new notes.
                    time.sleep(0.2)
            else:
                time.sleep(1)

    def start_audio(self):
        """Validate the mic selection, then start capturing audio."""
        if self.selected_mic.get() == "":
            showerror(
                title="Select a microphone",
                message="Microphone not selected!"
            )
            return
        threshold_percentage = self.selected_mic_level.get()
        # Map the 0-100 slider onto the handler's 0..-200 threshold scale.
        threshold = (threshold_percentage / 100) * -200
        # BUGFIX: create the handler only after validation; the original left
        # a dangling, never-started AudioHandler behind when no microphone was
        # selected, which confused stop_audio's "is it running" check.
        self.audio = AudioHandler(threshold)
        mic = self.mic_map[self.selected_mic.get()]
        self.audio.start(mic)  # start with the selected mic
        self.listen_for_notes = True  # start listening for notes

    def stop_audio(self):
        """Stop the audio capture, if it is running."""
        if self.audio is None:
            showwarning(
                title="Nothing to stop",
                message="Audio is not currently running, nothing to stop."
            )
            return
        self.audio.stop()
        self.listen_for_notes = False  # stop listening for notes

    def seq_to_text(self, sequence):
        """Convert (string, fret) pairs to six lines of ASCII tab notation.

        sequence: iterable of tuples whose first element is the string number
        (1-6, indexing high-E..low-e in ``tuning``) and second is the fret.
        """
        tuning = ["E", "B", "G", "D", "A", "e"]
        tab_lists = {"E": [], "B": [], "G": [], "D": [], "A": [], "e": []}
        for note in sequence:
            string = tuning[note[0] - 1]
            fret = str(note[1])
            filler = "-" * len(fret)  # pad the other strings to the same width
            for key in tab_lists:
                tab_lists[key].append(fret if key == string else filler)
                # BUGFIX: the separator dash was only appended in the
                # non-matching branch, so the played string's line ended up one
                # character shorter per note and the six lines drifted out of
                # alignment.  Append it for every string.
                tab_lists[key].append("-")
        return "e| " + "".join(tab_lists["e"]) + "\n" \
            + "B| " + "".join(tab_lists["B"]) + "\n" \
            + "G| " + "".join(tab_lists["G"]) + "\n" \
            + "D| " + "".join(tab_lists["D"]) + "\n" \
            + "A| " + "".join(tab_lists["A"]) + "\n" \
            + "E| " + "".join(tab_lists["E"])

    def tabify(self):
        """Convert the recorded notes to tab text and show it in the text box."""
        if self.audio is None or len(self.audio.detected_notes) == 0:
            showwarning(
                title="Nothing to Tabify",
                message="Record some notes first using the start button"
            )
            return
        # Keep only notes whose last element (presumably the octave -- TODO
        # confirm against AudioHandler) lies in the playable range 2..6.
        sequence = self.audio.detected_notes
        clean_sequence = [note for note in sequence
                          if 2 <= int(list(note)[-1]) <= 6]
        # Classify the sequence (string/fret estimation) using the Model.
        estimation = self.model.eval_sequence(clean_sequence)
        text = self.seq_to_text(estimation)
        # Replace the placeholder text with the generated tab.
        self.note_display.delete("1.0", "end-1c")
        self.note_display.insert('0.0', text)

    def set_microphones(self):
        """Refresh the combobox with the currently available microphones."""
        mics, value_map = self.get_audio_devices()
        self.mic_map = value_map
        self.mic_options.configure(values=mics)  # NOTE: If you change microphone box id, this will break
        self.mic_options.option_clear()

    def get_audio_devices(self):
        """Return ([mic display names], {name: device index}) via PyAudio."""
        p = pyaudio.PyAudio()
        mics = []
        indx = []
        try:
            for i in range(p.get_device_count()):
                name = p.get_device_info_by_index(i).get('name')
                if name.split(" ")[0] == "Microphone":
                    # Drop the "Microphone" prefix and the surrounding pair of
                    # characters (presumably parentheses -- TODO confirm).
                    stripped_name = " ".join(name.split(" ")[1:])
                    stripped_name = stripped_name[1:-1].strip()
                    if stripped_name not in mics:
                        mics.append(stripped_name)
                        indx.append(i)
        finally:
            # BUGFIX: release PortAudio resources; the original leaked the
            # PyAudio instance on every refresh.
            p.terminate()
        dictionary = {name: device_index for name, device_index in zip(mics, indx)}
        return mics, dictionary

    def save_pdf(self):
        """Save the current staff to PDF, if there is anything to save."""
        if self.staff_gen is None or self.audio is None or len(self.audio.detected_notes) == 0:
            showwarning(
                title="Nothing to save",
                message="Nothing to save to PDF."
            )
            return
        self.staff_gen.save_pdf()
# Entry point: construct the app and hand control to the Tk main loop.
if __name__ == '__main__':
    app = FirstLayoutApp()
    app.run() | python/app/app.py | import pathlib
import threading
import time
import pyaudio
import tkinter as tk
import tkinter.ttk as ttk
from tkinter.messagebox import showerror, showinfo, showwarning
from classes.scrollimage import ScrollableImage
from classes.audio import AudioHandler
from classes.staffgen import StaffGenerator
from classes.model import Model
import speech_recognition as s_r
PROJECT_PATH = pathlib.Path(__file__).parent
PROJECT_UI = PROJECT_PATH / "first_layout.ui"
class FirstLayoutApp:
def __init__(self, master=None):
# build ui
self.mic_map = None
self.build_ui(master=master)
# Local variables
self.selected_mic = tk.StringVar()
self.selected_mic_level = tk.DoubleVar()
# set up microphones
self.set_microphones()
# set up variables
self.mic_options.configure(textvariable=self.selected_mic)
self.mic_level.configure(variable=self.selected_mic_level)
self.listen_for_notes = False
self.stop_thread = False
self.note_listener_thread = threading.Thread(target=self.note_listener)
self.note_listener_thread.start()
# self.note_listener_thread = multiprocessing.Process(target=self.note_listener)
self.notes = []
self.note_writer = None
self.staff_gen = StaffGenerator()
self.test_threshold.configure(command=self.save_pdf)
self.notes_played = 0
self.audio = None
def build_ui(self, master):
""" A function that contains the code from Pygubu to build the application - Simply copy & paste """
self.mainwindow = tk.Tk() if master is None else tk.Toplevel(master)
self.mainwindow.protocol('WM_DELETE_WINDOW', self.terminate)
self.dashboard = ttk.Frame(self.mainwindow)
self.mic_select = ttk.Frame(self.dashboard)
self.mic_label = ttk.Label(self.mic_select)
self.mic_label.configure(text='Select Microphone')
self.mic_label.place(anchor='nw', relwidth='0.60', relx='0.03', rely='0.0', x='0', y='0')
self.mic_options = ttk.Combobox(self.mic_select)
self.mic_options.place(anchor='nw', relx='0.03', rely='0.30', x='0', y='0')
self.mic_level = tk.Scale(self.mic_select)
self.mic_level.configure(digits='0', from_='0', orient='horizontal', repeatdelay='0')
self.mic_level.configure(showvalue='true', sliderrelief='flat', to='100')
self.mic_level.place(anchor='nw', relheight='0.61', relwidth='0.3', relx='0.53', rely='0.36', x='0', y='0')
self.refresh = tk.Button(self.mic_select)
self.refresh.configure(text='refresh')
self.refresh.place(anchor='nw', relx='0.03', rely='0.6', x='0', y='0')
self.refresh.configure(command=self.set_microphones)
self.threshold_label = tk.Label(self.mic_select)
self.threshold_label.configure(text='threshold')
self.threshold_label.place(anchor='nw', relx='0.58', x='0', y='0')
self.test_threshold = tk.Button(self.mic_select)
self.test_threshold.configure(text='save')
self.test_threshold.place(anchor='nw', relx='0.86', rely='0.34', x='0', y='0')
self.mic_select.configure(borderwidth='2', relief='raised')
self.mic_select.place(anchor='nw', relheight='1.0', relwidth='0.50', x='0', y='0')
self.control = tk.Frame(self.dashboard)
self.start = tk.Button(self.control)
self.start.configure(text='start')
self.start.place(anchor='nw', relheight='0.42', relwidth='0.25', relx='0.05', rely='0.25', x='0', y='0')
self.start.configure(command=self.start_audio)
self.stop = tk.Button(self.control)
self.stop.configure(compound='top', text='stop')
self.stop.place(anchor='nw', relheight='0.42', relwidth='0.25', relx='0.35', rely='0.25', x='0', y='0')
self.stop.configure(command=self.stop_audio)
self.record = tk.Button(self.control)
self.record.configure(text='tabify')
self.record.place(anchor='nw', relheight='0.42', relwidth='0.25', relx='0.65', rely='0.25', x='0', y='0')
self.record.configure(command=self.tabify)
self.control.configure(height='200', width='200')
self.control.place(anchor='nw', relheight='1', relwidth='0.5', relx='0.5', x='0', y='0')
self.dashboard.configure(relief='ridge')
self.dashboard.place(anchor='nw', relheight='0.15', relwidth='1.0', relx='0.0', rely='0.0', x='0', y='0')
self.display = ttk.Frame(self.mainwindow)
self.display.configure(height='200', width='200')
self.display.place(anchor='nw', relheight='0.85', relwidth='1.0', rely='0.15', x='0', y='0')
self.staff_display = ttk.Frame(self.display)
self.staff_display.configure(height='100', width='200')
self.staff_display.place(anchor='nw', relheight='0.85', relwidth='1.0', rely='0.15', x='0', y='0')
self.note_display = tk.Text(self.display)
self.note_display.configure(height='10', width='50')
_text_ = "Execute tabify to fill this with tab notation"
self.note_display.insert('0.0', _text_)
self.note_display.place(anchor='nw', relheight='0.5', relwidth='1.0', relx='0.0', rely='0.5', x='0', y='0')
self.mainwindow.configure(height='480', width='640')
self.mainwindow.minsize(640, 480)
def run(self):
self.model = Model()
self.mainwindow.mainloop()
def terminate(self):
print("Terminating the application...")
self.listen_for_notes = False
self.stop_thread = True
self.mainwindow.destroy()
if self.note_writer != None:
self.note_writer.terminate()
exit(0)
def update_staff(self):
img = tk.PhotoImage(file="demo.png")
self.image_window = ScrollableImage(self.staff_display, image=img,
scrollbarwidth=2)
self.image_window.configure(height='200', width='200')
self.image_window.place(anchor='nw', relheight='1.0', relwidth='1.0', rely='0', x='0', y='0')
def display_staff(self, notes):
# Generate the staff
self.staff_gen.main(notes, on_exit=self.update_staff)
def note_listener(self):
# This is a multithreaded function which listens for
# increase of notes and synchronizes the two lists
while True:
if self.stop_thread: # stop thread flag
break
if self.listen_for_notes:
if self.audio == None:
time.sleep(2)
continue
else:
if self.notes_played < self.audio.notes_played:
self.notes_played = self.audio.notes_played
# print("Number of notes played = ", self.notes_played)
threading.Thread(target=self.display_staff(self.audio.detected_notes))
else:
time.sleep(1)
continue
def start_audio(self):
threshhold_percentage = self.selected_mic_level.get()
threshold = (threshhold_percentage/100) * -200
self.audio = AudioHandler(threshold)
if self.selected_mic.get() == "":
showerror(
title="Select a microphone",
message="Microphone not selected!"
)
else:
mic = self.mic_map[self.selected_mic.get()]
self.audio.start(mic) # start with the selected mic
self.listen_for_notes = True # start listening for notes
def stop_audio(self):
if self.audio == None:
showwarning(
title="Nothing to stop",
message="Audio is not currently running, nothing to stop."
)
return
self.audio.stop()
# self.audio = None # clear the audio
self.listen_for_notes = False # stop listening for notes
def seq_to_text(self, sequence):
tuning = ["E", "B", "G", "D", "A", "e"]
tab_lists = {"E": [],
"B": [],
"G": [],
"D": [],
"A": [],
"e": []}
for i in sequence:
string = tuning[i[0]-1]
filler = "-" * len(str(i[1])) # get the length of "-" to match chars
for key in tab_lists.keys():
if key == string:
tab_lists[key].append(str(i[1]))
else:
tab_lists[key].append(filler)
tab_lists[key].append("-")
return "e| " + str("".join(tab_lists["e"])) + "\n" \
+ "B| " + str("".join(tab_lists["B"])) + "\n" \
+ "G| " + str("".join(tab_lists["G"])) + "\n" \
+ "D| " + str("".join(tab_lists["D"])) + "\n" \
+ "A| " + str("".join(tab_lists["A"])) + "\n" \
+ "E| " + str("".join(tab_lists["E"]))
def tabify(self):
# Obtain the sequence
if self.audio is None or len(self.audio.detected_notes) == 0:
showwarning(
title="Nothing to Tabify",
message="Record some notes first using the start button"
)
return
# Filter out impossible notes
sequence = self.audio.detected_notes
clean_sequence = []
for i in sequence:
listed = list(i)
if int(listed[-1]) <= 6 and int(listed[-1]) >= 2:
clean_sequence.append(i)
#print(clean_sequence)
# Classify sequence using the Model module
estimation = self.model.eval_sequence(clean_sequence)
#print(estimation)
text = self.seq_to_text(estimation)
# display the text in the text box
self.note_display.delete("1.0", "end-1c")
self.note_display.insert('0.0', text)
def set_microphones(self):
mics, value_map = self.get_audio_devices()
self.mic_map = value_map
self.mic_options.configure(values=mics) # NOTE: If you change microphone box id, this will break
self.mic_options.option_clear()
#print("set_microphone")
def get_audio_devices(self):
p = pyaudio.PyAudio()
mics = []
indx = []
for i in range(p.get_device_count()):
name = p.get_device_info_by_index(i).get('name')
if name.split(" ")[0] == "Microphone":
stripped_name = " ".join(name.split(" ")[1:])
stripped_name = [char for char in stripped_name]
stripped_name = "".join(stripped_name[1:-1]).strip()
if stripped_name not in mics:
mics.append(stripped_name)
indx.append(i)
# Convert to dictionary
dictionary = {}
for i, v in enumerate(mics):
device_indx = indx[i]
dictionary[v] = device_indx
return mics, dictionary
def save_pdf(self):
if self.staff_gen == None or self.audio == None or len(self.audio.detected_notes) == 0:
showwarning(
title="Nothing to save",
message="Nothing to save to PDF."
)
return
else:
self.staff_gen.save_pdf()
if __name__ == '__main__':
app = FirstLayoutApp()
app.run() | 0.433262 | 0.164315 |
from numpy import exp, array, random, dot
#A single neuron, with 3 input connections and 1 output connection.
#A single neuron, with 3 input connections and 1 output connection.
class NeuralNetwork():
    """A single artificial neuron: 3 inputs, 1 output, sigmoid activation."""

    def __init__(self):
        # Seed the RNG so every run starts from the same "random" weights.
        random.seed(1)
        # A 3 x 1 matrix with random weights, with values in the range -1 to 1
        self.synaptic_weights = 2 * random.random((3, 1)) - 1

    # The Sigmoid function, which describes an S shaped curve.
    def __sigmoid(self, x):
        return 1 / (1 + exp(-x))

    # The gradient of the Sigmoid curve (x is already a sigmoid output).
    def __sigmoid_derivative(self, x):
        return x * (1 - x)

    def train(self, training_set_inputs, training_set_outputs, number_of_training_iterations):
        """Adjust the weights by gradient descent over the whole training set.

        BUGFIX: the original iterated with the Python-2-only builtin
        ``xrange``, which raises NameError on Python 3; ``range`` behaves
        identically here on both versions.
        """
        for iteration in range(number_of_training_iterations):
            # Pass the training set through our neural network (a single neuron).
            output = self.think(training_set_inputs)
            # Calculate the error
            error = training_set_outputs - output
            # Multiply the error by the input and again by the gradient of the Sigmoid curve.
            adjustment = dot(training_set_inputs.T, error * self.__sigmoid_derivative(output))
            # Adjust the weights.
            self.synaptic_weights += adjustment

    def think(self, inputs):
        """Feed *inputs* through the neuron and return sigmoid activations."""
        return self.__sigmoid(dot(inputs, self.synaptic_weights))
if __name__ == "__main__":
    # Initialization (NOTE: this script uses Python 2 print statements).
    neural_network = NeuralNetwork()
    print "Random starting synaptic weights: "
    print neural_network.synaptic_weights
    # Training sets: four examples of three inputs each, plus expected outputs.
    training_set_inputs = array([[0, 0, 1], [1, 1, 1], [1, 0, 1], [0, 1, 1]])
    training_set_outputs = array([[0, 1, 1, 0]]).T
    # Do it 10,000 times and make adjustments each time.
    neural_network.train(training_set_inputs, training_set_outputs, 10000)
    print "New synaptic weights after training: "
    print neural_network.synaptic_weights
    # Test the neural network with a new, unseen situation.
    print "Considering new situation [1, 0, 0] -> ?: "
    print neural_network.think(array([1, 0, 0])) | main.py | from numpy import exp, array, random, dot
#A single neuron, with 3 input connections and 1 output connection.
class NeuralNetwork():
def __init__(self):
random.seed(1)
#A 3 x 1 matrix with random weights, with values in the range -1 to 1
self.synaptic_weights = 2 * random.random((3, 1)) - 1
# The Sigmoid function, which describes an S shaped curve.
def __sigmoid(self, x):
return 1 / (1 + exp(-x))
# the gradient of the Sigmoid curve.
def __sigmoid_derivative(self, x):
return x * (1 - x)
# Training the neural network through trial and error.
def train(self, training_set_inputs, training_set_outputs, number_of_training_iterations):
for iteration in xrange(number_of_training_iterations):
# Pass the training set through our neural network (a single neuron).
output = self.think(training_set_inputs)
# Calculate the error
error = training_set_outputs - output
# Multiply the error by the input and again by the gradient of the Sigmoid curve.
adjustment = dot(training_set_inputs.T, error * self.__sigmoid_derivative(output))
# Adjust the weights.
self.synaptic_weights += adjustment
def think(self, inputs):
return self.__sigmoid(dot(inputs, self.synaptic_weights))
if __name__ == "__main__":
#Initialization
neural_network = NeuralNetwork()
print "Random starting synaptic weights: "
print neural_network.synaptic_weights
#Training sets.
training_set_inputs = array([[0, 0, 1], [1, 1, 1], [1, 0, 1], [0, 1, 1]])
training_set_outputs = array([[0, 1, 1, 0]]).T
# Do it 10,000 times and makeadjustments each time.
neural_network.train(training_set_inputs, training_set_outputs, 10000)
print "New synaptic weights after training: "
print neural_network.synaptic_weights
# Test the neural network with a new situation.
print "Considering new situation [1, 0, 0] -> ?: "
print neural_network.think(array([1, 0, 0])) | 0.896312 | 0.743331 |
from typing import List
import bs4
import re
from typing import Iterable
from dataPipelines.gc_crawler.requestors import (
FileBasedPseudoRequestor,
DefaultRequestor,
)
from dataPipelines.gc_crawler.exec_model import Crawler, Parser, Pager
from dataPipelines.gc_crawler.data_model import Document, DownloadableItem
from dataPipelines.gc_crawler.utils import abs_url
from . import SOURCE_SAMPLE_DIR, BASE_SOURCE_URL
class ExamplePager(Pager):
    """Pager over the two static sample listing pages of the example site."""

    def iter_page_links(self) -> Iterable[str]:
        """Yield the absolute URL of each sample listing page."""
        page_names = ('page_1.html', 'page_2.html')
        for page_name in page_names:
            yield "/".join((self.starting_url, page_name))
class ExampleParser(Parser):
    """Parser for Example crawler.

    Extracts one Document per ``div.download-item`` on a listing page,
    collecting its PDF/XML download links as DownloadableItems.
    """
    def parse_docs_from_page(self, page_url: str, page_text: str) -> Iterable[Document]:
        """Parse document objects from page of text"""
        soup = bs4.BeautifulSoup(page_text, features="lxml")
        download_item_divs = soup.select(
            'div.main-content > div.downloads > div.download-item'
        )
        parsed_docs = []
        for div in download_item_divs:
            # general pub and versioning info
            title = div.select_one("div.download-title").text.strip()
            pub_date = div.select_one("div.download-pub-date").text.strip()
            # all fields that will be used for versioning
            version_hash_fields = {'pub_date': pub_date}
            # gathering downloadable items' info
            download_links_div = div.select_one('div.download-links')
            download_items: List[DownloadableItem] = []
            for pub_type, pattern in [('pdf', r'\bPDF\b'), ('xml', r'\bXML\b')]:
                # Find the anchor whose link text names this publication type.
                a_tag = download_links_div.find(
                    name='a', text=re.compile(pattern=pattern, flags=re.IGNORECASE)
                )
                download_url: str = ""
                if a_tag:
                    download_url = abs_url(page_url, a_tag.attrs['href'])
                else:
                    # No link of this type on the page; skip this pub_type.
                    continue
                # File extension of the actual download target, if any.
                actual_download_file_type_matcher = re.match(
                    r".*[.](\w+)$", download_url
                )
                actual_download_file_type = (
                    actual_download_file_type_matcher.group(1)
                    if actual_download_file_type_matcher
                    else None
                )
                download_items.append(
                    DownloadableItem(
                        doc_type=pub_type,
                        web_url=download_url,
                        # Record a container type only when the real extension
                        # differs from the declared publication type.
                        compression_type=(
                            actual_download_file_type
                            if (actual_download_file_type or '').lower() != pub_type
                            else None
                        ),
                    )
                )
            # generate final document object
            doc = Document(
                doc_name=title,
                doc_title=title,
                doc_num="1",
                doc_type="example",
                source_page_url=page_url,
                publication_date=None,
                cac_login_required=False,
                version_hash_raw_data=version_hash_fields,
                downloadable_items=download_items,
                crawler_used="example"
            )
            parsed_docs.append(doc)
        return parsed_docs
class ExampleCrawler(Crawler):
    """Crawler for the example web scraper."""

    def __init__(self, starting_url: str = "http://localhost:8000"):
        # Wire the default pager/parser pair for the example site.
        super().__init__(
            pager=ExamplePager(starting_url=starting_url),
            parser=ExampleParser(),
        )
class FakeExampleCrawler(Crawler):
    """Example crawler that serves pre-downloaded sample pages from disk
    instead of hitting the network."""

    def __init__(self, *args, **kwargs):
        # A pseudo-requestor that maps URLs under BASE_SOURCE_URL onto the
        # local sample directory.
        requestor = FileBasedPseudoRequestor(
            fake_web_base_url=BASE_SOURCE_URL,
            source_sample_dir_path=SOURCE_SAMPLE_DIR,
        )
        pager = ExamplePager(requestor=requestor, starting_url=BASE_SOURCE_URL)
        super().__init__(*args, **kwargs, pager=pager, parser=ExampleParser())
import bs4
import re
from typing import Iterable
from dataPipelines.gc_crawler.requestors import (
FileBasedPseudoRequestor,
DefaultRequestor,
)
from dataPipelines.gc_crawler.exec_model import Crawler, Parser, Pager
from dataPipelines.gc_crawler.data_model import Document, DownloadableItem
from dataPipelines.gc_crawler.utils import abs_url
from . import SOURCE_SAMPLE_DIR, BASE_SOURCE_URL
class ExamplePager(Pager):
"""Pager for Example crawler"""
def iter_page_links(self) -> Iterable[str]:
"""Iterator for page links"""
for sample_file in 'page_1.html', 'page_2.html':
yield self.starting_url + "/" + sample_file
class ExampleParser(Parser):
"""Parser for Example crawler"""
def parse_docs_from_page(self, page_url: str, page_text: str) -> Iterable[Document]:
"""Parse document objects from page of text"""
soup = bs4.BeautifulSoup(page_text, features="lxml")
download_item_divs = soup.select(
'div.main-content > div.downloads > div.download-item'
)
parsed_docs = []
for div in download_item_divs:
# general pub and versioning info
title = div.select_one("div.download-title").text.strip()
pub_date = div.select_one("div.download-pub-date").text.strip()
# all fields that will be used for versioning
version_hash_fields = {'pub_date': pub_date}
# gathering downloadable items' info
download_links_div = div.select_one('div.download-links')
download_items: List[DownloadableItem] = []
for pub_type, pattern in [('pdf', r'\bPDF\b'), ('xml', r'\bXML\b')]:
a_tag = download_links_div.find(
name='a', text=re.compile(pattern=pattern, flags=re.IGNORECASE)
)
download_url: str = ""
if a_tag:
download_url = abs_url(page_url, a_tag.attrs['href'])
else:
continue
actual_download_file_type_matcher = re.match(
r".*[.](\w+)$", download_url
)
actual_download_file_type = (
actual_download_file_type_matcher.group(1)
if actual_download_file_type_matcher
else None
)
download_items.append(
DownloadableItem(
doc_type=pub_type,
web_url=download_url,
compression_type=(
actual_download_file_type
if (actual_download_file_type or '').lower() != pub_type
else None
),
)
)
# generate final document object
doc = Document(
doc_name=title,
doc_title=title,
doc_num="1",
doc_type="example",
source_page_url=page_url,
publication_date=None,
cac_login_required=False,
version_hash_raw_data=version_hash_fields,
downloadable_items=download_items,
crawler_used="example"
)
parsed_docs.append(doc)
return parsed_docs
class ExampleCrawler(Crawler):
"""Crawler for the example web scraper"""
def __init__(self, starting_url: str = "http://localhost:8000"):
pager = ExamplePager(starting_url=starting_url)
super().__init__(pager=pager, parser=ExampleParser())
class FakeExampleCrawler(Crawler):
"""Example crawler that just uses local source files"""
def __init__(self, *args, **kwargs):
super().__init__(
*args,
**kwargs,
pager=ExamplePager(
requestor=FileBasedPseudoRequestor(
fake_web_base_url=BASE_SOURCE_URL,
source_sample_dir_path=SOURCE_SAMPLE_DIR,
),
starting_url=BASE_SOURCE_URL,
),
parser=ExampleParser(),
) | 0.640973 | 0.227781 |
import requests
import sys
import json
import urllib.parse
server = "172.16.58.3"
# Remeber to change hf_server back to the one above
# when sending a pull request to the public repo
# server = "localhost"
port = "8880"
# Register and enroll new user in organization
def register_user(user_name, organization):
    """Register and enroll *user_name* in *organization*.

    Returns the auth token from the server response.
    Raises KeyError if the response contains no 'token'/'success' field.
    """
    url = "http://{}:{}/users".format(server, port)
    headers = {"content-type": "application/x-www-form-urlencoded"}
    data = {"username": user_name, "orgName": organization}
    print("Registering user")
    response = requests.post(url, headers=headers, data=data)
    # BUGFIX(perf/clarity): parse the JSON body once; the original called
    # json.loads(response.text) three separate times.
    payload = json.loads(response.text)
    print(payload)
    print(payload['success'] == True)
    return payload['token']
# Create channel request
def create_channel(token, channel_name):
    """POST a channel-creation request for *channel_name*."""
    url = "http://{}:{}/channels".format(server, port)
    headers = {
        "authorization": "Bearer " + token,
        "content-type": "application/json",
    }
    payload = {
        "channelName": channel_name,
        "channelConfigPath": "../artifacts/channel/{}.tx".format(channel_name),
    }
    print("Creating channel")
    print(requests.post(url, headers=headers, json=payload).text)
# Join channel request
def join_channel(token, organization_lower, channel_name):
    """Ask both peers of the organization to join *channel_name*."""
    url = "http://{}:{}/channels/{}/peers".format(server, port, channel_name)
    headers = {
        "authorization": "Bearer " + token,
        "content-type": "application/json",
    }
    peers = ["peer0.{}.example.com".format(organization_lower),
             "peer1.{}.example.com".format(organization_lower)]
    print("Joining channel")
    print(requests.post(url, headers=headers, json={"peers": peers}).text)
# Install chaincode
def install_chaincode(token, organization_lower, chaincode_name, chaincode_path, chaincode_lang):
    """Install chaincode version v0 on both peers of the organization."""
    url = "http://{}:{}/chaincodes".format(server, port)
    headers = {
        "authorization": "Bearer " + token,
        "content-type": "application/json",
    }
    payload = {
        "peers": ["peer0.{}.example.com".format(organization_lower),
                  "peer1.{}.example.com".format(organization_lower)],
        "chaincodeName": chaincode_name,
        "chaincodePath": chaincode_path,
        "chaincodeType": chaincode_lang,
        "chaincodeVersion": "v0",
    }
    print("Installing chaincode")
    print(requests.post(url, headers=headers, json=payload).text)
# Instantiate chaincode
def instantiate_chaincode(token, organization_lower, channel_name, chaincode_name, chaincode_lang):
    """Instantiate chaincode version v0 on *channel_name* with default args."""
    url = "http://{}:{}/channels/{}/chaincodes".format(server, port, channel_name)
    headers = {
        "authorization": "Bearer " + token,
        "content-type": "application/json",
    }
    payload = {
        "chaincodeName": chaincode_name,
        "chaincodeType": chaincode_lang,
        "chaincodeVersion": "v0",
        "args": ["a", "100", "b", "200"],
    }
    print("Instantiating chaincode")
    print(requests.post(url, headers=headers, json=payload).text)
def get_installed_chaincodes(token, org):
url = "http://{}:{}/chaincodes?peer=peer0.{}.example.com&type=installed".format(server, port, org)
auth = "Bearer " + token
headers = {"authorization": auth, "content-type": "application/json"}
print("Getting installed chaincodes")
response = requests.get(url, headers=headers)
print(response.text)
def get_instantiated_chaincodes(token, org):
url = "http://{}:{}/chaincodes?peer=peer0.{}.example.com&type=instantiated".format(server, port, org)
auth = "Bearer " + token
headers = {"authorization": auth, "content-type": "application/json"}
print("Getting instantiated chaincodes")
response = requests.get(url, headers=headers)
print(response.text)
def query_job(token, channel_name, chaincode_name, org, job_name):
query_set = str([job_name])
url = "http://{}:{}/channels/{}/chaincodes/{}?peer=peer0.{}.example.com&fcn=queryJob&args={}".format(server, port,
channel_name, chaincode_name, org, urllib.parse.quote(query_set))
auth = "Bearer " + token
headers = {"authorization": auth, "content-type": "application/json"}
print("Querying job")
response = requests.get(url, headers=headers)
print(response.text)
def invoke_new_job(token, channel_name, chaincode_name, org, job_id, service, developer, provider, provider_org):
url = "http://{}:{}/channels/{}/chaincodes/{}".format(server, port, channel_name, chaincode_name)
auth = "Bearer " + token
headers = {"authorization": auth, "content-type": "application/json"}
data = {"peers": ["peer0.org1.example.com","peer0.org2.example.com"],
"fcn":"createJob",
"args":[job_id, service, developer, provider, provider_org]}
print("Creating new job")
response = requests.post(url, headers=headers, json=data)
print(response.text)
def invoke_set_time(token, channel_name, chaincode_name, org, job_id, time):
url = "http://{}:{}/channels/{}/chaincodes/{}".format(server, port, channel_name, chaincode_name)
auth = "Bearer " + token
headers = {"authorization": auth, "content-type": "application/json"}
data = {"peers": ["peer0.org1.example.com","peer0.org2.example.com"],
"fcn":"setTime",
"args":[job_id, time]}
print("Setting the time")
response = requests.post(url, headers=headers, json=data)
print(response.text)
return response
def invoke_received_result(token, channel_name, chaincode_name, org, job_id):
url = "http://{}:{}/channels/{}/chaincodes/{}".format(server, port, channel_name, chaincode_name)
auth = "Bearer " + token
headers = {"authorization": auth, "content-type": "application/json"}
data = {"peers": ["peer0.org1.example.com","peer0.org2.example.com"],
"fcn":"receivedResult",
"args":[job_id]}
print("Setting the received result value")
response = requests.post(url, headers=headers, json=data)
print(response.text)
def invoke_balance_transfer_from_fabcar(token, channel_name, chaincode_name, org):
url = "http://{}:{}/channels/{}/chaincodes/{}".format(server, port, channel_name, chaincode_name)
auth = "Bearer " + token
headers = {"authorization": auth, "content-type": "application/json"}
data = {"peers": ["peer0.org1.example.com","peer0.org2.example.com"],
"fcn":"my_cc",
"args":["move", "b", "a", "hi"]}
print("Invoke balance transfer")
response = requests.post(url, headers=headers, json=data)
print(response.text)
def invoke_balance_transfer(token, channel_name, chaincode_name, org):
url = "http://{}:{}/channels/{}/chaincodes/{}".format(server, port, channel_name, chaincode_name)
auth = "Bearer " + token
headers = {"authorization": auth, "content-type": "application/json"}
data = {"peers": ["peer0.org1.example.com","peer0.org2.example.com"],
"fcn":"move",
"args":["a", "b", "1"]}
print("Invoke balance transfer")
response = requests.post(url, headers=headers, json=data)
print(response.text)
def invoke_balance_transfer_new_user(token, channel_name, chaincode_name, org, name, balance):
url = "http://{}:{}/channels/{}/chaincodes/{}".format(server, port, channel_name, chaincode_name)
auth = "Bearer " + token
headers = {"authorization": auth, "content-type": "application/json"}
data = {"peers": ["peer0.org1.example.com","peer0.org2.example.com"],
"fcn":"new_user",
"args":[name, balance]}
print("Adding new user")
response = requests.post(url, headers=headers, json=data)
print(response.text)
def query_account(token, channel_name, chaincode_name, org, job_name):
query_set = str([job_name])
url = "http://{}:{}/channels/{}/chaincodes/{}?peer=peer0.{}.example.com&fcn=query&args={}".format(server, port,
channel_name, chaincode_name, org, urllib.parse.quote(query_set))
auth = "Bearer " + token
headers = {"authorization": auth, "content-type": "application/json"}
print("Querying account")
response = requests.get(url, headers=headers)
print(response.text)
def get_logs(token):
url = "http://{}:{}/logs".format(server, port)
auth = "Bearer " + token
headers = {"authorization": auth, "content-type": "application/json"}
print("Getting logs")
response = requests.get(url, headers=headers)
print(response.text)
hf_server = "172.16.31.10"
controller_token = "<KEY>"
channel_name="mychannel"
def invoke_received_result2(job_id, server=hf_server, token=controller_token, channel_name=channel_name):
chaincode_name = "monitoring"
url = "http://{}:{}/channels/{}/chaincodes/{}".format(server, port, channel_name, chaincode_name)
auth = "Bearer " + token
headers = {"authorization": auth, "content-type": "application/json"}
data = {"peers": ["peer0.org1.example.com","peer0.org2.example.com"],
"fcn":"receivedResult",
"args":[job_id]}
print("Setting the received result value")
response = requests.post(url, headers=headers, json=data)
print(response.text)
def initialize_network():
monetaryChaincode = 'monetary'
monetaryPath = "./artifacts/src/monetary"
monitoringChaincode = 'monitoring'
monitoringPath= "./artifacts/src/monitoring"
chaincodeLang = "node"
user_list = ['sghaemi', 'admin', 'controller', 'cc_provider1', 'cc_provider2', 'cc_provider3', 'cc_provider4']
token1 = register_user('temp', 'Org1')
token2 = register_user('temp', 'Org2')
create_channel(token1, channelName)
join_channel(token1, 'org1', channelName)
join_channel(token2, 'org2', channelName)
install_chaincode(token1, 'org1', monetaryChaincode, monetaryPath, chaincodeLang)
install_chaincode(token2, 'org2', monetaryChaincode, monetaryPath, chaincodeLang)
install_chaincode(token1, 'org1', monitoringChaincode, monitoringPath, chaincodeLang)
install_chaincode(token2, 'org2', monitoringChaincode, monitoringPath, chaincodeLang)
instantiate_chaincode(token1, 'org1', channelName, monetaryChaincode, chaincodeLang)
instantiate_chaincode(token2, 'org2', channelName, monitoringChaincode, chaincodeLang)
for user in user_list:
invoke_balance_transfer_new_user(token1, channelName, monetaryChaincode, 'org1', user, "700")
return token1, token2
if __name__ == "__main__":
username = sys.argv[1]
org = sys.argv[2]
orgLower = org.lower()
channelName = "mychannel"
# chaincodePath = "./artifacts/src/monetary"
# chaincodeName = "monetary"
chaincodePath = "./artifacts/src/monitoring"
chaincodeName = "monitoring"
chaincodeLang = "node"
token_org2 = "<KEY>"
token_org1 = "<KEY>"
# token_org1, token_org2 = initialize_network()
if org == "Org1":
token = token_org1
elif org == "Org2":
token = token_org2
token = register_user(username, org)
# print(get_logs(token))
# invoke_balance_transfer_new_user(token, channelName, "monetary", 'org1', 'controller', "600")
# create_channel(token, channelName)
# join_channel(token, orgLower, channelName)
# install_chaincode(token, orgLower, chaincodeName, chaincodePath, chaincodeLang)
# instantiate_chaincode(token, orgLower, channelName, chaincodeName, chaincodeLang)
# query_job(token, channelName, chaincodeName, orgLower, "90")
# invoke_new_job(token, channelName, chaincodeName, orgLower, "-1", "10", "admin", "sghaemi", "Org2")
# r = invoke_set_time(token, channelName, chaincodeName, orgLower, "-1", "100")
# print('User was not found' in r.text)
# invoke_received_result(token, channelName, chaincodeName, orgLower, "16")
# invoke_received_result2("97")
# query_job(token, channelName, chaincodeName, orgLower, "100")
# invoke_balance_transfer_from_fabcar(token, channelName, chaincodeName, orgLower)
# invoke_balance_transfer(token, channelName, chaincodeName, orgLower)
# query_account(token, channelName, 'monetary', orgLower, 'developer_test')
# query_account(token, channelName, "monetary", orgLower, 'cc_provider1')
# query_account(token, channelName, "monetary", orgLower, 'cc_provider2')
# query_account(token, channelName, "monetary", orgLower, 'cc_provider3')
# query_account(token, channelName, "monetary", orgLower, 'cc_provider4')
# invoke_balance_transfer_new_user(token, channelName, "monetary", orgLower, "controller", "600")
# invoke_balance_transfer_new_user(token, channelName, "monetary", orgLower, "admin", "600")
# invoke_balance_transfer_new_user(token, channelName, "monetary", orgLower, "sara_test", "600")
# get_installed_chaincodes(token, orgLower)
# get_instantiated_chaincodes(token, orgLower) | ComputeProvider/HFRequests.py | import requests
import sys
import json
import urllib.parse
server = "172.16.58.3"
# Remeber to change hf_server back to the one above
# when sending a pull request to the public repo
# server = "localhost"
port = "8880"
# Register and enroll new user in organization
def register_user(user_name, organization):
url = "http://{}:{}/users".format(server, port)
headers = {"content-type": "application/x-www-form-urlencoded"}
data = {"username":user_name, "orgName": organization}
print("Registering user")
response = requests.post(url, headers=headers, data=data)
print(json.loads(response.text))
print(json.loads(response.text)['success'] == True)
return json.loads(response.text)['token']
# Create channel request
def create_channel(token, channel_name):
url = "http://{}:{}/channels".format(server, port)
auth = "Bearer " + token
headers = {"authorization": auth, "content-type": "application/json"}
data = {"channelName": channel_name,
"channelConfigPath":"../artifacts/channel/{}.tx".format(channel_name)}
print("Creating channel")
response = requests.post(url, headers=headers, json=data)
print(response.text)
# Join channel request
def join_channel(token, organization_lower, channel_name):
url = "http://{}:{}/channels/{}/peers".format(server, port, channel_name)
auth = "Bearer " + token
headers = {"authorization": auth, "content-type": "application/json"}
data = {"peers": ["peer0.{}.example.com".format(organization_lower),"peer1.{}.example.com".format(organization_lower)]}
print("Joining channel")
response = requests.post(url, headers=headers, json=data)
print(response.text)
# Install chaincode
def install_chaincode(token, organization_lower ,chaincode_name, chaincode_path, chaincode_lang):
url = "http://{}:{}/chaincodes".format(server, port)
auth = "Bearer " + token
headers = {"authorization": auth, "content-type": "application/json"}
data = {"peers": ["peer0.{}.example.com".format(organization_lower),"peer1.{}.example.com".format(organization_lower)],
"chaincodeName": chaincode_name,
"chaincodePath": chaincode_path,
"chaincodeType": chaincode_lang,
"chaincodeVersion":"v0"}
print("Installing chaincode")
response = requests.post(url, headers=headers, json=data)
print(response.text)
# Instantiate chaincode
def instantiate_chaincode(token, organization_lower, channel_name, chaincode_name, chaincode_lang):
url = "http://{}:{}/channels/{}/chaincodes".format(server, port, channel_name)
auth = "Bearer " + token
headers = {"authorization": auth, "content-type": "application/json"}
data = {
"chaincodeName": chaincode_name,
"chaincodeType": chaincode_lang,
"chaincodeVersion":"v0",
"args":["a","100","b","200"]
}
print("Instantiating chaincode")
response = requests.post(url, headers=headers, json=data)
print(response.text)
def get_installed_chaincodes(token, org):
url = "http://{}:{}/chaincodes?peer=peer0.{}.example.com&type=installed".format(server, port, org)
auth = "Bearer " + token
headers = {"authorization": auth, "content-type": "application/json"}
print("Getting installed chaincodes")
response = requests.get(url, headers=headers)
print(response.text)
def get_instantiated_chaincodes(token, org):
url = "http://{}:{}/chaincodes?peer=peer0.{}.example.com&type=instantiated".format(server, port, org)
auth = "Bearer " + token
headers = {"authorization": auth, "content-type": "application/json"}
print("Getting instantiated chaincodes")
response = requests.get(url, headers=headers)
print(response.text)
def query_job(token, channel_name, chaincode_name, org, job_name):
query_set = str([job_name])
url = "http://{}:{}/channels/{}/chaincodes/{}?peer=peer0.{}.example.com&fcn=queryJob&args={}".format(server, port,
channel_name, chaincode_name, org, urllib.parse.quote(query_set))
auth = "Bearer " + token
headers = {"authorization": auth, "content-type": "application/json"}
print("Querying job")
response = requests.get(url, headers=headers)
print(response.text)
def invoke_new_job(token, channel_name, chaincode_name, org, job_id, service, developer, provider, provider_org):
url = "http://{}:{}/channels/{}/chaincodes/{}".format(server, port, channel_name, chaincode_name)
auth = "Bearer " + token
headers = {"authorization": auth, "content-type": "application/json"}
data = {"peers": ["peer0.org1.example.com","peer0.org2.example.com"],
"fcn":"createJob",
"args":[job_id, service, developer, provider, provider_org]}
print("Creating new job")
response = requests.post(url, headers=headers, json=data)
print(response.text)
def invoke_set_time(token, channel_name, chaincode_name, org, job_id, time):
url = "http://{}:{}/channels/{}/chaincodes/{}".format(server, port, channel_name, chaincode_name)
auth = "Bearer " + token
headers = {"authorization": auth, "content-type": "application/json"}
data = {"peers": ["peer0.org1.example.com","peer0.org2.example.com"],
"fcn":"setTime",
"args":[job_id, time]}
print("Setting the time")
response = requests.post(url, headers=headers, json=data)
print(response.text)
return response
def invoke_received_result(token, channel_name, chaincode_name, org, job_id):
url = "http://{}:{}/channels/{}/chaincodes/{}".format(server, port, channel_name, chaincode_name)
auth = "Bearer " + token
headers = {"authorization": auth, "content-type": "application/json"}
data = {"peers": ["peer0.org1.example.com","peer0.org2.example.com"],
"fcn":"receivedResult",
"args":[job_id]}
print("Setting the received result value")
response = requests.post(url, headers=headers, json=data)
print(response.text)
def invoke_balance_transfer_from_fabcar(token, channel_name, chaincode_name, org):
url = "http://{}:{}/channels/{}/chaincodes/{}".format(server, port, channel_name, chaincode_name)
auth = "Bearer " + token
headers = {"authorization": auth, "content-type": "application/json"}
data = {"peers": ["peer0.org1.example.com","peer0.org2.example.com"],
"fcn":"my_cc",
"args":["move", "b", "a", "hi"]}
print("Invoke balance transfer")
response = requests.post(url, headers=headers, json=data)
print(response.text)
def invoke_balance_transfer(token, channel_name, chaincode_name, org):
url = "http://{}:{}/channels/{}/chaincodes/{}".format(server, port, channel_name, chaincode_name)
auth = "Bearer " + token
headers = {"authorization": auth, "content-type": "application/json"}
data = {"peers": ["peer0.org1.example.com","peer0.org2.example.com"],
"fcn":"move",
"args":["a", "b", "1"]}
print("Invoke balance transfer")
response = requests.post(url, headers=headers, json=data)
print(response.text)
def invoke_balance_transfer_new_user(token, channel_name, chaincode_name, org, name, balance):
url = "http://{}:{}/channels/{}/chaincodes/{}".format(server, port, channel_name, chaincode_name)
auth = "Bearer " + token
headers = {"authorization": auth, "content-type": "application/json"}
data = {"peers": ["peer0.org1.example.com","peer0.org2.example.com"],
"fcn":"new_user",
"args":[name, balance]}
print("Adding new user")
response = requests.post(url, headers=headers, json=data)
print(response.text)
def query_account(token, channel_name, chaincode_name, org, job_name):
query_set = str([job_name])
url = "http://{}:{}/channels/{}/chaincodes/{}?peer=peer0.{}.example.com&fcn=query&args={}".format(server, port,
channel_name, chaincode_name, org, urllib.parse.quote(query_set))
auth = "Bearer " + token
headers = {"authorization": auth, "content-type": "application/json"}
print("Querying account")
response = requests.get(url, headers=headers)
print(response.text)
def get_logs(token):
url = "http://{}:{}/logs".format(server, port)
auth = "Bearer " + token
headers = {"authorization": auth, "content-type": "application/json"}
print("Getting logs")
response = requests.get(url, headers=headers)
print(response.text)
hf_server = "172.16.31.10"
controller_token = "<KEY>"
channel_name="mychannel"
def invoke_received_result2(job_id, server=hf_server, token=controller_token, channel_name=channel_name):
chaincode_name = "monitoring"
url = "http://{}:{}/channels/{}/chaincodes/{}".format(server, port, channel_name, chaincode_name)
auth = "Bearer " + token
headers = {"authorization": auth, "content-type": "application/json"}
data = {"peers": ["peer0.org1.example.com","peer0.org2.example.com"],
"fcn":"receivedResult",
"args":[job_id]}
print("Setting the received result value")
response = requests.post(url, headers=headers, json=data)
print(response.text)
def initialize_network():
monetaryChaincode = 'monetary'
monetaryPath = "./artifacts/src/monetary"
monitoringChaincode = 'monitoring'
monitoringPath= "./artifacts/src/monitoring"
chaincodeLang = "node"
user_list = ['sghaemi', 'admin', 'controller', 'cc_provider1', 'cc_provider2', 'cc_provider3', 'cc_provider4']
token1 = register_user('temp', 'Org1')
token2 = register_user('temp', 'Org2')
create_channel(token1, channelName)
join_channel(token1, 'org1', channelName)
join_channel(token2, 'org2', channelName)
install_chaincode(token1, 'org1', monetaryChaincode, monetaryPath, chaincodeLang)
install_chaincode(token2, 'org2', monetaryChaincode, monetaryPath, chaincodeLang)
install_chaincode(token1, 'org1', monitoringChaincode, monitoringPath, chaincodeLang)
install_chaincode(token2, 'org2', monitoringChaincode, monitoringPath, chaincodeLang)
instantiate_chaincode(token1, 'org1', channelName, monetaryChaincode, chaincodeLang)
instantiate_chaincode(token2, 'org2', channelName, monitoringChaincode, chaincodeLang)
for user in user_list:
invoke_balance_transfer_new_user(token1, channelName, monetaryChaincode, 'org1', user, "700")
return token1, token2
if __name__ == "__main__":
username = sys.argv[1]
org = sys.argv[2]
orgLower = org.lower()
channelName = "mychannel"
# chaincodePath = "./artifacts/src/monetary"
# chaincodeName = "monetary"
chaincodePath = "./artifacts/src/monitoring"
chaincodeName = "monitoring"
chaincodeLang = "node"
token_org2 = "<KEY>"
token_org1 = "<KEY>"
# token_org1, token_org2 = initialize_network()
if org == "Org1":
token = token_org1
elif org == "Org2":
token = token_org2
token = register_user(username, org)
# print(get_logs(token))
# invoke_balance_transfer_new_user(token, channelName, "monetary", 'org1', 'controller', "600")
# create_channel(token, channelName)
# join_channel(token, orgLower, channelName)
# install_chaincode(token, orgLower, chaincodeName, chaincodePath, chaincodeLang)
# instantiate_chaincode(token, orgLower, channelName, chaincodeName, chaincodeLang)
# query_job(token, channelName, chaincodeName, orgLower, "90")
# invoke_new_job(token, channelName, chaincodeName, orgLower, "-1", "10", "admin", "sghaemi", "Org2")
# r = invoke_set_time(token, channelName, chaincodeName, orgLower, "-1", "100")
# print('User was not found' in r.text)
# invoke_received_result(token, channelName, chaincodeName, orgLower, "16")
# invoke_received_result2("97")
# query_job(token, channelName, chaincodeName, orgLower, "100")
# invoke_balance_transfer_from_fabcar(token, channelName, chaincodeName, orgLower)
# invoke_balance_transfer(token, channelName, chaincodeName, orgLower)
# query_account(token, channelName, 'monetary', orgLower, 'developer_test')
# query_account(token, channelName, "monetary", orgLower, 'cc_provider1')
# query_account(token, channelName, "monetary", orgLower, 'cc_provider2')
# query_account(token, channelName, "monetary", orgLower, 'cc_provider3')
# query_account(token, channelName, "monetary", orgLower, 'cc_provider4')
# invoke_balance_transfer_new_user(token, channelName, "monetary", orgLower, "controller", "600")
# invoke_balance_transfer_new_user(token, channelName, "monetary", orgLower, "admin", "600")
# invoke_balance_transfer_new_user(token, channelName, "monetary", orgLower, "sara_test", "600")
# get_installed_chaincodes(token, orgLower)
# get_instantiated_chaincodes(token, orgLower) | 0.298491 | 0.120724 |
import os
import torch
import numpy as np
import cv2
from torch.utils import data
from ptsemseg.utils import recursive_glob
from ptsemseg.augmentations import *
class larynxLoader(data.Dataset):
def __init__(self, root, split="train", is_transform=False, img_size=(480, 640), augmentations=None, img_norm=True):
"""__init__
:param root:
:param split:
:param is_transform:
:param img_size:
:param augmentations
"""
self.root = "/home/felix/projects/larynx/data/"
self.split = split
self.is_transform = is_transform
self.augmentations = augmentations
self.img_norm = img_norm
self.n_classes = 3
self.img_size = img_size if isinstance(img_size, (tuple, list)) else (img_size, img_size)
self.mean = np.array([103.939, 116.779, 123.68])
self.ignore_index = 250
self.files = {}
self.void_classes = []
self.valid_classes = [0, 1, 2]
self.class_map = dict(zip(self.valid_classes, range(self.n_classes)))
self.colors = [
[ 0, 0, 0],
[255, 0, 0],
[ 0, 0, 255],
]
self.label_colours = dict(zip(range(self.n_classes), self.colors))
self.class_names = ["background", "granuloma", "ulcerations"]
self.images_base = os.path.join(self.root, self.split, "images")
self.annotations_base = os.path.join(self.root, self.split, "annotations")
self.files[split] = recursive_glob(rootdir=self.images_base, suffix=".png")
if not self.files[split]:
raise Exception("No files for split=[%s] found in %s" % (split, self.images_base))
print("Found %d %s images" % (len(self.files[split]), split))
def __len__(self):
"""__len__"""
return len(self.files[self.split])
def __getitem__(self, index):
"""__getitem__
:param index:
"""
img_path = self.files[self.split][index].rstrip()
lbl_path = os.path.join(self.annotations_base, os.path.basename(img_path))
img = cv2.imread(img_path)
lbl = cv2.imread(lbl_path, 0)
lbl = self.encode_segmap(lbl)
if self.augmentations is not None:
img, lbl = self.augmentations(img, lbl)
if self.is_transform:
img, lbl = self.transform(img, lbl)
return img, lbl
def transform(self, img, lbl):
"""transform
:param img:
:param lbl:
"""
img = cv2.resize(img, (self.img_size[1], self.img_size[0]), interpolation=cv2.INTER_AREA)
img = img.astype(np.float64)
img -= self.mean
if self.img_norm: img = img / 255.0
img = img.transpose(2, 0, 1) # NHWC -> NCHW
classes = np.unique(lbl)
lbl = cv2.resize(lbl, (self.img_size[1], self.img_size[0]), interpolation=cv2.INTER_NEAREST)
if not np.all(classes == np.unique(lbl)):
print("WARN: resizing labels yielded fewer classes")
if not np.all(np.unique(lbl[lbl != self.ignore_index]) < self.n_classes):
print("after det", classes, np.unique(lbl))
raise ValueError("Segmentation map contained invalid class values")
img = torch.from_numpy(img).float()
lbl = torch.from_numpy(lbl).long()
return img, lbl
def encode_segmap(self, mask):
for _voidc in self.void_classes:
mask[mask == _voidc] = self.ignore_index
for _validc in self.valid_classes:
mask[mask == _validc] = self.class_map[_validc]
return mask
def decode_segmap(self, temp):
dest = np.zeros((temp.shape[0], temp.shape[1], 3))
for l in range(0, self.n_classes):
dest[temp == l] = self.label_colours[l]
return dest
def decode_image(self, img):
img = img.transpose(1, 2, 0) # NHWC -> NCHW
img = img.astype(np.float64)
if self.img_norm: img = img * 255.0
img += self.mean
img = img.astype(np.uint8)
return img | ptsemseg/loader/larynx_loader.py | import os
import torch
import numpy as np
import cv2
from torch.utils import data
from ptsemseg.utils import recursive_glob
from ptsemseg.augmentations import *
class larynxLoader(data.Dataset):
def __init__(self, root, split="train", is_transform=False, img_size=(480, 640), augmentations=None, img_norm=True):
"""__init__
:param root:
:param split:
:param is_transform:
:param img_size:
:param augmentations
"""
self.root = "/home/felix/projects/larynx/data/"
self.split = split
self.is_transform = is_transform
self.augmentations = augmentations
self.img_norm = img_norm
self.n_classes = 3
self.img_size = img_size if isinstance(img_size, (tuple, list)) else (img_size, img_size)
self.mean = np.array([103.939, 116.779, 123.68])
self.ignore_index = 250
self.files = {}
self.void_classes = []
self.valid_classes = [0, 1, 2]
self.class_map = dict(zip(self.valid_classes, range(self.n_classes)))
self.colors = [
[ 0, 0, 0],
[255, 0, 0],
[ 0, 0, 255],
]
self.label_colours = dict(zip(range(self.n_classes), self.colors))
self.class_names = ["background", "granuloma", "ulcerations"]
self.images_base = os.path.join(self.root, self.split, "images")
self.annotations_base = os.path.join(self.root, self.split, "annotations")
self.files[split] = recursive_glob(rootdir=self.images_base, suffix=".png")
if not self.files[split]:
raise Exception("No files for split=[%s] found in %s" % (split, self.images_base))
print("Found %d %s images" % (len(self.files[split]), split))
def __len__(self):
"""__len__"""
return len(self.files[self.split])
def __getitem__(self, index):
"""__getitem__
:param index:
"""
img_path = self.files[self.split][index].rstrip()
lbl_path = os.path.join(self.annotations_base, os.path.basename(img_path))
img = cv2.imread(img_path)
lbl = cv2.imread(lbl_path, 0)
lbl = self.encode_segmap(lbl)
if self.augmentations is not None:
img, lbl = self.augmentations(img, lbl)
if self.is_transform:
img, lbl = self.transform(img, lbl)
return img, lbl
def transform(self, img, lbl):
"""transform
:param img:
:param lbl:
"""
img = cv2.resize(img, (self.img_size[1], self.img_size[0]), interpolation=cv2.INTER_AREA)
img = img.astype(np.float64)
img -= self.mean
if self.img_norm: img = img / 255.0
img = img.transpose(2, 0, 1) # NHWC -> NCHW
classes = np.unique(lbl)
lbl = cv2.resize(lbl, (self.img_size[1], self.img_size[0]), interpolation=cv2.INTER_NEAREST)
if not np.all(classes == np.unique(lbl)):
print("WARN: resizing labels yielded fewer classes")
if not np.all(np.unique(lbl[lbl != self.ignore_index]) < self.n_classes):
print("after det", classes, np.unique(lbl))
raise ValueError("Segmentation map contained invalid class values")
img = torch.from_numpy(img).float()
lbl = torch.from_numpy(lbl).long()
return img, lbl
def encode_segmap(self, mask):
for _voidc in self.void_classes:
mask[mask == _voidc] = self.ignore_index
for _validc in self.valid_classes:
mask[mask == _validc] = self.class_map[_validc]
return mask
def decode_segmap(self, temp):
dest = np.zeros((temp.shape[0], temp.shape[1], 3))
for l in range(0, self.n_classes):
dest[temp == l] = self.label_colours[l]
return dest
def decode_image(self, img):
img = img.transpose(1, 2, 0) # NHWC -> NCHW
img = img.astype(np.float64)
if self.img_norm: img = img * 255.0
img += self.mean
img = img.astype(np.uint8)
return img | 0.591959 | 0.236252 |
import argparse
import os
import time
import random
import tqdm
import numpy as np
import imageio
from perlin_noise import fractal2d
def parse_args() -> argparse.Namespace:
parser = argparse.ArgumentParser(add_help=False)
parser.add_argument("--num-images", "-n", type=int, default=1)
parser.add_argument("--out-tmpl", "-o", default=None)
parser.add_argument("--width", "-w", type=int, default=320)
parser.add_argument("--height", "-h", type=int, default=240)
parser.add_argument("--hperiod", "-l", type=float, default=100)
parser.add_argument("--vperiod", "-m", type=float, default=None)
parser.add_argument("--octaves", "-k", type=int, default=1)
parser.add_argument("--persistence", "-p", type=float, default=0.5)
parser.add_argument("--seed", "-s", type=int, default=None)
parser.add_argument("--verbose", "-v", action="count", default=0)
parser.add_argument("--help", "-?", action="help")
return parser.parse_args()
def main(args: argparse.Namespace) -> None:
if args.vperiod is None:
args.vperiod = args.hperiod
if args.seed is None:
args.seed = random.randint(1000, 9999)
rng = np.random.default_rng(args.seed)
if args.out_tmpl is None:
args.out_tmpl = f"out/{args.seed}/%.png"
dirname = os.path.dirname(args.out_tmpl)
if '%' in dirname:
raise ValueError("Cannot use index substitution in output dirname")
os.makedirs(dirname, exist_ok=True)
gen_times = []
for i in tqdm.trange(args.num_images, disable=(args.verbose < 1 or args.verbose > 2)):
start = time.monotonic()
img = fractal2d(args.width, args.height,
args.hperiod, args.vperiod,
args.octaves, args.persistence,
rng)
gen_times.append(1000 * (time.monotonic() - start))
img = (127 * (img + 1)).astype(np.uint8)
out_path = args.out_tmpl.replace('%', f"{i:02d}")
imageio.imwrite(out_path, img)
if args.verbose > 2:
print(f"{out_path} written (gen. {gen_times[-1]:.1f}ms).")
if args.verbose > 1:
print(f"Compile + first gen. time: {gen_times[0]:.1f}ms")
avg = np.mean(gen_times[1:])
print(f"Average gen. time: {avg:.1f}ms")
if __name__ == "__main__":
main(parse_args()) | scripts/generate_images.py | import argparse
import os
import time
import random
import tqdm
import numpy as np
import imageio
from perlin_noise import fractal2d
def parse_args() -> argparse.Namespace:
parser = argparse.ArgumentParser(add_help=False)
parser.add_argument("--num-images", "-n", type=int, default=1)
parser.add_argument("--out-tmpl", "-o", default=None)
parser.add_argument("--width", "-w", type=int, default=320)
parser.add_argument("--height", "-h", type=int, default=240)
parser.add_argument("--hperiod", "-l", type=float, default=100)
parser.add_argument("--vperiod", "-m", type=float, default=None)
parser.add_argument("--octaves", "-k", type=int, default=1)
parser.add_argument("--persistence", "-p", type=float, default=0.5)
parser.add_argument("--seed", "-s", type=int, default=None)
parser.add_argument("--verbose", "-v", action="count", default=0)
parser.add_argument("--help", "-?", action="help")
return parser.parse_args()
def main(args: argparse.Namespace) -> None:
if args.vperiod is None:
args.vperiod = args.hperiod
if args.seed is None:
args.seed = random.randint(1000, 9999)
rng = np.random.default_rng(args.seed)
if args.out_tmpl is None:
args.out_tmpl = f"out/{args.seed}/%.png"
dirname = os.path.dirname(args.out_tmpl)
if '%' in dirname:
raise ValueError("Cannot use index substitution in output dirname")
os.makedirs(dirname, exist_ok=True)
gen_times = []
for i in tqdm.trange(args.num_images, disable=(args.verbose < 1 or args.verbose > 2)):
start = time.monotonic()
img = fractal2d(args.width, args.height,
args.hperiod, args.vperiod,
args.octaves, args.persistence,
rng)
gen_times.append(1000 * (time.monotonic() - start))
img = (127 * (img + 1)).astype(np.uint8)
out_path = args.out_tmpl.replace('%', f"{i:02d}")
imageio.imwrite(out_path, img)
if args.verbose > 2:
print(f"{out_path} written (gen. {gen_times[-1]:.1f}ms).")
if args.verbose > 1:
print(f"Compile + first gen. time: {gen_times[0]:.1f}ms")
avg = np.mean(gen_times[1:])
print(f"Average gen. time: {avg:.1f}ms")
if __name__ == "__main__":
main(parse_args()) | 0.337094 | 0.0713 |
__author__ = 'ChenyangGao <https://chenyanggao.github.io/>'
__version__ = (0, 0, 1)
__all__ = ['make_highlighter', 'render']

try:
    # NOTE(review): `plugin` is not defined in this module — presumably it is
    # injected by the hosting application before this file runs; confirm.
    # Best-effort attempt to make sure Pygments is installed/importable; any
    # failure is ignored because the hard `from pygments import ...` imports
    # below will raise anyway if Pygments is truly unavailable.
    plugin.ensure_import('pygments', 'Pygments')  # type: ignore
except Exception:
    # Was a bare `except:`, which would also swallow SystemExit and
    # KeyboardInterrupt; `Exception` keeps the best-effort intent without that.
    pass
from html import escape
from typing import Callable, Optional, Union
from pygments import highlight # type: ignore
from pygments.formatter import Formatter # type: ignore
from pygments.formatters import HtmlFormatter # type: ignore
from pygments.lexer import Lexer # type: ignore
from pygments.lexers import ( # type: ignore
get_lexer_by_name, get_lexer_for_filename, guess_lexer,
TextLexer,
)
def make_highlighter(
    formatter: Formatter = HtmlFormatter(),
) -> Callable[[str, Union[str, Lexer, Callable[..., Lexer]]], str]:
    '''Build a code-highlighting function bound to *formatter*.

    Powered by [Pygments](https://pygments.org/).

    NOTE: a custom formatter (with a custom style) may be supplied; the
    default formatter instance is shared across all calls. See:
    - [Available formatters](https://pygments.org/docs/formatters/)
    - [Styles](https://pygments.org/docs/styles/)

    NOTE: custom lexers (with custom filters) also work. To make a custom
    lexer discoverable through helpers such as
    pygments.lexers.get_lexer_by_name / get_lexer_for_filename /
    get_lexer_for_mimetype, register it in pygments.lexers.LEXERS. See:
    - [Available lexers](https://pygments.org/docs/lexers/)
    - [Filters](https://pygments.org/docs/filters/)

    :param formatter: a pygments formatter instance
    :return: the highlighting function
    '''
    def highlighter(
        code: str,
        lexer: Union[str, Lexer, Callable[..., Lexer]] = TextLexer(),
    ) -> str:
        '''Highlight *code* with *lexer*, rendering via the bound formatter.

        Extra attributes on this function:
        - highlighter.formatter: the associated pygments formatter
        - highlighter.highlight: alias of this very function
        - highlighter.style: the style sheet, or '' when unavailable

        :param code: source text to highlight
        :param lexer: lexer selector, resolved as follows:
            - a pygments.lexer.Lexer instance is used as-is
            - '' guesses a lexer from the code text
            - any other string is tried as a language name, then a filename
            - anything else is called as a Lexer factory
        :return: the highlighted code
        '''
        if not isinstance(lexer, Lexer):
            if isinstance(lexer, str):
                if lexer == '':
                    lexer = guess_lexer(code)
                else:
                    try:
                        lexer = get_lexer_by_name(lexer)
                    except Exception:
                        # Not a known language alias (ClassNotFound); fall
                        # back to treating the string as a filename.
                        # (Narrowed from a bare `except:`.)
                        lexer = get_lexer_for_filename(lexer)
            else:
                lexer = lexer()
        return highlight(code, lexer, formatter)
    highlighter.formatter = formatter  # type: ignore
    highlighter.highlight = highlighter  # type: ignore
    try:
        highlighter.style = formatter.get_style_defs()  # type: ignore
    except NotImplementedError:
        # Some formatters provide no style sheet.
        highlighter.style = ''  # type: ignore
    return highlighter
def render(
    code: str,
    lang: Optional[str] = '',
    formatter: Formatter = HtmlFormatter(),
) -> str:
    '''Highlight *code* and return it as HTML.

    Powered by [Pygments](https://pygments.org/). See:
    - [Available formatters](https://pygments.org/docs/formatters/)
    - [Available lexers](https://pygments.org/docs/lexers/)

    :param code: source text
    :param lang: programming language selector:
        - None: no highlighting, the code is only HTML-escaped
        - '': guess a lexer from the code text
        - otherwise: use the lexer registered for that language name
    :param formatter: a pygments HTML formatter
    :return: highlighted code as an HTML fragment
    '''
    # NOTE(review): assert-based validation disappears under `python -O`;
    # kept as-is so callers catching AssertionError keep working.
    assert isinstance(formatter, HtmlFormatter), \
        'formatter 必须是 pygments.formatters.html.HtmlFormatter 实例'
    if lang is None:
        return '<pre><code>%s</code></pre>' % escape(code)
    elif lang == '':
        lexer = guess_lexer(code)
    else:
        lexer = get_lexer_by_name(lang)
    # BUG FIX: the original final line had dataset-dump junk fused onto it,
    # making the module unparseable.
    return highlight(code, lexer, formatter)
__author__ = 'ChenyangGao <https://chenyanggao.github.io/>'
__version__ = (0, 0, 1)
__all__ = ['make_highlighter', 'render']
# Ask the host application's plugin loader (injected into globals, not
# imported here) to make sure Pygments is available; fall back silently
# to whatever is already importable.
try:
    plugin.ensure_import('pygments', 'Pygments')  # type: ignore
except Exception:
    # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit are
    # no longer swallowed; NameError (no `plugin`) is still caught.
    pass
from html import escape
from typing import Callable, Optional, Union
from pygments import highlight # type: ignore
from pygments.formatter import Formatter # type: ignore
from pygments.formatters import HtmlFormatter # type: ignore
from pygments.lexer import Lexer # type: ignore
from pygments.lexers import ( # type: ignore
get_lexer_by_name, get_lexer_for_filename, guess_lexer,
TextLexer,
)
def make_highlighter(
    formatter: Formatter = HtmlFormatter(),
) -> Callable[[str, Union[str, Lexer, Callable[..., Lexer]]], str]:
    '''Build a code-highlighting function bound to *formatter*.

    Powered by [Pygments](https://pygments.org/).

    NOTE: a custom formatter (with a custom style) may be supplied; the
    default formatter instance is shared across all calls. See:
    - [Available formatters](https://pygments.org/docs/formatters/)
    - [Styles](https://pygments.org/docs/styles/)

    NOTE: custom lexers (with custom filters) also work. To make a custom
    lexer discoverable through helpers such as
    pygments.lexers.get_lexer_by_name / get_lexer_for_filename /
    get_lexer_for_mimetype, register it in pygments.lexers.LEXERS. See:
    - [Available lexers](https://pygments.org/docs/lexers/)
    - [Filters](https://pygments.org/docs/filters/)

    :param formatter: a pygments formatter instance
    :return: the highlighting function
    '''
    def highlighter(
        code: str,
        lexer: Union[str, Lexer, Callable[..., Lexer]] = TextLexer(),
    ) -> str:
        '''Highlight *code* with *lexer*, rendering via the bound formatter.

        Extra attributes on this function:
        - highlighter.formatter: the associated pygments formatter
        - highlighter.highlight: alias of this very function
        - highlighter.style: the style sheet, or '' when unavailable

        :param code: source text to highlight
        :param lexer: lexer selector, resolved as follows:
            - a pygments.lexer.Lexer instance is used as-is
            - '' guesses a lexer from the code text
            - any other string is tried as a language name, then a filename
            - anything else is called as a Lexer factory
        :return: the highlighted code
        '''
        if not isinstance(lexer, Lexer):
            if isinstance(lexer, str):
                if lexer == '':
                    lexer = guess_lexer(code)
                else:
                    try:
                        lexer = get_lexer_by_name(lexer)
                    except Exception:
                        # Not a known language alias (ClassNotFound); fall
                        # back to treating the string as a filename.
                        # (Narrowed from a bare `except:`.)
                        lexer = get_lexer_for_filename(lexer)
            else:
                lexer = lexer()
        return highlight(code, lexer, formatter)
    highlighter.formatter = formatter  # type: ignore
    highlighter.highlight = highlighter  # type: ignore
    try:
        highlighter.style = formatter.get_style_defs()  # type: ignore
    except NotImplementedError:
        # Some formatters provide no style sheet.
        highlighter.style = ''  # type: ignore
    return highlighter
def render(
    code: str,
    lang: Optional[str] = '',
    formatter: Formatter = HtmlFormatter(),
) -> str:
    '''Highlight *code* and return it as HTML.

    Powered by [Pygments](https://pygments.org/). See:
    - [Available formatters](https://pygments.org/docs/formatters/)
    - [Available lexers](https://pygments.org/docs/lexers/)

    :param code: source text
    :param lang: programming language selector:
        - None: no highlighting, the code is only HTML-escaped
        - '': guess a lexer from the code text
        - otherwise: use the lexer registered for that language name
    :param formatter: a pygments HTML formatter
    :return: highlighted code as an HTML fragment
    '''
    # NOTE(review): assert-based validation disappears under `python -O`;
    # kept as-is so callers catching AssertionError keep working.
    assert isinstance(formatter, HtmlFormatter), \
        'formatter 必须是 pygments.formatters.html.HtmlFormatter 实例'
    if lang is None:
        return '<pre><code>%s</code></pre>' % escape(code)
    elif lang == '':
        lexer = guess_lexer(code)
    else:
        lexer = get_lexer_by_name(lang)
    # BUG FIX: the original final line had dataset-dump junk fused onto it,
    # making the module unparseable.
    return highlight(code, lexer, formatter)
import argparse
import collections
import torch
import torch.nn as nn
import torchvision
from torchvision import datasets, transforms
import numpy as np
import CNNScan.Settings
import CNNScan.utils as utils
import CNNScan.Mark.gan
"""
This class acts as the encodee stage of an auto-encoder.
It can be configured with a number fully-connected layers and convolutional layers.
It may help debug the generator portion of a GAN, since the generator is basically a decoder.
"""
class Encoder(nn.Module):
	"""Convolutional + fully-connected encoder half of an auto-encoder.

	The layer layout (conv stack followed by an FC stack) is driven
	entirely by the ``config`` dictionary.
	"""
	def __init__(self, config, input_size, output_size):
		"""
		:param config: settings dictionary (conv/FC layer specs, dropout, nonlinearity)
		:param input_size: (channels, height, width) of the input images
		:param output_size: width of the final fully-connected layer
		"""
		super(Encoder, self).__init__()
		self.config = config
		self.input_size = input_size
		# NOTE(review): this attribute comes from config['gen_seed_image'],
		# while the layers below are sized by the `output_size` parameter —
		# confirm the mismatch is intentional.
		self.output_size = config['gen_seed_image']
		# Create a convolutional network from the settings file definition.
		conv_layers = config['enc_conv_layers']
		nlo_name = config['nlo']
		tup = CNNScan.Settings.create_conv_layers(conv_layers, self.input_size[1:], self.input_size[0], nlo_name, config['dropout'], True)
		conv_list, cnn_output_size, _, _, _ = tup
		# Group all the convolutional layers into a single callable object.
		self.conv_layers = nn.Sequential(collections.OrderedDict(conv_list))
		last_size = cnn_output_size
		layer_list = []
		# Create FC layers based on configuration; the appended entry sizes
		# the last layer to the requested network output.
		for index, layer_size in enumerate(config['enc_full_layers'] + [output_size]):
			layer_list.append((f"fc{index}", nn.Linear(last_size, layer_size)))
			layer_list.append((f"{config['nlo']}{index}", CNNScan.Settings.get_nonlinear(config['nlo'])))
			layer_list.append((f"dropout{index}", nn.Dropout(config['dropout'])))
			last_size = layer_size
		self.linear = nn.Sequential(collections.OrderedDict(layer_list))
	def forward(self, images):
		# Normalize the batch to (N, C, H, W).
		output = images.view(len(images), self.input_size[0], self.input_size[1], self.input_size[2])
		# BUG FIX: the conv stack previously received the raw `images`
		# tensor, silently discarding the reshape above.
		output = self.conv_layers(output)
		output = output.view(len(images), -1)
		output = self.linear(output)
		return output
"""
This class combines an encoder with a GAN generator to form a full auto-encoder.
The goal of the network is to produce outputs that are identical to the inputs.
"""
class AutoEncoder(nn.Module):
	"""Full auto-encoder: the Encoder feeding the GAN generator as decoder.

	Trained so that its output reproduces its input image.
	"""
	def __init__(self, config):
		super(AutoEncoder, self).__init__()
		self.config = config
		seed_len = config['gen_seed_len']
		self.encoder = Encoder(config, config['im_size'], seed_len)
		self.decoder = CNNScan.Mark.gan.MarkGenerator(config, seed_len)
	def forward(self, images):
		# Encode down to the seed vector, then decode back to image space.
		return self.decoder(self.encoder(images))
# Given a data loader and a model, iterate over the loader for a single epoch.
def iterate_loader_once(config, model, loader, criterion, do_train=False, optimizer=None):
	"""Run one epoch over *loader*, optionally training *model*.

	:param config: settings dictionary (used for device placement)
	:param model: the auto-encoder being run
	:param loader: yields (images, label) batches; labels are ignored
	:param criterion: reconstruction loss
	:param do_train: when True, back-propagate and step *optimizer*
	:param optimizer: required when do_train is True
	:return: (number of images seen, summed batch loss)
	"""
	batch_loss, batch_count = 0, 0
	for images, _ in loader:
		images = utils.cuda(images, config)
		# Feed all data through the auto-encoder.
		out_images = model(images)
		loss = criterion(out_images, images)
		# If in training mode, must update the model to reduce loss.
		if do_train:
			# BUG FIX: gradients must be cleared once per batch; previously
			# they accumulated across every batch of the training run
			# (zero_grad was only called once, outside this function).
			optimizer.zero_grad()
			loss.backward()
			optimizer.step()
		batch_count += len(images)
		# .item() — the .data accessor is deprecated.
		batch_loss += loss.item()
		# Free allocated memory to prevent crashes.
		del loss
		del out_images
		del images
		torch.cuda.empty_cache()
	return (batch_count, batch_loss)
# Given training and testing data, train the model for all epochs and report statistics.
def train_autoencoder(config, model, train_loader, test_loader):
	"""Train *model* for config['epochs'] epochs and return it.

	Evaluation on *test_loader* is not implemented yet (the no_grad body
	is a placeholder).
	"""
	model = utils.cuda(model, config)
	# Choose a criterion.
	criterion = CNNScan.Settings.get_criterion(config)
	# Choose an optimizer.
	optimizer = CNNScan.Settings.get_optimizer(config, model)
	for epoch in range(config['epochs']):
		# Perform training.
		model.train()
		optimizer.zero_grad()
		count, loss = iterate_loader_once(config, model, train_loader, criterion, True, optimizer)
		print(f"Saw {count} images in epoch {epoch} with loss of {loss}.")
		# Perform evaluation.
		with torch.no_grad():
			# TODO: evaluate on test_loader and report reconstruction loss.
			pass
	# BUG FIX: the original final line had dataset-dump junk fused onto it.
	return model
import collections
import torch
import torch.nn as nn
import torchvision
from torchvision import datasets, transforms
import numpy as np
import CNNScan.Settings
import CNNScan.utils as utils
import CNNScan.Mark.gan
"""
This class acts as the encodee stage of an auto-encoder.
It can be configured with a number fully-connected layers and convolutional layers.
It may help debug the generator portion of a GAN, since the generator is basically a decoder.
"""
class Encoder(nn.Module):
	"""Convolutional + fully-connected encoder half of an auto-encoder.

	The layer layout (conv stack followed by an FC stack) is driven
	entirely by the ``config`` dictionary.
	"""
	def __init__(self, config, input_size, output_size):
		"""
		:param config: settings dictionary (conv/FC layer specs, dropout, nonlinearity)
		:param input_size: (channels, height, width) of the input images
		:param output_size: width of the final fully-connected layer
		"""
		super(Encoder, self).__init__()
		self.config = config
		self.input_size = input_size
		# NOTE(review): this attribute comes from config['gen_seed_image'],
		# while the layers below are sized by the `output_size` parameter —
		# confirm the mismatch is intentional.
		self.output_size = config['gen_seed_image']
		# Create a convolutional network from the settings file definition.
		conv_layers = config['enc_conv_layers']
		nlo_name = config['nlo']
		tup = CNNScan.Settings.create_conv_layers(conv_layers, self.input_size[1:], self.input_size[0], nlo_name, config['dropout'], True)
		conv_list, cnn_output_size, _, _, _ = tup
		# Group all the convolutional layers into a single callable object.
		self.conv_layers = nn.Sequential(collections.OrderedDict(conv_list))
		last_size = cnn_output_size
		layer_list = []
		# Create FC layers based on configuration; the appended entry sizes
		# the last layer to the requested network output.
		for index, layer_size in enumerate(config['enc_full_layers'] + [output_size]):
			layer_list.append((f"fc{index}", nn.Linear(last_size, layer_size)))
			layer_list.append((f"{config['nlo']}{index}", CNNScan.Settings.get_nonlinear(config['nlo'])))
			layer_list.append((f"dropout{index}", nn.Dropout(config['dropout'])))
			last_size = layer_size
		self.linear = nn.Sequential(collections.OrderedDict(layer_list))
	def forward(self, images):
		# Normalize the batch to (N, C, H, W).
		output = images.view(len(images), self.input_size[0], self.input_size[1], self.input_size[2])
		# BUG FIX: the conv stack previously received the raw `images`
		# tensor, silently discarding the reshape above.
		output = self.conv_layers(output)
		output = output.view(len(images), -1)
		output = self.linear(output)
		return output
"""
This class combines an encoder with a GAN generator to form a full auto-encoder.
The goal of the network is to produce outputs that are identical to the inputs.
"""
class AutoEncoder(nn.Module):
	"""Full auto-encoder: the Encoder feeding the GAN generator as decoder.

	Trained so that its output reproduces its input image.
	"""
	def __init__(self, config):
		super(AutoEncoder, self).__init__()
		self.config = config
		seed_len = config['gen_seed_len']
		self.encoder = Encoder(config, config['im_size'], seed_len)
		self.decoder = CNNScan.Mark.gan.MarkGenerator(config, seed_len)
	def forward(self, images):
		# Encode down to the seed vector, then decode back to image space.
		return self.decoder(self.encoder(images))
# Given a data loader and a model, iterate over the loader for a single epoch.
def iterate_loader_once(config, model, loader, criterion, do_train=False, optimizer=None):
	"""Run one epoch over *loader*, optionally training *model*.

	:param config: settings dictionary (used for device placement)
	:param model: the auto-encoder being run
	:param loader: yields (images, label) batches; labels are ignored
	:param criterion: reconstruction loss
	:param do_train: when True, back-propagate and step *optimizer*
	:param optimizer: required when do_train is True
	:return: (number of images seen, summed batch loss)
	"""
	batch_loss, batch_count = 0, 0
	for images, _ in loader:
		images = utils.cuda(images, config)
		# Feed all data through the auto-encoder.
		out_images = model(images)
		loss = criterion(out_images, images)
		# If in training mode, must update the model to reduce loss.
		if do_train:
			# BUG FIX: gradients must be cleared once per batch; previously
			# they accumulated across every batch of the training run
			# (zero_grad was only called once, outside this function).
			optimizer.zero_grad()
			loss.backward()
			optimizer.step()
		batch_count += len(images)
		# .item() — the .data accessor is deprecated.
		batch_loss += loss.item()
		# Free allocated memory to prevent crashes.
		del loss
		del out_images
		del images
		torch.cuda.empty_cache()
	return (batch_count, batch_loss)
# Given training and testing data, train the model for all epochs and report statistics.
def train_autoencoder(config, model, train_loader, test_loader):
	"""Train *model* for config['epochs'] epochs and return it.

	Evaluation on *test_loader* is not implemented yet (the no_grad body
	is a placeholder).
	"""
	model = utils.cuda(model, config)
	# Choose a criterion.
	criterion = CNNScan.Settings.get_criterion(config)
	# Choose an optimizer.
	optimizer = CNNScan.Settings.get_optimizer(config, model)
	for epoch in range(config['epochs']):
		# Perform training.
		model.train()
		optimizer.zero_grad()
		count, loss = iterate_loader_once(config, model, train_loader, criterion, True, optimizer)
		print(f"Saw {count} images in epoch {epoch} with loss of {loss}.")
		# Perform evaluation.
		with torch.no_grad():
			# TODO: evaluate on test_loader and report reconstruction loss.
			pass
	# BUG FIX: the original final line had dataset-dump junk fused onto it.
	return model
from __future__ import unicode_literals
from django.test import TestCase
from api.models import Event, Attempt
from django.contrib.auth.models import User
from api import serializers
import datetime
"""
Tests all parameters and fields of AttemptSerializer
"""
def create_event():
    """Persist and return a fixture Event owned by user1 with three attendees."""
    fixture = {
        'organiser': "user1",
        'event_name': "test",
        'location': "test",
        'start_time': '2001-01-29T12:00:00+00:00',
        'finish_time': '2050-01-29T12:30:00+00:00',
        'sign_in_time': '2001-01-29T12:00:00+00:00',
        'attendees': ['user2', 'user3', 'user4'],
    }
    return Event.objects.create(**fixture)
def create_users():
    """Create four test users and return them as a tuple (user1..user4)."""
    return tuple(
        User.objects.create_user(name, "<EMAIL>", "<PASSWORD>")
        for name in ("user1", "user2", "user3", "user4")
    )
class AttemptSerializerTestCase(TestCase):
    """Validation tests for every field of AttemptSerializer.

    The verify-scan behaviour is exercised by separate test cases.
    """
    def setUp(self):
        (self.user1, self.user2, self.user3, self.user4) = create_users()
        self.event_serializer_data = {
            'organiser': self.user1.username,
            'event_name': 'test',
            'location': 'test',
            'start_time': '2050-01-29T12:00:00',
            'finish_time': '2050-01-29T12:30:00',
            'sign_in_time': '2050-01-29T12:00:00',
            'attendees': [self.user2.username, self.user4.username, self.user4.username]
        }
        self.event = create_event()
        self.attempt_serializer_data = {
            'username': self.user2.username,
            'event_id': self.event.id,
            'time_on_screen': '12:30:00',
            'date_on_screen': '2050-01-29'
        }
    def _assert_invalid(self, overrides, expected_error_keys):
        """Serialize a modified copy of the base payload and assert it is
        rejected with exactly *expected_error_keys*.

        A copy is used so the shared fixture is never mutated in place
        (the originals aliased and mutated self.attempt_serializer_data).
        """
        data = dict(self.attempt_serializer_data, **overrides)
        serializer = serializers.AttemptSerializer(data=data)
        self.assertFalse(serializer.is_valid())
        # assertEqual: assertEquals is a deprecated alias.
        self.assertEqual(serializer.errors.keys(), expected_error_keys)
    def test_serializer_valid(self):
        serializer = serializers.AttemptSerializer(data=self.attempt_serializer_data)
        self.assertTrue(serializer.is_valid())
    def test_serializer_null_username(self):
        self._assert_invalid({'username': None}, set(['username']))
    def test_serializer_null_event_id(self):
        self._assert_invalid({'event_id': None}, set(['event_id']))
    def test_serializer_null_time_on_screen(self):
        self._assert_invalid({'time_on_screen': None}, set(['time_on_screen']))
    def test_serializer_null_date_on_screen(self):
        self._assert_invalid({'date_on_screen': None}, set(['date_on_screen']))
    def test_serializer_multiple_fields_null(self):
        self._assert_invalid({'date_on_screen': None, 'username': None},
                             set(['date_on_screen', 'username']))
    def test_serializer_created_auto_set_in_serializer(self):
        reference_time = datetime.datetime.now()
        serializer = serializers.AttemptSerializer(data=self.attempt_serializer_data)
        self.assertTrue(serializer.is_valid())
        # 'created' is stamped by the serializer itself, not by the caller.
        # BUG FIX: the original final line had dataset-dump junk fused onto it.
        self.assertNotEqual(reference_time, serializer.validated_data.get('created'))
from django.test import TestCase
from api.models import Event, Attempt
from django.contrib.auth.models import User
from api import serializers
import datetime
"""
Tests all parameters and fields of AttemptSerializer
"""
def create_event():
    """Persist and return a fixture Event owned by user1 with three attendees."""
    fixture = {
        'organiser': "user1",
        'event_name': "test",
        'location': "test",
        'start_time': '2001-01-29T12:00:00+00:00',
        'finish_time': '2050-01-29T12:30:00+00:00',
        'sign_in_time': '2001-01-29T12:00:00+00:00',
        'attendees': ['user2', 'user3', 'user4'],
    }
    return Event.objects.create(**fixture)
def create_users():
    """Create four test users and return them as a tuple (user1..user4)."""
    return tuple(
        User.objects.create_user(name, "<EMAIL>", "<PASSWORD>")
        for name in ("user1", "user2", "user3", "user4")
    )
class AttemptSerializerTestCase(TestCase):
    """Validation tests for every field of AttemptSerializer.

    The verify-scan behaviour is exercised by separate test cases.
    """
    def setUp(self):
        (self.user1, self.user2, self.user3, self.user4) = create_users()
        self.event_serializer_data = {
            'organiser': self.user1.username,
            'event_name': 'test',
            'location': 'test',
            'start_time': '2050-01-29T12:00:00',
            'finish_time': '2050-01-29T12:30:00',
            'sign_in_time': '2050-01-29T12:00:00',
            'attendees': [self.user2.username, self.user4.username, self.user4.username]
        }
        self.event = create_event()
        self.attempt_serializer_data = {
            'username': self.user2.username,
            'event_id': self.event.id,
            'time_on_screen': '12:30:00',
            'date_on_screen': '2050-01-29'
        }
    def _assert_invalid(self, overrides, expected_error_keys):
        """Serialize a modified copy of the base payload and assert it is
        rejected with exactly *expected_error_keys*.

        A copy is used so the shared fixture is never mutated in place
        (the originals aliased and mutated self.attempt_serializer_data).
        """
        data = dict(self.attempt_serializer_data, **overrides)
        serializer = serializers.AttemptSerializer(data=data)
        self.assertFalse(serializer.is_valid())
        # assertEqual: assertEquals is a deprecated alias.
        self.assertEqual(serializer.errors.keys(), expected_error_keys)
    def test_serializer_valid(self):
        serializer = serializers.AttemptSerializer(data=self.attempt_serializer_data)
        self.assertTrue(serializer.is_valid())
    def test_serializer_null_username(self):
        self._assert_invalid({'username': None}, set(['username']))
    def test_serializer_null_event_id(self):
        self._assert_invalid({'event_id': None}, set(['event_id']))
    def test_serializer_null_time_on_screen(self):
        self._assert_invalid({'time_on_screen': None}, set(['time_on_screen']))
    def test_serializer_null_date_on_screen(self):
        self._assert_invalid({'date_on_screen': None}, set(['date_on_screen']))
    def test_serializer_multiple_fields_null(self):
        self._assert_invalid({'date_on_screen': None, 'username': None},
                             set(['date_on_screen', 'username']))
    def test_serializer_created_auto_set_in_serializer(self):
        reference_time = datetime.datetime.now()
        serializer = serializers.AttemptSerializer(data=self.attempt_serializer_data)
        self.assertTrue(serializer.is_valid())
        # 'created' is stamped by the serializer itself, not by the caller.
        # BUG FIX: the original final line had dataset-dump junk fused onto it.
        self.assertNotEqual(reference_time, serializer.validated_data.get('created'))
import configparser
import gettext
import itertools
import pathlib
import re
import subprocess
import threading
import tkinter as tk
import tkinter.messagebox
import tkinter.ttk

import firefox_helper
import nicofox2bookmarks
__title__ = 'NicoFox to Firefox Bookmarks'
__version__ = '0.1.0'
# Private constants.
_CONFIG_FILENAME = 'configs.ini'  # INI file read at startup by _load_configs
_LOCALE_DIRNAME = 'locale'  # directory holding the gettext catalogs
_TRANSLATION_DOMAIN = 'nicofox2bookmarks_gui'  # gettext translation domain
_PADX = 4 # Default X padding between widgets.
_PADY = 2 # Default Y padding between widgets.
_STARTUP_MIN_WIDTH = 480  # presumably the startup window width in pixels — usage is outside this excerpt
def _load_configs(filename=_CONFIG_FILENAME):
    """Parse *filename* into a ConfigParser; a missing file yields an empty one."""
    parser = configparser.ConfigParser()
    parser.read(filename)  # silently ignores a nonexistent file
    return parser
def _setup_i18n(configs):
    """Install gettext translations chosen by the configuration.

    Reads [General] PreferredLanguages (comma-separated); defaults to
    zh_TW when the option is empty or missing.  Falls back to the
    identity translation when no catalog can be loaded.
    """
    raw_langs = configs.get('General', 'PreferredLanguages', fallback='')
    if raw_langs:
        languages = [lang.strip() for lang in raw_langs.split(',') if lang.strip()]
    else:
        languages = ['zh_TW']
    locale_dir = pathlib.Path(_LOCALE_DIRNAME).absolute()
    # install() makes _() available as a builtin for the whole app.
    gettext.translation(
        _TRANSLATION_DOMAIN,
        localedir=locale_dir,
        languages=languages,
        fallback=True,
    ).install()
def _open_in_explorer(path):
    # Windows-only: open an Explorer window with *path* pre-selected.
    # Fire-and-forget; the Popen handle is deliberately discarded.
    subprocess.Popen([r'explorer.exe', r'/select,', path])
def _make_open_in_explorer(get_path):
    """Build a callback that shows the path returned by *get_path* in Explorer.

    The path is stripped of surrounding whitespace and double quotes; an
    empty path is ignored, and a nonexistent one pops an info box instead
    of launching Explorer.
    """
    def _do_open_in_explorer():
        target = get_path().strip().strip('"')
        if not target:
            return
        if pathlib.Path(target).exists():
            _open_in_explorer(target)
        else:
            tk.messagebox.showinfo(__title__, _('Target path does not exist.'))
    return _do_open_in_explorer
def _get_widget_geometry(widget):
geometry = widget.geometry()
size, x, y = geometry.split('+')
width, height = size.split('x')
return int(width), int(height), int(x), int(y)
def _set_widget_geometry(widget, width, height, x, y):
new_geometry = '{}x{}+{}+{}'.format(width, height, x, y)
widget.geometry(newGeometry=new_geometry)
def _create_section_title_label(parent, text):
    """Create a centered white-on-black heading label for a panel section."""
    title = tk.ttk.Label(parent, text=text, anchor=tk.CENTER)
    title.config(background='black')
    title.config(foreground='white')
    return title
def _pad_widget_children_grid(widget, padx=_PADX, pady=_PADY):
    """Give every grid-managed child of *widget* uniform padding."""
    for managed_child in widget.winfo_children():
        managed_child.grid_configure(padx=padx, pady=pady)
def _pad_widget_children_pack(widget, padx=_PADX, pady=_PADY):
    """Give every pack-managed child of *widget* uniform padding."""
    for managed_child in widget.winfo_children():
        managed_child.pack_configure(padx=padx, pady=pady)
def _porting_task(param, on_exit):
    """Worker-thread body: port NicoFox downloads into a bookmarks backup.

    :param param: dict with 'nicofox_path', 'bookmark_path', 'output_path'
        and 'metadata' keys, assembled by the GUI.
    :param on_exit: callback invoked unconditionally when the task ends.
    """
    # NOTE(review): this runs on a worker thread but shows tkinter message
    # boxes directly; Tk is generally not thread-safe — confirm this is
    # acceptable on the target platform or marshal via widget.after().
    try:
        nicofox_path = param['nicofox_path']
        bookmark_path = param['bookmark_path']
        output_path = param['output_path']
        metadata = param['metadata']
        bookmarks = nicofox2bookmarks.import_nicofox_db(str(nicofox_path))
        if bookmarks:
            nicofox2bookmarks.export_bookmarks_to_json(str(output_path), str(bookmark_path), bookmarks, metadata)
            tk.messagebox.showinfo(__title__, _('Successful! {} bookmark(s) are ported.').format(len(bookmarks)))
        else:
            tk.messagebox.showinfo(__title__, _('No data to port.'))
    except Exception as ex:
        # Broad catch: this is the thread's top-level boundary; the error
        # is surfaced to the user rather than lost.
        tk.messagebox.showerror(__title__, _('Exception occurred during porting data:\n{}').format(ex))
    finally:
        on_exit()
class TaskDialog:
    """Show the task status visually and start the worker thread.

    The window centers itself over *parent*, shows an indeterminate
    progress bar, and tears itself down when the worker finishes or the
    user confirms closing it.
    """
    def __init__(self, parent, task_param):
        # Setup GUI.
        self._top = tk.Toplevel(parent)
        self._top.resizable(width=True, height=False)
        self._top.protocol('WM_DELETE_WINDOW', self.on_user_close)
        self._label = tk.ttk.Label(self._top, text=_('Porting data, please wait.'), anchor=tk.CENTER)
        self._label.pack(fill=tk.BOTH)
        self._progress_bar = tk.ttk.Progressbar(self._top, orient=tk.HORIZONTAL, mode='indeterminate')
        self._progress_bar.start()
        self._progress_bar.pack(fill=tk.BOTH)
        _pad_widget_children_pack(self._top)
        # Move this window to the center of parent.
        parent_width, parent_height, parent_x, parent_y = _get_widget_geometry(parent)
        self._top.update_idletasks()  # ensure our requested size is computed
        my_width, my_height, my_x, my_y = _get_widget_geometry(self._top)
        my_x = int(parent_x + (parent_width - my_width) / 2)
        my_y = int(parent_y + (parent_height - my_height) / 2)
        _set_widget_geometry(self._top, my_width, my_height, my_x, my_y)
        # Start the worker task.
        self._done = False
        self._closed = False
        self._closing_lock = threading.Lock()
        self._worker = threading.Thread(target=_porting_task, args=(task_param, self.on_task_exit))
        self._worker.start()
    def close(self):
        """Destroy the window exactly once, even under concurrent calls."""
        with self._closing_lock:
            if not self._closed:
                self._progress_bar.stop()
                self._top.destroy()
                self._closed = True
    def on_task_exit(self):
        """Worker callback: tear the window down and mark completion."""
        self.close()
        self._done = True
    def on_user_close(self):
        """Handle the window-manager close button with a confirmation box."""
        to_close = tk.messagebox.askyesno(
            __title__,
            _('Close this window will NOT stop the porting task.\nDo you still want to close it?'))
        # BUG FIX: askyesno returns a bool, but the original compared it to
        # tk.YES (the string "yes"), which is never equal to True — the
        # window could never be closed by the user.
        if to_close:
            self.close()
    @property
    def done(self):
        # True once the worker finished and the dialog was torn down.
        return self._done
class ProfilesSelector(tk.Frame):
    """Panel with a read-only combobox for selecting a Firefox profile.

    Index 0 is always a synthetic '<Manual Settings>' entry mapped to
    None; real profiles from firefox_helper follow, with the default
    profile pre-selected when one exists.
    """
    def __init__(self, *args, **kwargs):
        super(ProfilesSelector, self).__init__(*args, **kwargs)
        # Setup attributes.
        self._profiles_loaded = False  # lazy-load guard for load_profiles()
        self._profiles_namelist = []  # display names, parallel to _profiles
        self._profiles = []  # profile objects; entry 0 becomes None (manual)
        # Setup GUI.
        _create_section_title_label(self, text=_('Profiles')).pack(fill=tk.BOTH)
        self._profiles_combobox = tk.ttk.Combobox(self)
        self._profiles_combobox.config(state='readonly')
        self._profiles_combobox.pack(fill=tk.BOTH)
        _pad_widget_children_pack(self)
    def load_profiles(self, force_reload=False):
        """Populate the combobox from firefox_helper (idempotent).

        :param force_reload: discard cached profiles and rescan.
        """
        if force_reload:
            self._profiles_namelist.clear()
            self._profiles.clear()
            self._profiles_loaded = False
        if not self._profiles_loaded:
            self._profiles = firefox_helper.get_firefox_profiles()
            if self._profiles:
                self._profiles_namelist = [profile.name for profile in self._profiles]
                try:
                    # Tag the default profile's display name, if any.
                    default_index = next(index for index, profile in enumerate(self._profiles) if profile.is_default)
                    self._profiles_namelist[default_index] += ' ({})'.format(_('default'))
                except StopIteration:
                    default_index = -1
            else:
                default_index = -1
            # Prepend the manual-settings pseudo entry; subsequent indices
            # therefore shift by one (hence `1 + default_index` below,
            # which also maps "no default" (-1) to the manual entry).
            self._profiles.insert(0, None)
            self._profiles_namelist.insert(0, _('<Manual Settings>'))
            self._profiles_combobox['values'] = self._profiles_namelist
            self._profiles_combobox.current(1 + default_index)
            self._profiles_loaded = True
    @property
    def selected_profile(self):
        # The currently selected profile object, or None for manual settings.
        selection = self._profiles_combobox.current()
        return self._profiles[selection]
class PathField(tk.Frame):
    """A labelled path entry with an 'Open in Explorer' shortcut button."""
    def __init__(self, *args, **kwargs):
        super(PathField, self).__init__(*args, **kwargs)
        self._label = tk.ttk.Label(self, text='Path:')
        self._entry = tk.ttk.Entry(self)
        self._open_in_folder_btn = tk.ttk.Button(
            self,
            text=_('Open in Explorer'),
            command=_make_open_in_explorer(lambda: self._entry.get()))
        # Row 0: label on the left, shortcut button on the right.
        self._label.grid(sticky=tk.W)
        self._open_in_folder_btn.grid(row=0, column=1, sticky=tk.E)
        # Row 1: the entry stretches across both columns.
        self._entry.grid(row=1, columnspan=2, sticky=tk.W+tk.E)
        self.columnconfigure(0, weight=3)
        self.columnconfigure(1, weight=1)
        _pad_widget_children_grid(self)
    @property
    def label(self):
        """Text shown in the field's label."""
        return self._label.cget('text')
    @label.setter
    def label(self, text):
        self._label.config(text=text)
    @property
    def path(self):
        """Entry content with surrounding whitespace and double quotes removed."""
        return self._entry.get().strip().strip('"')
    @path.setter
    def path(self, text):
        self._entry.delete(0, tk.END)
        self._entry.insert(0, text)
class PathPanel(tk.Frame):
    """Panel collecting the three file paths used by the porting task."""
    def __init__(self, *args, **kwargs):
        super(PathPanel, self).__init__(*args, **kwargs)
        title_label = _create_section_title_label(self, text=_('Pathes'))
        title_label.pack(fill=tk.BOTH)
        # One PathField per required path, in display order.
        self._nicofox_field = self._add_field(_('NicoFox Database:'))
        self._bookmark_field = self._add_field(_('Bookmarks Backup:'))
        self._output_field = self._add_field(_('Output Bookmarks:'))
        # Fields already pad themselves horizontally; only the title gets padx.
        _pad_widget_children_pack(self, padx=0)
        title_label.pack_configure(padx=_PADX)
    def _add_field(self, label_text):
        """Create, label and pack one PathField."""
        field = PathField(self)
        field.label = label_text
        field.pack(fill=tk.BOTH)
        return field
    @property
    def nicofox_path(self):
        """Path of the NicoFox database."""
        return self._nicofox_field.path
    @nicofox_path.setter
    def nicofox_path(self, path):
        self._nicofox_field.path = path
    @property
    def bookmark_path(self):
        """Path of the bookmarks backup to merge into."""
        return self._bookmark_field.path
    @bookmark_path.setter
    def bookmark_path(self, path):
        self._bookmark_field.path = path
    @property
    def output_path(self):
        """Path the merged bookmarks file is written to."""
        return self._output_field.path
    @output_path.setter
    def output_path(self, path):
        self._output_field.path = path
class MetaPanel(tk.Frame):
    """Panel for bookmark metadata: container name, description, common tags."""
    def __init__(self, *args, **kwargs):
        super(MetaPanel, self).__init__(*args, **kwargs)
        rows = itertools.count()
        _create_section_title_label(self, text=_('Metadata')).grid(
            row=next(rows), columnspan=2, sticky=tk.W+tk.E)
        self._container_entry = self._create_field(_('Container:'), next(rows))
        self._container_desc_entry = self._create_field(_('Container Description:'), next(rows))
        self._common_tags_entry = self._create_field(_('Common Tags:'), next(rows))
        self.columnconfigure(1, weight=1)
        _pad_widget_children_grid(self)
    def _create_field(self, label, row):
        """Add a right-aligned label plus a stretching entry on *row*."""
        tk.ttk.Label(self, text=label).grid(row=row, column=0, sticky=tk.E)
        entry = tk.ttk.Entry(self)
        entry.grid(row=row, column=1, sticky=tk.W+tk.E)
        return entry
    @staticmethod
    def _replace_entry_text(entry, text):
        """Replace the full content of *entry* with *text*."""
        entry.delete(0, tk.END)
        entry.insert(0, text)
    @property
    def container(self):
        """Container (bookmark folder) name, stripped."""
        return self._container_entry.get().strip()
    @container.setter
    def container(self, text):
        self._replace_entry_text(self._container_entry, text)
    @property
    def container_description(self):
        """Container description text, stripped."""
        return self._container_desc_entry.get().strip()
    @container_description.setter
    def container_description(self, text):
        self._replace_entry_text(self._container_desc_entry, text)
    @property
    def common_tags(self):
        """Tags as a list: comma-separated input with blanks dropped."""
        raw = self._common_tags_entry.get().strip()
        return [tag.strip() for tag in raw.split(',') if tag.strip()]
    @common_tags.setter
    def common_tags(self, tags):
        joined = ', '.join(tag.strip() for tag in tags if tag.strip())
        self._replace_entry_text(self._common_tags_entry, joined)
class Processor:
"""Collect data from UI and launch porting tasks."""
    def __init__(self):
        # Data-source callables/objects wired in by the GUI after construction.
        self._profile_getter = None  # () -> selected Firefox profile, or None
        self._path_source = None  # object exposing the three path properties
        self._meta_source = None  # object exposing container/tags metadata
        self._tasks = []  # running task handles — populated outside this excerpt
        self._on_all_tasks_complete = None  # completion callback — used outside this excerpt
    @property
    def profile_getter(self):
        """Callable returning the currently selected Firefox profile."""
        return self._profile_getter
    @profile_getter.setter
    def profile_getter(self, getter):
        self._profile_getter = getter
    @property
    def path_source(self):
        """Object supplying the NicoFox/bookmark/output paths (e.g. PathPanel)."""
        return self._path_source
    @path_source.setter
    def path_source(self, source):
        self._path_source = source
    @property
    def meta_source(self):
        """Object supplying bookmark metadata (e.g. MetaPanel)."""
        return self._meta_source
    @meta_source.setter
    def meta_source(self, source):
        self._meta_source = source
    @property
    def has_running_task(self):
        # _clear_finished_tasks presumably prunes completed tasks first —
        # it is defined outside this excerpt.
        self._clear_finished_tasks()
        return bool(self._tasks)
@staticmethod
def _lookup_nicofox_path(profile):
"""Find the path to the NicoFox database and return it.
First, find the NicoFox database in current working directory.
If doesn't exist, then find it in profile directory if there has one.
Finally, if nowhere can find it, return None.
"""
NICOFOX_DATABASE_NAME = 'smilefox.sqlite'
# Find in current working directory.
nicofox_path = pathlib.Path(NICOFOX_DATABASE_NAME)
if nicofox_path.is_file():
return nicofox_path.absolute()
# Find in profile directory.
if profile is not None:
nicofox_path = pathlib.Path(profile.path, NICOFOX_DATABASE_NAME)
if nicofox_path.is_file():
return nicofox_path.absolute()
# Nowhere can find it.
return None
@staticmethod
def _lookup_bookmark_path(profile):
"""Find the path to the Firefox bookmarks backup and return it.
First, find the Firefox bookmarks backup with today's date in current working directory.
If doesn't exist and there has a profile, try to find the last automatic bookmarks backup.
Finally, if nowhere can find it, return None.
Note: it is highly recommended to use the manually backup.
"""
# Find in current working directory.
bookmarks_filename_today = firefox_helper.get_bookmarks_backup_filename()
bookmark_path = pathlib.Path(bookmarks_filename_today)
if bookmark_path.is_file():
return bookmark_path.absolute()
# Find the lastest one in profile directory.
if profile is not None:
bookmark_path = firefox_helper.get_last_firefox_bookmarks_backup_path(profile)
if bookmark_path is not None:
return bookmark_path.absolute()
# Nowhere can find it.
return None
@staticmethod
def _make_output_path():
"""Make the output filename and return it.
The output filename will be bookmarks filename today suffix with "-with-nicofox".
e.g. bookmarks-yyyy-mm-dd-with-nicofox.json
This function also prevents the name which conflicts with existing files.
It will try append "-number" to the end of file stem in order
until the filename doesn't exist.
e.g. bookmarks-yyyy-mm-dd-with-nicofox-2.json
"""
bookmarks_filename_today = firefox_helper.get_bookmarks_backup_filename()
output_path = pathlib.Path(bookmarks_filename_today)
stem = output_path.stem + '-with-nicofox'
ext = output_path.suffix
output_path = pathlib.Path(stem + ext)
if output_path.exists():
for suffix_num in itertools.count(2):
output_path = pathlib.Path(stem + '-' + str(suffix_num) + ext)
if not output_path.exists():
break
return output_path.absolute()
def _clear_finished_tasks(self):
self._tasks = [task for task in self._tasks if not task.done]
def close_all_dialogs(self):
"""Close all task dialogs. (but does NOT stop the tasks.)"""
for task in self._tasks:
task.close()
def start_port(self, root):
"""Collect information form UI and start porting task."""
assert self._path_source is not None
assert self._meta_source is not None
# Get current referred profile.
profile = self._profile_getter() if self._profile_getter is not None else None
# Collect path arguments and correct them.
nicofox_path = self._path_source.nicofox_path
bookmark_path = self._path_source.bookmark_path
output_path = self._path_source.output_path
if not nicofox_path:
nicofox_path = Processor._lookup_nicofox_path(profile)
if nicofox_path is None:
tk.messagebox.showwarning(__title__, _('NicoFox database path is required.'))
return
if not bookmark_path:
bookmark_path = Processor._lookup_bookmark_path(profile)
if bookmark_path is None:
tk.messagebox.showwarning(__title__, _('Bookmarks backup path is required.'))
return
if not output_path:
output_path = Processor._make_output_path()
self._path_source.nicofox_path = nicofox_path
self._path_source.bookmark_path = bookmark_path
self._path_source.output_path = output_path
# Collect metadata arguments and correct them.
metadata = nicofox2bookmarks.create_metadata()
metadata['container'] = self._meta_source.container or _('NicoFox')
metadata['description'] = self._meta_source.container_description\
or _('Bookmarks imported from NicoFox database using {}.').format(__title__)
metadata['common_tags'] = self._meta_source.common_tags
# Feedback the correct metadata to UI.
self._meta_source.container = metadata['container']
self._meta_source.container_description = metadata['description']
self._meta_source.common_tags = metadata['common_tags']
# Setup task parameters and start task.
task_param = {
'nicofox_path': nicofox_path,
'bookmark_path': bookmark_path,
'output_path': output_path,
'metadata': metadata,
}
if len(self._tasks) >= 8:
self._clear_finished_tasks()
self._tasks.append(TaskDialog(root, task_param))
def _on_root_close(root, processor):
    """Handle the root window close request.

    If tasks are still running, ask for confirmation first; closing never
    stops the worker threads, it only destroys their dialogs.
    """
    if processor.has_running_task:
        to_close = tk.messagebox.askyesno(
            __title__,
            _('There are still running task(s). Close this window will NOT stop them.\n'
              'Do you want to close it?'))
        # BUG FIX: askyesno() returns a bool, while tk.NO is the string "no",
        # so the old `to_close == tk.NO` comparison was always False and the
        # window closed even when the user answered "No".
        if not to_close:
            return
    processor.close_all_dialogs()
    root.destroy()
def main():
    """Build the GUI, wire the panels into a Processor and run the main loop."""
    # Load configs and setup i18n (installs the `_` translation builtin).
    config = _load_configs()
    _setup_i18n(config)
    # Setup root window.
    root = tk.Tk()
    root.title(__title__ + ' ver.' + __version__)
    root.resizable(width=True, height=False)
    # Setup profiles selector.
    profiles_selector = ProfilesSelector(root)
    profiles_selector.load_profiles()
    profiles_selector.pack(fill=tk.BOTH)
    # Setup processor.
    processor = Processor()
    processor.profile_getter = lambda: profiles_selector.selected_profile
    # Setup path panel.
    path_panel = PathPanel(root)
    path_panel.pack(fill=tk.BOTH)
    processor.path_source = path_panel
    # Setup meta panel.
    meta_panel = MetaPanel(root)
    meta_panel.pack(fill=tk.BOTH)
    processor.meta_source = meta_panel
    # Setup OK button.
    ok_button = tk.ttk.Button(root, text=_('Start Port'), command=lambda: processor.start_port(root))
    ok_button.pack(fill=tk.BOTH)
    # Optimize the root window size (enforce a minimum startup width).
    root.update_idletasks()
    width, height, x, y = _get_widget_geometry(root)
    if width < _STARTUP_MIN_WIDTH:
        width = _STARTUP_MIN_WIDTH
    _set_widget_geometry(root, width, height, x, y)
    # Start GUI; intercept window close so running tasks can be confirmed.
    root.protocol('WM_DELETE_WINDOW', lambda: _on_root_close(root, processor))
    root.mainloop()
if __name__ == '__main__':
main() | src/nicofox2bookmarks_gui.py | import configparser
import gettext
import itertools
import pathlib
import subprocess
import threading
import tkinter as tk
import tkinter.messagebox
import tkinter.ttk
import firefox_helper
import nicofox2bookmarks
__title__ = 'NicoFox to Firefox Bookmarks'
__version__ = '0.1.0'
# Private constants.
_CONFIG_FILENAME = 'configs.ini'
_LOCALE_DIRNAME = 'locale'
_TRANSLATION_DOMAIN = 'nicofox2bookmarks_gui'
_PADX = 4 # Default X padding between widgets.
_PADY = 2 # Default Y padding between widgets.
_STARTUP_MIN_WIDTH = 480
def _load_configs(filename=_CONFIG_FILENAME):
    """Read the INI configuration file and return the parser.

    A missing or unreadable file simply yields an empty configuration.
    """
    config_parser = configparser.ConfigParser()
    config_parser.read(filename)  # silently ignores a missing file
    return config_parser
def _setup_i18n(configs):
    """Install the gettext translation selected by the configuration.

    Reads 'General/PreferredLanguages' as a comma-separated list; falls back
    to zh_TW when the option is absent or empty.  Installs `_` as a builtin
    (with fallback=True, so untranslated apps still work).
    """
    raw_languages = configs.get('General', 'PreferredLanguages', fallback='')
    if raw_languages:
        languages = [part.strip() for part in raw_languages.split(',') if part.strip()]
    else:
        languages = ['zh_TW']
    translation = gettext.translation(
        _TRANSLATION_DOMAIN,
        localedir=pathlib.Path(_LOCALE_DIRNAME).absolute(),
        languages=languages,
        fallback=True)
    translation.install()
def _open_in_explorer(path):
    """Open Windows Explorer with *path* pre-selected (Windows only)."""
    command = [r'explorer.exe', r'/select,', path]
    subprocess.Popen(command)
def _make_open_in_explorer(get_path):
    """Return a callback that opens Explorer at the path supplied by *get_path*.

    The callback trims whitespace and surrounding double quotes, silently
    does nothing for an empty path, and shows an info box when the path
    does not exist on disk.
    """
    def _do_open_in_explorer():
        target = get_path().strip().strip('"')
        if not target:
            return
        if not pathlib.Path(target).exists():
            tk.messagebox.showinfo(__title__, _('Target path does not exist.'))
            return
        _open_in_explorer(target)
    return _do_open_in_explorer
def _get_widget_geometry(widget):
    """Parse the widget's geometry string into (width, height, x, y) ints.

    Tk geometry strings have the form '<width>x<height>+<x>+<y>'.
    """
    size_part, x_str, y_str = widget.geometry().split('+')
    width_str, height_str = size_part.split('x')
    return int(width_str), int(height_str), int(x_str), int(y_str)
def _set_widget_geometry(widget, width, height, x, y):
    """Apply a Tk geometry of the form '<w>x<h>+<x>+<y>' to the widget."""
    widget.geometry(newGeometry='{}x{}+{}+{}'.format(width, height, x, y))
def _create_section_title_label(parent, text):
    """Create a white-on-black, centered section header label (not packed)."""
    title = tk.ttk.Label(parent, text=text, anchor=tk.CENTER)
    title.config(background='black', foreground='white')
    return title
def _pad_widget_children_grid(widget, padx=_PADX, pady=_PADY):
    """Apply the default grid padding to every direct child of *widget*."""
    for child in widget.winfo_children():
        child.grid_configure(padx=padx, pady=pady)
def _pad_widget_children_pack(widget, padx=_PADX, pady=_PADY):
    """Apply the default pack padding to every direct child of *widget*."""
    for child in widget.winfo_children():
        child.pack_configure(padx=padx, pady=pady)
def _porting_task(param, on_exit):
    """Worker-thread body: import the NicoFox DB, merge it into the bookmarks
    backup, and report the outcome via a message box.

    *param* is a dict with 'nicofox_path', 'bookmark_path', 'output_path'
    and 'metadata'.  *on_exit* is always invoked when the task finishes,
    whether it succeeded or raised.

    NOTE(review): the message boxes are shown from a worker thread; calling
    Tk from non-main threads is not guaranteed safe -- confirm on the
    target platform.
    """
    try:
        nicofox_db = param['nicofox_path']
        backup_path = param['bookmark_path']
        out_path = param['output_path']
        meta = param['metadata']
        imported = nicofox2bookmarks.import_nicofox_db(str(nicofox_db))
        if not imported:
            tk.messagebox.showinfo(__title__, _('No data to port.'))
        else:
            nicofox2bookmarks.export_bookmarks_to_json(str(out_path), str(backup_path), imported, meta)
            tk.messagebox.showinfo(__title__, _('Successful! {} bookmark(s) are ported.').format(len(imported)))
    except Exception as ex:
        tk.messagebox.showerror(__title__, _('Exception occurred during porting data:\n{}').format(ex))
    finally:
        on_exit()
class TaskDialog:
    """Show the task status visually and start the worker thread.

    The dialog owns a single worker thread running ``_porting_task``.
    ``close()`` may be called from the worker (via ``on_task_exit``), from
    the user (window close button) or from ``Processor.close_all_dialogs``;
    a lock plus the ``_closed`` flag make it idempotent.
    """
    def __init__(self, parent, task_param):
        # Setup GUI.
        self._top = tk.Toplevel(parent)
        self._top.resizable(width=True, height=False)
        self._top.protocol('WM_DELETE_WINDOW', self.on_user_close)
        self._label = tk.ttk.Label(self._top, text=_('Porting data, please wait.'), anchor=tk.CENTER)
        self._label.pack(fill=tk.BOTH)
        self._progress_bar = tk.ttk.Progressbar(self._top, orient=tk.HORIZONTAL, mode='indeterminate')
        self._progress_bar.start()
        self._progress_bar.pack(fill=tk.BOTH)
        _pad_widget_children_pack(self._top)
        # Move this window to the center of parent.
        parent_width, parent_height, parent_x, parent_y = _get_widget_geometry(parent)
        self._top.update_idletasks()
        my_width, my_height, my_x, my_y = _get_widget_geometry(self._top)
        my_x = int(parent_x + (parent_width - my_width) / 2)
        my_y = int(parent_y + (parent_height - my_height) / 2)
        _set_widget_geometry(self._top, my_width, my_height, my_x, my_y)
        # Start the worker thread.
        self._done = False
        self._closed = False
        self._closing_lock = threading.Lock()
        self._worker = threading.Thread(target=_porting_task, args=(task_param, self.on_task_exit))
        self._worker.start()
    def close(self):
        """Destroy the dialog window exactly once; later calls are no-ops.

        Does NOT stop the worker thread.
        """
        with self._closing_lock:
            if not self._closed:
                self._progress_bar.stop()
                self._top.destroy()
                self._closed = True
    def on_task_exit(self):
        """Worker-thread callback: close the dialog and mark the task done."""
        self.close()
        self._done = True
    def on_user_close(self):
        """Confirm with the user before closing (the task keeps running)."""
        to_close = tk.messagebox.askyesno(
            __title__,
            _('Close this window will NOT stop the porting task.\nDo you still want to close it?'))
        # BUG FIX: askyesno() returns a bool, while tk.YES is the string
        # "yes", so the previous `to_close == tk.YES` comparison was always
        # False and the dialog could never be closed by the user.
        if to_close:
            self.close()
    @property
    def done(self):
        # True once the worker thread has finished (success or failure).
        return self._done
class ProfilesSelector(tk.Frame):
    """Panel for select Firefox profile.

    Index 0 of the combobox is always the "<Manual Settings>" pseudo-entry,
    whose profile object is None; real profiles follow in discovery order.
    """
    def __init__(self, *args, **kwargs):
        super(ProfilesSelector, self).__init__(*args, **kwargs)
        # Setup attributes.
        self._profiles_loaded = False
        self._profiles_namelist = []
        self._profiles = []
        # Setup GUI.
        _create_section_title_label(self, text=_('Profiles')).pack(fill=tk.BOTH)
        self._profiles_combobox = tk.ttk.Combobox(self)
        self._profiles_combobox.config(state='readonly')
        self._profiles_combobox.pack(fill=tk.BOTH)
        _pad_widget_children_pack(self)
    def load_profiles(self, force_reload=False):
        """Populate the combobox with discovered profiles (cached unless forced).

        Pre-selects the default profile when one is marked as such.
        """
        if force_reload:
            self._profiles_namelist.clear()
            self._profiles.clear()
            self._profiles_loaded = False
        if not self._profiles_loaded:
            self._profiles = firefox_helper.get_firefox_profiles()
            if self._profiles:
                self._profiles_namelist = [profile.name for profile in self._profiles]
                try:
                    # Mark the default profile's display name.
                    default_index = next(index for index, profile in enumerate(self._profiles) if profile.is_default)
                    self._profiles_namelist[default_index] += ' ({})'.format(_('default'))
                except StopIteration:
                    default_index = -1
            else:
                default_index = -1
            # Prepend the manual-settings pseudo entry (profile = None).
            self._profiles.insert(0, None)
            self._profiles_namelist.insert(0, _('<Manual Settings>'))
            self._profiles_combobox['values'] = self._profiles_namelist
            # +1 accounts for the prepended entry; -1 falls back to it.
            self._profiles_combobox.current(1 + default_index)
            self._profiles_loaded = True
    @property
    def selected_profile(self):
        """The selected profile object, or None for manual settings."""
        selection = self._profiles_combobox.current()
        return self._profiles[selection]
class PathField(tk.Frame):
    """A labeled path entry with an "Open in Explorer" button.

    The ``path`` property strips whitespace and surrounding double quotes
    (e.g. from paths pasted out of Explorer's "Copy as path").
    """
    def __init__(self, *args, **kwargs):
        super(PathField, self).__init__(*args, **kwargs)
        self._label = tk.ttk.Label(self, text='Path:')
        self._label.grid(sticky=tk.W)
        self._entry = tk.ttk.Entry(self)
        self._entry.grid(row=1, columnspan=2, sticky=tk.W+tk.E)
        self._open_in_folder_btn = tk.ttk.Button(
            self,
            text=_('Open in Explorer'),
            command=_make_open_in_explorer(lambda: self._entry.get()))
        self._open_in_folder_btn.grid(row=0, column=1, sticky=tk.E)
        self.columnconfigure(0, weight=3)
        self.columnconfigure(1, weight=1)
        _pad_widget_children_grid(self)
    @property
    def label(self):
        """The text shown above the entry."""
        return self._label.cget('text')
    @label.setter
    def label(self, text):
        self._label.config(text=text)
    @property
    def path(self):
        """Entry content with whitespace and surrounding quotes removed."""
        return self._entry.get().strip().strip('"')
    @path.setter
    def path(self, text):
        self._entry.delete(0, tk.END)
        self._entry.insert(0, text)
class PathPanel(tk.Frame):
    """Panel for input the files' path.

    Hosts three PathField rows: the NicoFox database to read, the Firefox
    bookmarks backup to merge into, and the output bookmarks file.
    """
    def __init__(self, *args, **kwargs):
        super(PathPanel, self).__init__(*args, **kwargs)
        title_label = _create_section_title_label(self, text=_('Pathes'))
        title_label.pack(fill=tk.BOTH)
        self._nicofox_field = PathField(self)
        self._nicofox_field.label = _('NicoFox Database:')
        self._nicofox_field.pack(fill=tk.BOTH)
        self._bookmark_field = PathField(self)
        self._bookmark_field.label = _('Bookmarks Backup:')
        self._bookmark_field.pack(fill=tk.BOTH)
        self._output_field = PathField(self)
        self._output_field.label = _('Output Bookmarks:')
        self._output_field.pack(fill=tk.BOTH)
        # Fields manage their own horizontal padding; only pad the title.
        _pad_widget_children_pack(self, padx=0)
        title_label.pack_configure(padx=_PADX)
    @property
    def nicofox_path(self):
        """Path text of the NicoFox database field."""
        return self._nicofox_field.path
    @nicofox_path.setter
    def nicofox_path(self, path):
        self._nicofox_field.path = path
    @property
    def bookmark_path(self):
        """Path text of the bookmarks backup field."""
        return self._bookmark_field.path
    @bookmark_path.setter
    def bookmark_path(self, path):
        self._bookmark_field.path = path
    @property
    def output_path(self):
        """Path text of the output bookmarks field."""
        return self._output_field.path
    @output_path.setter
    def output_path(self, path):
        self._output_field.path = path
class MetaPanel(tk.Frame):
    """Panel for input metadata like container name, common tags, etc."""
    def __init__(self, *args, **kwargs):
        super(MetaPanel, self).__init__(*args, **kwargs)
        # Grid rows are assigned sequentially from this counter.
        row_counter = itertools.count()
        _create_section_title_label(self, text=_('Metadata')).grid(
            row=next(row_counter), columnspan=2, sticky=tk.W+tk.E)
        self._container_entry = self._create_field(_('Container:'), next(row_counter))
        self._container_desc_entry = self._create_field(_('Container Description:'), next(row_counter))
        self._common_tags_entry = self._create_field(_('Common Tags:'), next(row_counter))
        self.columnconfigure(1, weight=1)
        _pad_widget_children_grid(self)
    def _create_field(self, label, row):
        """Create one labeled Entry row in the grid and return the Entry."""
        tk.ttk.Label(self, text=label).grid(row=row, column=0, sticky=tk.E)
        entry = tk.ttk.Entry(self)
        entry.grid(row=row, column=1, sticky=tk.W+tk.E)
        return entry
    @property
    def container(self):
        """Bookmark container name, stripped of surrounding whitespace."""
        return self._container_entry.get().strip()
    @container.setter
    def container(self, text):
        self._container_entry.delete(0, tk.END)
        self._container_entry.insert(0, text)
    @property
    def container_description(self):
        """Container description text, stripped of surrounding whitespace."""
        return self._container_desc_entry.get().strip()
    @container_description.setter
    def container_description(self, text):
        self._container_desc_entry.delete(0, tk.END)
        self._container_desc_entry.insert(0, text)
    @property
    def common_tags(self):
        """Comma-separated tag text parsed into a list of non-empty tags."""
        tags_text = self._common_tags_entry.get().strip()
        return [tag.strip() for tag in tags_text.split(',') if tag.strip()]
    @common_tags.setter
    def common_tags(self, tags):
        # Render the tag list back to "a, b, c" form for display.
        tags_text = ', '.join(tag.strip() for tag in tags if tag.strip())
        self._common_tags_entry.delete(0, tk.END)
        self._common_tags_entry.insert(0, tags_text)
class Processor:
    """Collect data from UI and launch porting tasks.

    The UI panels are injected through the ``profile_getter``,
    ``path_source`` and ``meta_source`` setters; ``start_port()`` reads
    them, fills in any missing paths and spawns a ``TaskDialog``.
    """
    def __init__(self):
        # Callable returning the currently selected Firefox profile (or None).
        self._profile_getter = None
        # Object exposing nicofox_path / bookmark_path / output_path (PathPanel).
        self._path_source = None
        # Object exposing container / container_description / common_tags (MetaPanel).
        self._meta_source = None
        # Live TaskDialog instances; finished ones are pruned lazily.
        self._tasks = []
        # NOTE(review): never read or written anywhere visible -- appears unused.
        self._on_all_tasks_complete = None
    @property
    def profile_getter(self):
        return self._profile_getter
    @profile_getter.setter
    def profile_getter(self, getter):
        self._profile_getter = getter
    @property
    def path_source(self):
        return self._path_source
    @path_source.setter
    def path_source(self, source):
        self._path_source = source
    @property
    def meta_source(self):
        return self._meta_source
    @meta_source.setter
    def meta_source(self, source):
        self._meta_source = source
    @property
    def has_running_task(self):
        # True when at least one task dialog has not reported completion yet.
        self._clear_finished_tasks()
        return bool(self._tasks)
    @staticmethod
    def _lookup_nicofox_path(profile):
        """Find the path to the NicoFox database and return it.
        First, find the NicoFox database in current working directory.
        If doesn't exist, then find it in profile directory if there has one.
        Finally, if nowhere can find it, return None.
        """
        NICOFOX_DATABASE_NAME = 'smilefox.sqlite'
        # Find in current working directory.
        nicofox_path = pathlib.Path(NICOFOX_DATABASE_NAME)
        if nicofox_path.is_file():
            return nicofox_path.absolute()
        # Find in profile directory.
        if profile is not None:
            nicofox_path = pathlib.Path(profile.path, NICOFOX_DATABASE_NAME)
            if nicofox_path.is_file():
                return nicofox_path.absolute()
        # Nowhere can find it.
        return None
    @staticmethod
    def _lookup_bookmark_path(profile):
        """Find the path to the Firefox bookmarks backup and return it.
        First, find the Firefox bookmarks backup with today's date in current working directory.
        If doesn't exist and there has a profile, try to find the last automatic bookmarks backup.
        Finally, if nowhere can find it, return None.
        Note: it is highly recommended to use the manually backup.
        """
        # Find in current working directory.
        bookmarks_filename_today = firefox_helper.get_bookmarks_backup_filename()
        bookmark_path = pathlib.Path(bookmarks_filename_today)
        if bookmark_path.is_file():
            return bookmark_path.absolute()
        # Find the lastest one in profile directory.
        if profile is not None:
            bookmark_path = firefox_helper.get_last_firefox_bookmarks_backup_path(profile)
            if bookmark_path is not None:
                return bookmark_path.absolute()
        # Nowhere can find it.
        return None
    @staticmethod
    def _make_output_path():
        """Make the output filename and return it.
        The output filename will be bookmarks filename today suffix with "-with-nicofox".
        e.g. bookmarks-yyyy-mm-dd-with-nicofox.json
        This function also prevents the name which conflicts with existing files.
        It will try append "-number" to the end of file stem in order
        until the filename doesn't exist.
        e.g. bookmarks-yyyy-mm-dd-with-nicofox-2.json
        """
        bookmarks_filename_today = firefox_helper.get_bookmarks_backup_filename()
        output_path = pathlib.Path(bookmarks_filename_today)
        stem = output_path.stem + '-with-nicofox'
        ext = output_path.suffix
        output_path = pathlib.Path(stem + ext)
        if output_path.exists():
            # Append -2, -3, ... until a free name is found.
            for suffix_num in itertools.count(2):
                output_path = pathlib.Path(stem + '-' + str(suffix_num) + ext)
                if not output_path.exists():
                    break
        return output_path.absolute()
    def _clear_finished_tasks(self):
        # Drop dialogs whose worker thread has reported completion.
        self._tasks = [task for task in self._tasks if not task.done]
    def close_all_dialogs(self):
        """Close all task dialogs. (but does NOT stop the tasks.)"""
        for task in self._tasks:
            task.close()
    def start_port(self, root):
        """Collect information from UI and start porting task."""
        assert self._path_source is not None
        assert self._meta_source is not None
        # Get current referred profile.
        profile = self._profile_getter() if self._profile_getter is not None else None
        # Collect path arguments and correct them.
        nicofox_path = self._path_source.nicofox_path
        bookmark_path = self._path_source.bookmark_path
        output_path = self._path_source.output_path
        if not nicofox_path:
            nicofox_path = Processor._lookup_nicofox_path(profile)
            if nicofox_path is None:
                tk.messagebox.showwarning(__title__, _('NicoFox database path is required.'))
                return
        if not bookmark_path:
            bookmark_path = Processor._lookup_bookmark_path(profile)
            if bookmark_path is None:
                tk.messagebox.showwarning(__title__, _('Bookmarks backup path is required.'))
                return
        if not output_path:
            output_path = Processor._make_output_path()
        # Feed the resolved paths back into the UI fields.
        self._path_source.nicofox_path = nicofox_path
        self._path_source.bookmark_path = bookmark_path
        self._path_source.output_path = output_path
        # Collect metadata arguments and correct them.
        metadata = nicofox2bookmarks.create_metadata()
        metadata['container'] = self._meta_source.container or _('NicoFox')
        metadata['description'] = self._meta_source.container_description\
            or _('Bookmarks imported from NicoFox database using {}.').format(__title__)
        metadata['common_tags'] = self._meta_source.common_tags
        # Feedback the correct metadata to UI.
        self._meta_source.container = metadata['container']
        self._meta_source.container_description = metadata['description']
        self._meta_source.common_tags = metadata['common_tags']
        # Setup task parameters and start task.
        task_param = {
            'nicofox_path': nicofox_path,
            'bookmark_path': bookmark_path,
            'output_path': output_path,
            'metadata': metadata,
        }
        # Prune completed dialogs occasionally so the list cannot grow forever.
        if len(self._tasks) >= 8:
            self._clear_finished_tasks()
        self._tasks.append(TaskDialog(root, task_param))
def _on_root_close(root, processor):
    """Handle the root window close request.

    If tasks are still running, ask for confirmation first; closing never
    stops the worker threads, it only destroys their dialogs.
    """
    if processor.has_running_task:
        to_close = tk.messagebox.askyesno(
            __title__,
            _('There are still running task(s). Close this window will NOT stop them.\n'
              'Do you want to close it?'))
        # BUG FIX: askyesno() returns a bool, while tk.NO is the string "no",
        # so the old `to_close == tk.NO` comparison was always False and the
        # window closed even when the user answered "No".
        if not to_close:
            return
    processor.close_all_dialogs()
    root.destroy()
def main():
    """Build the GUI, wire the panels into a Processor and run the main loop."""
    # Load configs and setup i18n (installs the `_` translation builtin).
    config = _load_configs()
    _setup_i18n(config)
    # Setup root window.
    root = tk.Tk()
    root.title(__title__ + ' ver.' + __version__)
    root.resizable(width=True, height=False)
    # Setup profiles selector.
    profiles_selector = ProfilesSelector(root)
    profiles_selector.load_profiles()
    profiles_selector.pack(fill=tk.BOTH)
    # Setup processor.
    processor = Processor()
    processor.profile_getter = lambda: profiles_selector.selected_profile
    # Setup path panel.
    path_panel = PathPanel(root)
    path_panel.pack(fill=tk.BOTH)
    processor.path_source = path_panel
    # Setup meta panel.
    meta_panel = MetaPanel(root)
    meta_panel.pack(fill=tk.BOTH)
    processor.meta_source = meta_panel
    # Setup OK button.
    ok_button = tk.ttk.Button(root, text=_('Start Port'), command=lambda: processor.start_port(root))
    ok_button.pack(fill=tk.BOTH)
    # Optimize the root window size (enforce a minimum startup width).
    root.update_idletasks()
    width, height, x, y = _get_widget_geometry(root)
    if width < _STARTUP_MIN_WIDTH:
        width = _STARTUP_MIN_WIDTH
    _set_widget_geometry(root, width, height, x, y)
    # Start GUI; intercept window close so running tasks can be confirmed.
    root.protocol('WM_DELETE_WINDOW', lambda: _on_root_close(root, processor))
    root.mainloop()
if __name__ == '__main__':
main() | 0.368747 | 0.061593 |
import numpy as np
import pandas as pd
import matplotlib
matplotlib.use("Agg")
import matplotlib.pyplot as plt
from joblib import Parallel, delayed
import gc
from util import log, checkAndCreateDir, generateLogger, plotClusterPairGrid
from sklearn.decomposition import PCA
from sklearn.decomposition import KernelPCA
from sklearn.decomposition import TruncatedSVD
import seaborn as sns
from mpl_toolkits.axes_grid1 import make_axes_locatable
from computeFeatures import computeFeatures
from Interpreter import Interpreter
from sklearn.ensemble import RandomTreesEmbedding
from sklearn.manifold import SpectralEmbedding
from copy import deepcopy
from crossValidation import crossValidation
from scipy.stats import pearsonr
from scipy.stats import pointbiserialr
from datetime import datetime
def analyzeData(
    in_p=None, # input path for raw esdr and smell data
    out_p_root=None, # root directory for outputting files
    start_dt=None, # starting date for the data
    end_dt=None, # ending date for the data
    logger=None):
    """
    Analyzing Smell PGH data
    Revealing the patterns of air pollution

    Runs the full analysis pipeline: per-feature plots, aggregated smell
    plots, dimensionality reduction, lagged-correlation study, and model
    interpretation.  All artifacts are written under "<out_p_root>/analysis/".
    """
    log("Analyze data...", logger)
    out_p = out_p_root + "analysis/"
    checkAndCreateDir(out_p)
    # Plot features
    plotFeatures(in_p, out_p_root, logger)
    # Plot aggregated smell data
    plotAggrSmell(in_p, out_p, logger)
    # Plot dimension reduction
    plotLowDimensions(in_p, out_p, logger)
    # Correlational study (classification labels, then regression labels)
    corrStudy(in_p, out_p, logger)
    corrStudy(in_p, out_p, logger, is_regr=True)
    # Interpret model
    num_run = 1 # how many times to run the simulation
    interpretModel(in_p, out_p, end_dt, start_dt, num_run, logger)
    print("END")
def interpretModel(in_p, out_p, end_dt, start_dt, num_run, logger):
    """Train interpretable (decision tree) models on hand-picked ESDR
    variables and cross-validate them.

    in_p: [esdr_csv_path, smell_csv_path]
    out_p: output directory; each run writes under "experiment/<timestamp>/"
    start_dt / end_dt: date range, used to derive the number of CV folds
    num_run: how many repetitions of the experiment to run
    """
    # Load time series data
    df_esdr = pd.read_csv(in_p[0], parse_dates=True, index_col="DateTime")
    df_smell = pd.read_csv(in_p[1], parse_dates=True, index_col="DateTime")
    # Select variables based on prior knowledge
    log("Select variables based on prior knowledge...")
    want = {
        #"3.feed_26.OZONE_PPM": "O3", # Lawrenceville ACHD
        "3.feed_26.SONICWS_MPH": "Lawrenceville_wind_speed",
        "3.feed_26.SONICWD_DEG": "Lawrenceville_wind_direction_@",
        "3.feed_26.SIGTHETA_DEG": "Lawrenceville_wind_direction_std",
        "3.feed_28.H2S_PPM": "Liberty_H2S", # Liberty ACHD
        "3.feed_28.SIGTHETA_DEG": "Liberty_wind_direction_std",
        "3.feed_28.SONICWD_DEG": "Liberty_wind_direction_@",
        "3.feed_28.SONICWS_MPH": "Liberty_wind_speed",
        #"3.feed_23.PM10_UG_M3": "FPpm", # Flag Plaza ACHD
        "3.feed_11067.SIGTHETA_DEG..3.feed_43.SIGTHETA_DEG": "ParkwayEast_wind_direction_std", # Parkway East ACHD
        "3.feed_11067.SONICWD_DEG..3.feed_43.SONICWD_DEG": "ParkwayEast_wind_direction_@",
        "3.feed_11067.SONICWS_MPH..3.feed_43.SONICWS_MPH": "ParkwayEast_wind_speed"
    } # key is the desired variables, value is the replaced name, @ is the flag for computing sine and cosine
    want_vars = want.keys()
    df_esdr_cp = df_esdr
    df_esdr = pd.DataFrame()
    # Keep only the wanted columns, renamed to their readable names.
    for col in df_esdr_cp.columns:
        if col in want_vars:
            log("\t" + col)
            df_esdr[want[col]] = df_esdr_cp[col]
    # Interpret data
    df_esdr = df_esdr.reset_index()
    df_smell = df_smell.reset_index()
    df_X, df_Y, df_C = computeFeatures(df_esdr=df_esdr, df_smell=df_smell, f_hr=8, b_hr=2, thr=40, is_regr=False,
        add_inter=True, add_roll=False, add_diff=False, logger=logger)
    for m in ["DT"]*num_run:
        # NOTE(review): "%Y-%d-%m-%H%M%S" puts day before month -- looks like
        # a swapped "%Y-%m-%d"; confirm before relying on folder name ordering.
        start_time_str = datetime.now().strftime("%Y-%d-%m-%H%M%S")
        out_p_m = out_p + "experiment/" + start_time_str + "/"
        lg = generateLogger(out_p_m + m + "-" + start_time_str + ".log", format=None)
        model = Interpreter(df_X=df_X, df_Y=df_Y, out_p=out_p_m, logger=lg)
        # NOTE(review): df_Y/df_X are overwritten with the filtered versions,
        # so later iterations of this loop operate on already-filtered data.
        df_Y = model.getFilteredLabels()
        df_X = model.getSelectedFeatures()
        num_folds = int((end_dt - start_dt).days / 7) # one fold represents a week
        crossValidation(df_X=df_X, df_Y=df_Y, df_C=df_C, out_p_root=out_p_m, method=m, is_regr=False, logger=lg,
            num_folds=num_folds, skip_folds=48, train_size=8000)
def computeCrossCorrelation(x, y, max_lag=None):
    """Compute the normalized cross-correlation of two equal-length series.

    x, y: 1-D numeric arrays of the same length n.
    max_lag: if given and positive, return only lags -max_lag..+max_lag;
        otherwise return the full 2n-1 correlation sequence.

    Returns a numpy array of correlation coefficients with zero lag at the
    center of the returned array.
    """
    n = len(x)
    xo = x - x.mean()
    yo = y - y.mean()
    # Biased cross-covariance, normalized by both standard deviations.
    cv = np.correlate(xo, yo, "full") / n
    cc = cv / (np.std(x) * np.std(y))
    # BUG FIX: the old `if max_lag > 0` raised TypeError on Python 3 for the
    # default max_lag=None; only trim when a positive lag bound is supplied.
    if max_lag is not None and max_lag > 0:
        cc = cc[n-1-max_lag:n+max_lag]
    return cc
def corrStudy(in_p, out_p, logger, is_regr=False):
    """Compute time-lagged correlations between each predictor and the label.

    Uses Pearson correlation for regression labels (is_regr=True) and
    point-biserial correlation for binary smell-event labels, over lags
    0..6 hours.  Writes "<out_p>corr_with_time_lag[_is_regr].csv" holding
    (r, p-value, sample size) tuples per predictor and lag.
    """
    log("Compute correlation of lagged X and current Y...", logger)
    f_name = "corr_with_time_lag"
    if is_regr: f_name += "_is_regr"
    # Compute features
    df_X, df_Y, _ = computeFeatures(in_p=in_p, f_hr=8, b_hr=0, thr=40, is_regr=is_regr,
        add_inter=False, add_roll=False, add_diff=False, logger=logger)
    # Compute daytime index
    # For 8 hours prediction, 11am covers duration from 11am to 7pm
    #h_start = 6
    #h_end = 11
    #idx = (df_X["HourOfDay"]>=h_start)&(df_X["HourOfDay"]<=h_end)
    # Compute point biserial correlation or Pearson correlation
    Y = df_Y.squeeze()
    max_t_lag = 6 # the maximum time lag
    df_corr_info = pd.DataFrame()
    df_corr = pd.DataFrame()
    for c in df_X.columns:
        # Calendar columns are not sensor predictors; skip them.
        if c in ["Day", "DayOfWeek", "HourOfDay"]: continue
        s_info = []
        s = []
        X = df_X[c]
        for i in range(0, max_t_lag+1):
            # Pair current Y with X shifted i hours into the past.
            d = pd.concat([Y, X.shift(i)], axis=1)
            d.columns = ["y", "x"]
            #d = d[idx] # select only daytime
            d = d.dropna()
            if is_regr:
                r, p = pearsonr(d["y"], d["x"])
            else:
                r, p = pointbiserialr(d["y"], d["x"])
            s_info.append((np.round(r, 3), np.round(p, 5), len(d)))
            s.append(np.round(r, 3))
        df_corr_info[c] = pd.Series(data=s_info)
        df_corr[c] = pd.Series(data=s)
    df_corr_info.to_csv(out_p+f_name+".csv")
    # Plot
    df_corr = df_corr.round(2)
    log(df_corr)
    #plotCorrelation(df_corr, out_p+f_name+".png")
def plotCorrelation(df_corr, out_p):
    """Save a heatmap of the time-lagged correlation matrix.

    df_corr: rows are time lags (hours), columns are predictors.
    out_p: full output path of the PNG file.
    NOTE(review): the only visible call (in corrStudy) is commented out.
    """
    # Plot graph
    tick_font_size = 16
    label_font_size = 20
    title_font_size = 32
    fig, ax1 = plt.subplots(1, 1, figsize=(28, 5))
    # Attach a slim colorbar axis to the right of the heatmap.
    divider = make_axes_locatable(ax1)
    ax2 = divider.append_axes("right", size="2%", pad=0.4)
    sns.heatmap(df_corr, ax=ax1, cbar_ax=ax2, cmap="RdBu", vmin=-0.6, vmax=0.6,
        linewidths=0.1, annot=False, fmt="g", xticklabels=False, yticklabels="auto")
    ax1.tick_params(labelsize=tick_font_size)
    ax2.tick_params(labelsize=tick_font_size)
    ax1.set_ylabel("Time lag (hours)", fontsize=label_font_size)
    ax1.set_xlabel("Predictors (sensors from different monitoring stations)", fontsize=label_font_size)
    plt.suptitle("Time-lagged point biserial correlation of predictors and response (smell events)", fontsize=title_font_size)
    fig.tight_layout()
    fig.subplots_adjust(top=0.88)
    fig.savefig(out_p, dpi=150)
    fig.clf()
    plt.close()
def plotAggrSmell(in_p, out_p, logger):
    """Compute regression labels (no forward shift) and plot the smell-value
    distribution over day-of-week and hour-of-day."""
    df_X, df_Y, _ = computeFeatures(in_p=in_p, f_hr=None, b_hr=0, thr=40, is_regr=True,
        add_inter=False, add_roll=False, add_diff=False, logger=logger)
    # Plot the distribution of smell values by days of week and hours of day
    plotDayHour(df_X, df_Y, out_p, logger)
def plotDayHour(df_X, df_Y, out_p, logger):
    """Plot the mean smell value for each (day-of-week, hour-of-day) cell.

    Saves both "smell_day_hour.csv" and "smell_day_hour.png" under out_p.
    """
    log("Plot the distribution of smell over day and hour...", logger)
    df = pd.DataFrame()
    df["HourOfDay"] = df_X["HourOfDay"]
    df["DayOfWeek"] = df_X["DayOfWeek"]
    df["smell"] = df_Y["smell"]
    # Mean smell value per (hour, day) group.
    df = df.groupby(["HourOfDay", "DayOfWeek"]).mean()
    df = df.round(2).reset_index()
    df_hd = df["HourOfDay"].values
    df_dw = df["DayOfWeek"].values
    df_c = df["smell"].values
    # 7 days x 24 hours matrix; cells without data stay 0.
    mat = np.zeros((7,24))
    for hd, dw, c in zip(df_hd, df_dw, df_c):
        mat[(dw, hd)] = c
    y_l = ["Sun", "Mon", "Tue", "Wed", "Thu", "Fri", "Sat"]
    x_l = ["0", "1", "2", "3", "4", "5", "6", "7",
        "8", "9", "10", "11", "12", "13", "14", "15",
        "16", "17", "18", "19", "20", "21", "22", "23"]
    df_day_hour = pd.DataFrame(data=mat, columns=x_l, index=y_l)
    df_day_hour.to_csv(out_p + "smell_day_hour.csv")
    fig, ax1 = plt.subplots(1, 1, figsize=(14, 6))
    # Attach a slim colorbar axis to the right of the heatmap.
    divider = make_axes_locatable(ax1)
    ax2 = divider.append_axes("right", size="2%", pad=0.2)
    sns.heatmap(df_day_hour, ax=ax1, cbar_ax=ax2, cmap="Blues", vmin=0, vmax=7, linewidths=0.1,
        annot=False, fmt="g", xticklabels=x_l, yticklabels=y_l, cbar_kws={"ticks":[0,2,4,6]})
    for item in ax1.get_yticklabels():
        item.set_rotation(0)
    for item in ax1.get_xticklabels():
        item.set_rotation(0)
    #ax1.set_ylabel("Day of week", fontsize=22)
    ax1.set_xlabel("Hour of day", fontsize=22)
    ax1.tick_params(axis="x", labelsize=22)
    ax1.tick_params(axis="y", labelsize=22)
    ax2.tick_params(axis="y", labelsize=22)
    plt.suptitle("Average smell values over time", fontsize=30)
    plt.tight_layout()
    fig.subplots_adjust(top=0.89)
    fig.savefig(out_p + "smell_day_hour.png", dpi=150)
    fig.clf()
    plt.close()
def plotFeatures(in_p, out_p_root, logger):
    """Plot per-feature diagnostics: time series, histograms, (feature, label)
    scatter pairs, and the predictor correlation matrix.

    Plots are written under "<out_p_root>/analysis/{time,hist,pair}/"; the
    correlation matrix goes directly into "<out_p_root>/analysis/".
    """
    plot_time_hist_pair = True
    plot_corr = True
    # Create file out folders
    out_p = [
        out_p_root + "analysis/time/",
        out_p_root + "analysis/hist/",
        out_p_root + "analysis/pair/",
        out_p_root + "analysis/"]
    # Create folder for saving files
    for f in out_p:
        checkAndCreateDir(f)
    # Compute features
    df_X, df_Y, _ = computeFeatures(in_p=in_p, f_hr=8, b_hr=0, thr=40, is_regr=True,
        add_inter=False, add_roll=False, add_diff=False, logger=logger)
    df_Y = pd.to_numeric(df_Y.squeeze())
    # Plot feature histograms, or time-series, or pairs of (feature, label)
    if plot_time_hist_pair:
        # One worker per CPU core; each figure is saved independently.
        with Parallel(n_jobs=-1) as parallel:
            # Plot time series
            log("Plot time series...", logger)
            h = "Time series "
            parallel(delayed(plotTime)(df_X[v], h, out_p[0]) for v in df_X.columns)
            plotTime(df_Y, h, out_p[0])
            # Plot histograms
            log("Plot histograms...", logger)
            h = "Histogram "
            parallel(delayed(plotHist)(df_X[v], h, out_p[1]) for v in df_X.columns)
            plotHist(df_Y, h, out_p[1])
            # Plot pairs of (feature, label)
            log("Plot pairs...", logger)
            h = "Pairs "
            parallel(delayed(plotPair)(df_X[v], df_Y, h, out_p[2]) for v in df_X.columns)
    # Plot correlation matrix
    if plot_corr:
        log("Plot correlation matrix of predictors...", logger)
        plotCorrMatirx(df_X, out_p[3])
    log("Finished plotting features", logger)
def plotTime(df_v, title_head, out_p):
v = df_v.name
fig = plt.figure(figsize=(40, 8), dpi=150)
df_v.plot(alpha=0.5, title=title_head)
fig.tight_layout()
fig.savefig(out_p + "time===" + v + ".png")
fig.clf()
plt.close()
gc.collect()
def plotHist(df_v, title_head, out_p, bins=30):
v = df_v.name
fig = plt.figure(figsize=(8, 8), dpi=150)
df_v.plot.hist(alpha=0.5, bins=bins, title=title_head)
plt.xlabel(v)
fig.tight_layout()
fig.savefig(out_p + v + ".png")
fig.clf()
plt.close()
gc.collect()
def plotPair(df_v1, df_v2, title_head, out_p):
v1, v2 = df_v1.name, df_v2.name
fig = plt.figure(figsize=(8, 8), dpi=150)
plt.scatter(df_v1, df_v2, s=10, alpha=0.4)
plt.title(title_head)
plt.xlabel(v1)
plt.ylabel(v2)
fig.tight_layout()
fig.savefig(out_p + v1 + "===" + v2 + ".png")
fig.clf()
plt.close()
gc.collect()
def plotCorrMatirx(df, out_p):
"""
Plot correlation matrix of (x_i, x_j) for each vector x_i and vector x_j in matrix X
"""
# Compute correlation matrix
df_corr = df.corr().round(3)
df_corr.to_csv(out_p + "corr_matrix.csv")
# Plot graph
fig, ax = plt.subplots(figsize=(10, 8))
im = ax.imshow(df_corr, cmap=plt.get_cmap("RdBu"), interpolation="nearest",vmin=-1, vmax=1)
fig.colorbar(im)
fig.tight_layout()
plt.suptitle("Correlation matrix", fontsize=18)
fig.subplots_adjust(top=0.92)
fig.savefig(out_p + "corr_matrix.png", dpi=150)
fig.clf()
plt.close()
def plotLowDimensions(in_p, out_p, logger):
df_X, df_Y, _ = computeFeatures(in_p=in_p, f_hr=8, b_hr=3, thr=40, is_regr=False,
add_inter=False, add_roll=False, add_diff=False, logger=logger)
X = df_X.values
Y = df_Y.squeeze().values
log("Number of positive samples: " + str(len(Y[Y==1])) + " (" + str(float(len(Y[Y==1]))/len(Y)) + ")")
log("Number of negative samples: " + str(len(Y[Y==0])) + " (" + str(float(len(Y[Y==0]))/len(Y)) + ")")
_, df_Y_regr, _ = computeFeatures(in_p=in_p, f_hr=8, b_hr=3, thr=40, is_regr=True,
add_inter=False, add_roll=False, add_diff=False, logger=logger)
Y_regr = df_Y_regr.squeeze().values
log("Plot PCA...", logger)
plotPCA(X, Y, Y_regr, out_p)
log("Plot Kernel PCA...", logger)
plotKernelPCA(X, Y, Y_regr, out_p)
log("Finished plotting dimensions", logger)
def plotSpectralEmbedding(X, Y, out_p, is_regr=False):
X, Y = deepcopy(X), deepcopy(Y)
pca = PCA(n_components=10)
X = pca.fit_transform(X)
sm = SpectralEmbedding(n_components=3, eigen_solver="arpack", n_neighbors=10, n_jobs=-1)
X = sm.fit_transform(X)
title = "Spectral Embedding"
out_p += "spectral_embedding.png"
plotClusterPairGrid(X, Y, out_p, 3, 1, title, is_regr)
def plotRandomTreesEmbedding(X, Y, out_p, is_regr=False):
X, Y = deepcopy(X), deepcopy(Y)
hasher = RandomTreesEmbedding(n_estimators=1000, max_depth=5, min_samples_split=2, n_jobs=-1)
X = hasher.fit_transform(X)
pca = TruncatedSVD(n_components=3)
X = pca.fit_transform(X)
title = "Random Trees Embedding"
out_p += "random_trees_embedding.png"
plotClusterPairGrid(X, Y, out_p, 3, 1, title, is_regr)
def plotKernelPCA(X, Y, Y_regr, out_p):
"""
Y is the binned dataset
Y_regr is the original dataset
"""
X, Y, Y_regr = deepcopy(X), deepcopy(Y), deepcopy(Y_regr)
pca = KernelPCA(n_components=3, kernel="rbf", n_jobs=-1)
X = pca.fit_transform(X)
r = pca.lambdas_
r = np.round(r/sum(r), 3)
title = "Kernel PCA, eigenvalue = " + str(r)
plotClusterPairGrid(X, Y, out_p+"kernel_pca.png", 3, 1, title, False)
plotClusterPairGrid(X, Y_regr, out_p+"kernel_pca_regr.png", 3, 1, title, True)
def plotPCA(X, Y, Y_regr, out_p):
"""
Y is the binned dataset
Y_rege is the original dataset
"""
X, Y, Y_regr = deepcopy(X), deepcopy(Y), deepcopy(Y_regr)
pca = PCA(n_components=3)
X = pca.fit_transform(X)
r = np.round(pca.explained_variance_ratio_, 3)
title = "PCA, eigenvalue = " + str(r)
plotClusterPairGrid(X, Y, out_p+"pca.png", 3, 1, title, False)
plotClusterPairGrid(X, Y_regr, out_p+"pca_regr.png", 3, 1, title, True) | py/prediction/analyzeData.py | import numpy as np
import pandas as pd
import matplotlib
matplotlib.use("Agg")
import matplotlib.pyplot as plt
from joblib import Parallel, delayed
import gc
from util import log, checkAndCreateDir, generateLogger, plotClusterPairGrid
from sklearn.decomposition import PCA
from sklearn.decomposition import KernelPCA
from sklearn.decomposition import TruncatedSVD
import seaborn as sns
from mpl_toolkits.axes_grid1 import make_axes_locatable
from computeFeatures import computeFeatures
from Interpreter import Interpreter
from sklearn.ensemble import RandomTreesEmbedding
from sklearn.manifold import SpectralEmbedding
from copy import deepcopy
from crossValidation import crossValidation
from scipy.stats import pearsonr
from scipy.stats import pointbiserialr
from datetime import datetime
def analyzeData(
in_p=None, # input path for raw esdr and smell data
out_p_root=None, # root directory for outputing files
start_dt=None, # starting date for the data
end_dt=None, # ending data for the data
logger=None):
"""
Analyzing Smell PGH data
Revealing the patterns of air pollution
"""
log("Analyze data...", logger)
out_p = out_p_root + "analysis/"
checkAndCreateDir(out_p)
# Plot features
plotFeatures(in_p, out_p_root, logger)
# Plot aggregated smell data
plotAggrSmell(in_p, out_p, logger)
# Plot dimension reduction
plotLowDimensions(in_p, out_p, logger)
# Correlational study
corrStudy(in_p, out_p, logger)
corrStudy(in_p, out_p, logger, is_regr=True)
# Interpret model
num_run = 1 # how many times to run the simulation
interpretModel(in_p, out_p, end_dt, start_dt, num_run, logger)
print("END")
def interpretModel(in_p, out_p, end_dt, start_dt, num_run, logger):
# Load time series data
df_esdr = pd.read_csv(in_p[0], parse_dates=True, index_col="DateTime")
df_smell = pd.read_csv(in_p[1], parse_dates=True, index_col="DateTime")
# Select variables based on prior knowledge
log("Select variables based on prior knowledge...")
want = {
#"3.feed_26.OZONE_PPM": "O3", # Lawrenceville ACHD
"3.feed_26.SONICWS_MPH": "Lawrenceville_wind_speed",
"3.feed_26.SONICWD_DEG": "Lawrenceville_wind_direction_@",
"3.feed_26.SIGTHETA_DEG": "Lawrenceville_wind_direction_std",
"3.feed_28.H2S_PPM": "Liberty_H2S", # Liberty ACHD
"3.feed_28.SIGTHETA_DEG": "Liberty_wind_direction_std",
"3.feed_28.SONICWD_DEG": "Liberty_wind_direction_@",
"3.feed_28.SONICWS_MPH": "Liberty_wind_speed",
#"3.feed_23.PM10_UG_M3": "FPpm", # Flag Plaza ACHD
"3.feed_11067.SIGTHETA_DEG..3.feed_43.SIGTHETA_DEG": "ParkwayEast_wind_direction_std", # Parkway East ACHD
"3.feed_11067.SONICWD_DEG..3.feed_43.SONICWD_DEG": "ParkwayEast_wind_direction_@",
"3.feed_11067.SONICWS_MPH..3.feed_43.SONICWS_MPH": "ParkwayEast_wind_speed"
} # key is the desired variables, value is the replaced name, @ is the flag for computing sine and cosine
want_vars = want.keys()
df_esdr_cp = df_esdr
df_esdr = pd.DataFrame()
for col in df_esdr_cp.columns:
if col in want_vars:
log("\t" + col)
df_esdr[want[col]] = df_esdr_cp[col]
# Interpret data
df_esdr = df_esdr.reset_index()
df_smell = df_smell.reset_index()
df_X, df_Y, df_C = computeFeatures(df_esdr=df_esdr, df_smell=df_smell, f_hr=8, b_hr=2, thr=40, is_regr=False,
add_inter=True, add_roll=False, add_diff=False, logger=logger)
for m in ["DT"]*num_run:
start_time_str = datetime.now().strftime("%Y-%d-%m-%H%M%S")
out_p_m = out_p + "experiment/" + start_time_str + "/"
lg = generateLogger(out_p_m + m + "-" + start_time_str + ".log", format=None)
model = Interpreter(df_X=df_X, df_Y=df_Y, out_p=out_p_m, logger=lg)
df_Y = model.getFilteredLabels()
df_X = model.getSelectedFeatures()
num_folds = int((end_dt - start_dt).days / 7) # one fold represents a week
crossValidation(df_X=df_X, df_Y=df_Y, df_C=df_C, out_p_root=out_p_m, method=m, is_regr=False, logger=lg,
num_folds=num_folds, skip_folds=48, train_size=8000)
def computeCrossCorrelation(x, y, max_lag=None):
n = len(x)
xo = x - x.mean()
yo = y - y.mean()
cv = np.correlate(xo, yo, "full") / n
cc = cv / (np.std(x) * np.std(y))
if max_lag > 0:
cc = cc[n-1-max_lag:n+max_lag]
return cc
def corrStudy(in_p, out_p, logger, is_regr=False):
log("Compute correlation of lagged X and current Y...", logger)
f_name = "corr_with_time_lag"
if is_regr: f_name += "_is_regr"
# Compute features
df_X, df_Y, _ = computeFeatures(in_p=in_p, f_hr=8, b_hr=0, thr=40, is_regr=is_regr,
add_inter=False, add_roll=False, add_diff=False, logger=logger)
# Compute daytime index
# For 8 hours prediction, 11am covers duration from 11am to 7pm
#h_start = 6
#h_end = 11
#idx = (df_X["HourOfDay"]>=h_start)&(df_X["HourOfDay"]<=h_end)
# Compute point biserial correlation or Pearson correlation
Y = df_Y.squeeze()
max_t_lag = 6 # the maximum time lag
df_corr_info = pd.DataFrame()
df_corr = pd.DataFrame()
for c in df_X.columns:
if c in ["Day", "DayOfWeek", "HourOfDay"]: continue
s_info = []
s = []
X = df_X[c]
for i in range(0, max_t_lag+1):
d = pd.concat([Y, X.shift(i)], axis=1)
d.columns = ["y", "x"]
#d = d[idx] # select only daytime
d = d.dropna()
if is_regr:
r, p = pearsonr(d["y"], d["x"])
else:
r, p = pointbiserialr(d["y"], d["x"])
s_info.append((np.round(r, 3), np.round(p, 5), len(d)))
s.append(np.round(r, 3))
df_corr_info[c] = pd.Series(data=s_info)
df_corr[c] = pd.Series(data=s)
df_corr_info.to_csv(out_p+f_name+".csv")
# Plot
df_corr = df_corr.round(2)
log(df_corr)
#plotCorrelation(df_corr, out_p+f_name+".png")
def plotCorrelation(df_corr, out_p):
# Plot graph
tick_font_size = 16
label_font_size = 20
title_font_size = 32
fig, ax1 = plt.subplots(1, 1, figsize=(28, 5))
divider = make_axes_locatable(ax1)
ax2 = divider.append_axes("right", size="2%", pad=0.4)
sns.heatmap(df_corr, ax=ax1, cbar_ax=ax2, cmap="RdBu", vmin=-0.6, vmax=0.6,
linewidths=0.1, annot=False, fmt="g", xticklabels=False, yticklabels="auto")
ax1.tick_params(labelsize=tick_font_size)
ax2.tick_params(labelsize=tick_font_size)
ax1.set_ylabel("Time lag (hours)", fontsize=label_font_size)
ax1.set_xlabel("Predictors (sensors from different monitoring stations)", fontsize=label_font_size)
plt.suptitle("Time-lagged point biserial correlation of predictors and response (smell events)", fontsize=title_font_size)
fig.tight_layout()
fig.subplots_adjust(top=0.88)
fig.savefig(out_p, dpi=150)
fig.clf()
plt.close()
def plotAggrSmell(in_p, out_p, logger):
df_X, df_Y, _ = computeFeatures(in_p=in_p, f_hr=None, b_hr=0, thr=40, is_regr=True,
add_inter=False, add_roll=False, add_diff=False, logger=logger)
# Plot the distribution of smell values by days of week and hours of day
plotDayHour(df_X, df_Y, out_p, logger)
def plotDayHour(df_X, df_Y, out_p, logger):
log("Plot the distribution of smell over day and hour...", logger)
df = pd.DataFrame()
df["HourOfDay"] = df_X["HourOfDay"]
df["DayOfWeek"] = df_X["DayOfWeek"]
df["smell"] = df_Y["smell"]
df = df.groupby(["HourOfDay", "DayOfWeek"]).mean()
df = df.round(2).reset_index()
df_hd = df["HourOfDay"].values
df_dw = df["DayOfWeek"].values
df_c = df["smell"].values
mat = np.zeros((7,24))
for hd, dw, c in zip(df_hd, df_dw, df_c):
mat[(dw, hd)] = c
y_l = ["Sun", "Mon", "Tue", "Wed", "Thu", "Fri", "Sat"]
x_l = ["0", "1", "2", "3", "4", "5", "6", "7",
"8", "9", "10", "11", "12", "13", "14", "15",
"16", "17", "18", "19", "20", "21", "22", "23"]
df_day_hour = pd.DataFrame(data=mat, columns=x_l, index=y_l)
df_day_hour.to_csv(out_p + "smell_day_hour.csv")
fig, ax1 = plt.subplots(1, 1, figsize=(14, 6))
divider = make_axes_locatable(ax1)
ax2 = divider.append_axes("right", size="2%", pad=0.2)
sns.heatmap(df_day_hour, ax=ax1, cbar_ax=ax2, cmap="Blues", vmin=0, vmax=7, linewidths=0.1,
annot=False, fmt="g", xticklabels=x_l, yticklabels=y_l, cbar_kws={"ticks":[0,2,4,6]})
for item in ax1.get_yticklabels():
item.set_rotation(0)
for item in ax1.get_xticklabels():
item.set_rotation(0)
#ax1.set_ylabel("Day of week", fontsize=22)
ax1.set_xlabel("Hour of day", fontsize=22)
ax1.tick_params(axis="x", labelsize=22)
ax1.tick_params(axis="y", labelsize=22)
ax2.tick_params(axis="y", labelsize=22)
plt.suptitle("Average smell values over time", fontsize=30)
plt.tight_layout()
fig.subplots_adjust(top=0.89)
fig.savefig(out_p + "smell_day_hour.png", dpi=150)
fig.clf()
plt.close()
def plotFeatures(in_p, out_p_root, logger):
plot_time_hist_pair = True
plot_corr = True
# Create file out folders
out_p = [
out_p_root + "analysis/time/",
out_p_root + "analysis/hist/",
out_p_root + "analysis/pair/",
out_p_root + "analysis/"]
# Create folder for saving files
for f in out_p:
checkAndCreateDir(f)
# Compute features
df_X, df_Y, _ = computeFeatures(in_p=in_p, f_hr=8, b_hr=0, thr=40, is_regr=True,
add_inter=False, add_roll=False, add_diff=False, logger=logger)
df_Y = pd.to_numeric(df_Y.squeeze())
# Plot feature histograms, or time-series, or pairs of (feature, label)
if plot_time_hist_pair:
with Parallel(n_jobs=-1) as parallel:
# Plot time series
log("Plot time series...", logger)
h = "Time series "
parallel(delayed(plotTime)(df_X[v], h, out_p[0]) for v in df_X.columns)
plotTime(df_Y, h, out_p[0])
# Plot histograms
log("Plot histograms...", logger)
h = "Histogram "
parallel(delayed(plotHist)(df_X[v], h, out_p[1]) for v in df_X.columns)
plotHist(df_Y, h, out_p[1])
# Plot pairs of (feature, label)
log("Plot pairs...", logger)
h = "Pairs "
parallel(delayed(plotPair)(df_X[v], df_Y, h, out_p[2]) for v in df_X.columns)
# Plot correlation matrix
if plot_corr:
log("Plot correlation matrix of predictors...", logger)
plotCorrMatirx(df_X, out_p[3])
log("Finished plotting features", logger)
def plotTime(df_v, title_head, out_p):
v = df_v.name
fig = plt.figure(figsize=(40, 8), dpi=150)
df_v.plot(alpha=0.5, title=title_head)
fig.tight_layout()
fig.savefig(out_p + "time===" + v + ".png")
fig.clf()
plt.close()
gc.collect()
def plotHist(df_v, title_head, out_p, bins=30):
v = df_v.name
fig = plt.figure(figsize=(8, 8), dpi=150)
df_v.plot.hist(alpha=0.5, bins=bins, title=title_head)
plt.xlabel(v)
fig.tight_layout()
fig.savefig(out_p + v + ".png")
fig.clf()
plt.close()
gc.collect()
def plotPair(df_v1, df_v2, title_head, out_p):
v1, v2 = df_v1.name, df_v2.name
fig = plt.figure(figsize=(8, 8), dpi=150)
plt.scatter(df_v1, df_v2, s=10, alpha=0.4)
plt.title(title_head)
plt.xlabel(v1)
plt.ylabel(v2)
fig.tight_layout()
fig.savefig(out_p + v1 + "===" + v2 + ".png")
fig.clf()
plt.close()
gc.collect()
def plotCorrMatirx(df, out_p):
"""
Plot correlation matrix of (x_i, x_j) for each vector x_i and vector x_j in matrix X
"""
# Compute correlation matrix
df_corr = df.corr().round(3)
df_corr.to_csv(out_p + "corr_matrix.csv")
# Plot graph
fig, ax = plt.subplots(figsize=(10, 8))
im = ax.imshow(df_corr, cmap=plt.get_cmap("RdBu"), interpolation="nearest",vmin=-1, vmax=1)
fig.colorbar(im)
fig.tight_layout()
plt.suptitle("Correlation matrix", fontsize=18)
fig.subplots_adjust(top=0.92)
fig.savefig(out_p + "corr_matrix.png", dpi=150)
fig.clf()
plt.close()
def plotLowDimensions(in_p, out_p, logger):
df_X, df_Y, _ = computeFeatures(in_p=in_p, f_hr=8, b_hr=3, thr=40, is_regr=False,
add_inter=False, add_roll=False, add_diff=False, logger=logger)
X = df_X.values
Y = df_Y.squeeze().values
log("Number of positive samples: " + str(len(Y[Y==1])) + " (" + str(float(len(Y[Y==1]))/len(Y)) + ")")
log("Number of negative samples: " + str(len(Y[Y==0])) + " (" + str(float(len(Y[Y==0]))/len(Y)) + ")")
_, df_Y_regr, _ = computeFeatures(in_p=in_p, f_hr=8, b_hr=3, thr=40, is_regr=True,
add_inter=False, add_roll=False, add_diff=False, logger=logger)
Y_regr = df_Y_regr.squeeze().values
log("Plot PCA...", logger)
plotPCA(X, Y, Y_regr, out_p)
log("Plot Kernel PCA...", logger)
plotKernelPCA(X, Y, Y_regr, out_p)
log("Finished plotting dimensions", logger)
def plotSpectralEmbedding(X, Y, out_p, is_regr=False):
X, Y = deepcopy(X), deepcopy(Y)
pca = PCA(n_components=10)
X = pca.fit_transform(X)
sm = SpectralEmbedding(n_components=3, eigen_solver="arpack", n_neighbors=10, n_jobs=-1)
X = sm.fit_transform(X)
title = "Spectral Embedding"
out_p += "spectral_embedding.png"
plotClusterPairGrid(X, Y, out_p, 3, 1, title, is_regr)
def plotRandomTreesEmbedding(X, Y, out_p, is_regr=False):
X, Y = deepcopy(X), deepcopy(Y)
hasher = RandomTreesEmbedding(n_estimators=1000, max_depth=5, min_samples_split=2, n_jobs=-1)
X = hasher.fit_transform(X)
pca = TruncatedSVD(n_components=3)
X = pca.fit_transform(X)
title = "Random Trees Embedding"
out_p += "random_trees_embedding.png"
plotClusterPairGrid(X, Y, out_p, 3, 1, title, is_regr)
def plotKernelPCA(X, Y, Y_regr, out_p):
"""
Y is the binned dataset
Y_regr is the original dataset
"""
X, Y, Y_regr = deepcopy(X), deepcopy(Y), deepcopy(Y_regr)
pca = KernelPCA(n_components=3, kernel="rbf", n_jobs=-1)
X = pca.fit_transform(X)
r = pca.lambdas_
r = np.round(r/sum(r), 3)
title = "Kernel PCA, eigenvalue = " + str(r)
plotClusterPairGrid(X, Y, out_p+"kernel_pca.png", 3, 1, title, False)
plotClusterPairGrid(X, Y_regr, out_p+"kernel_pca_regr.png", 3, 1, title, True)
def plotPCA(X, Y, Y_regr, out_p):
"""
Y is the binned dataset
Y_rege is the original dataset
"""
X, Y, Y_regr = deepcopy(X), deepcopy(Y), deepcopy(Y_regr)
pca = PCA(n_components=3)
X = pca.fit_transform(X)
r = np.round(pca.explained_variance_ratio_, 3)
title = "PCA, eigenvalue = " + str(r)
plotClusterPairGrid(X, Y, out_p+"pca.png", 3, 1, title, False)
plotClusterPairGrid(X, Y_regr, out_p+"pca_regr.png", 3, 1, title, True) | 0.739516 | 0.430028 |
import simpy
import numpy as np
from numpy.random import RandomState
"""
Simple OB patient flow model 4 - Very simple OO
Details:
- Generate arrivals via Poisson process
- Define an OBUnit class that contains a simpy.Resource object as a member.
Not subclassing Resource, just trying to use it as a member.
- Routing is hard coded (no Router class yet)
- Just trying to get objects/processes to communicate
"""
ARR_RATE = 0.4
MEAN_LOS_OBS = 3
MEAN_LOS_LDR = 12
MEAN_LOS_PP = 48
CAPACITY_OBS = 2
CAPACITY_LDR = 6
CAPACITY_PP = 24
RNG_SEED = 6353
class OBunit(object):
""" Models an OB unit with fixed capacity.
Parameters
----------
env : simpy.Environment
the simulation environment
name : str
unit name
capacity : integer (or None)
Number of beds. Use None for infinite capacity.
"""
def __init__(self, env, name, capacity=None, debug=False):
if capacity is None:
self.capacity = capacity=simpy.core.Infinity
else:
self.capacity = capacity
self.unit = simpy.Resource(env, capacity)
self.env = env
self.name = name
self.debug = debug
self.num_entries = 0
self.num_exits = 0
self.tot_occ_time = 0.0
def basic_stats_msg(self):
msg = "{:6}:\t Entries={}, Exits={}, ALOS={:4.2f}".format(self.name,
self.num_entries,
self.num_exits,
self.tot_occ_time / self.num_exits)
return msg
class OBpatient(object):
def __init__(self, arrtime, arrstream, patient_id=0, prng=0):
self.arrtime = arrtime
self.arrstream = arrstream
self.patient_id = patient_id
self.name = 'Patient_{}'.format(patient_id)
# Hard coding for now
self.planned_los_obs = prng.exponential(MEAN_LOS_OBS)
self.planned_los_ldr = prng.exponential(MEAN_LOS_LDR)
self.planned_los_pp = prng.exponential(MEAN_LOS_PP)
def __repr__(self):
return "patientid: {}, arr_stream: {}, time: {}". \
format(self.patient_id, self.arrstream, self.arrtime)
class OBPatientGenerator(object):
""" Generates patients.
Set the "out" member variable to the resource at which patient generated.
Parameters
----------
env : simpy.Environment
the simulation environment
adist : function
a no parameter function that returns the successive inter-arrival times of the packets
initial_delay : number
Starts generation after an initial delay. Default = 0
stoptime : number
Stops generation at the stoptime. Default is infinite
"""
def __init__(self, env, arr_stream, arr_rate, initial_delay=0, stoptime=250, debug=False):
self.id = id
self.env = env
self.arr_rate = arr_rate
self.arr_stream = arr_stream
self.initial_delay = initial_delay
self.stoptime = stoptime
self.debug = debug
self.num_patients_created = 0
self.prng = RandomState(RNG_SEED)
self.action = env.process(self.run()) # starts the run() method as a SimPy process
def run(self):
"""The patient generator.
"""
# Delay for initial_delay
yield self.env.timeout(self.initial_delay)
# Main generator loop that terminates when stoptime reached
while self.env.now < self.stoptime:
# Delay until time for next arrival
# Compute next interarrival time
iat = self.prng.exponential(1.0 / self.arr_rate)
yield self.env.timeout(iat)
self.num_patients_created += 1
# Create new patient
obp = OBpatient(self.env.now, self.arr_stream, patient_id=self.num_patients_created, prng=self.prng)
# Create a new flow instance for this patient. The OBpatient object carries all necessary info.
obflow = obpatient_flow(env, obp, self.debug)
# Register the new flow instance as a SimPy process.
self.env.process(obflow)
def obpatient_flow(env, obp, debug=False):
""" Models the patient flow process.
The sequence of units is hard coded for now.
Parameters
----------
env : simpy.Environment
the simulation environment
obp : OBpatient object
the patient to send through the flow process
"""
name = obp.name
# OBS
if debug:
print("{} trying to get OBS at {}".format(name, env.now))
bed_request_ts = env.now
bed_request1 = obs_unit.unit.request() # Request an obs bed
yield bed_request1
if debug:
print("{} entering OBS at {}".format(name, env.now))
obs_unit.num_entries += 1
enter_ts = env.now
if debug:
if env.now > bed_request_ts:
print("{} waited {} time units for OBS bed".format(name, env.now- bed_request_ts))
yield env.timeout(obp.planned_los_obs) # Stay in obs bed
if debug:
print("{} trying to get LDR at {}".format(name, env.now))
bed_request_ts = env.now
bed_request2 = ldr_unit.unit.request() # Request an obs bed
yield bed_request2
# Got LDR bed, release OBS bed
obs_unit.unit.release(bed_request1) # Release the obs bed
obs_unit.num_exits += 1
exit_ts = env.now
obs_unit.tot_occ_time += exit_ts - enter_ts
if debug:
print("{} leaving OBS at {}".format(name, env.now))
# LDR stay
if debug:
print("{} entering LDR at {}".format(name, env.now))
ldr_unit.num_entries += 1
enter_ts = env.now
if debug:
if env.now > bed_request_ts:
print("{} waited {} time units for LDR bed".format(name, env.now- bed_request_ts))
yield env.timeout(obp.planned_los_ldr) # Stay in LDR bed
if debug:
print("{} trying to get PP at {}".format(name, env.now))
bed_request_ts = env.now
bed_request3 = pp_unit.unit.request() # Request a PP bed
yield bed_request3
# Got PP bed, release LDR bed
ldr_unit.unit.release(bed_request2) # Release the ldr bed
ldr_unit.num_exits += 1
exit_ts = env.now
ldr_unit.tot_occ_time += exit_ts - enter_ts
if debug:
print("{} leaving LDR at {}".format(name, env.now))
# PP stay
if debug:
print("{} entering PP at {}".format(name, env.now))
pp_unit.num_entries += 1
enter_ts = env.now
if debug:
if env.now > bed_request_ts:
print("{} waited {} time units for PP bed".format(name, env.now- bed_request_ts))
yield env.timeout(obp.planned_los_pp) # Stay in LDR bed
pp_unit.unit.release(bed_request3) # Release the PP bed
pp_unit.num_exits += 1
exit_ts = env.now
pp_unit.tot_occ_time += exit_ts - enter_ts
if debug:
print("{} leaving PP and system at {}".format(name, env.now))
# Initialize a simulation environment
env = simpy.Environment()
rho_obs = ARR_RATE * MEAN_LOS_OBS / CAPACITY_OBS
rho_ldr = ARR_RATE * MEAN_LOS_LDR / CAPACITY_LDR
rho_pp = ARR_RATE * MEAN_LOS_PP / CAPACITY_PP
print("rho_obs: {:6.3f}, rho_ldr: {:6.3f}, rho_pp: {:6.3f}".format(rho_obs, rho_ldr, rho_pp))
# Declare a Resource to model OBS unit
obs_unit = OBunit(env, "OBS", CAPACITY_OBS, debug=True)
ldr_unit = OBunit(env, "LDR", CAPACITY_LDR, debug=True)
pp_unit = OBunit(env, "PP", CAPACITY_PP, debug=True)
# Run the simulation for a while
runtime = 100000
debug = False
obpat_gen = OBPatientGenerator(env, "Type1", ARR_RATE, 0, runtime, debug=debug)
env.run()
print("\nNum patients generated: {}\n".format(obpat_gen.num_patients_created))
print(obs_unit.basic_stats_msg())
print(ldr_unit.basic_stats_msg())
print(pp_unit.basic_stats_msg()) | obflow_4_oo_1.py | import simpy
import numpy as np
from numpy.random import RandomState
"""
Simple OB patient flow model 4 - Very simple OO
Details:
- Generate arrivals via Poisson process
- Define an OBUnit class that contains a simpy.Resource object as a member.
Not subclassing Resource, just trying to use it as a member.
- Routing is hard coded (no Router class yet)
- Just trying to get objects/processes to communicate
"""
ARR_RATE = 0.4
MEAN_LOS_OBS = 3
MEAN_LOS_LDR = 12
MEAN_LOS_PP = 48
CAPACITY_OBS = 2
CAPACITY_LDR = 6
CAPACITY_PP = 24
RNG_SEED = 6353
class OBunit(object):
""" Models an OB unit with fixed capacity.
Parameters
----------
env : simpy.Environment
the simulation environment
name : str
unit name
capacity : integer (or None)
Number of beds. Use None for infinite capacity.
"""
def __init__(self, env, name, capacity=None, debug=False):
if capacity is None:
self.capacity = capacity=simpy.core.Infinity
else:
self.capacity = capacity
self.unit = simpy.Resource(env, capacity)
self.env = env
self.name = name
self.debug = debug
self.num_entries = 0
self.num_exits = 0
self.tot_occ_time = 0.0
def basic_stats_msg(self):
msg = "{:6}:\t Entries={}, Exits={}, ALOS={:4.2f}".format(self.name,
self.num_entries,
self.num_exits,
self.tot_occ_time / self.num_exits)
return msg
class OBpatient(object):
def __init__(self, arrtime, arrstream, patient_id=0, prng=0):
self.arrtime = arrtime
self.arrstream = arrstream
self.patient_id = patient_id
self.name = 'Patient_{}'.format(patient_id)
# Hard coding for now
self.planned_los_obs = prng.exponential(MEAN_LOS_OBS)
self.planned_los_ldr = prng.exponential(MEAN_LOS_LDR)
self.planned_los_pp = prng.exponential(MEAN_LOS_PP)
def __repr__(self):
return "patientid: {}, arr_stream: {}, time: {}". \
format(self.patient_id, self.arrstream, self.arrtime)
class OBPatientGenerator(object):
""" Generates patients.
Set the "out" member variable to the resource at which patient generated.
Parameters
----------
env : simpy.Environment
the simulation environment
adist : function
a no parameter function that returns the successive inter-arrival times of the packets
initial_delay : number
Starts generation after an initial delay. Default = 0
stoptime : number
Stops generation at the stoptime. Default is infinite
"""
def __init__(self, env, arr_stream, arr_rate, initial_delay=0, stoptime=250, debug=False):
self.id = id
self.env = env
self.arr_rate = arr_rate
self.arr_stream = arr_stream
self.initial_delay = initial_delay
self.stoptime = stoptime
self.debug = debug
self.num_patients_created = 0
self.prng = RandomState(RNG_SEED)
self.action = env.process(self.run()) # starts the run() method as a SimPy process
def run(self):
"""The patient generator.
"""
# Delay for initial_delay
yield self.env.timeout(self.initial_delay)
# Main generator loop that terminates when stoptime reached
while self.env.now < self.stoptime:
# Delay until time for next arrival
# Compute next interarrival time
iat = self.prng.exponential(1.0 / self.arr_rate)
yield self.env.timeout(iat)
self.num_patients_created += 1
# Create new patient
obp = OBpatient(self.env.now, self.arr_stream, patient_id=self.num_patients_created, prng=self.prng)
# Create a new flow instance for this patient. The OBpatient object carries all necessary info.
obflow = obpatient_flow(env, obp, self.debug)
# Register the new flow instance as a SimPy process.
self.env.process(obflow)
def obpatient_flow(env, obp, debug=False):
""" Models the patient flow process.
The sequence of units is hard coded for now.
Parameters
----------
env : simpy.Environment
the simulation environment
obp : OBpatient object
the patient to send through the flow process
"""
name = obp.name
# OBS
if debug:
print("{} trying to get OBS at {}".format(name, env.now))
bed_request_ts = env.now
bed_request1 = obs_unit.unit.request() # Request an obs bed
yield bed_request1
if debug:
print("{} entering OBS at {}".format(name, env.now))
obs_unit.num_entries += 1
enter_ts = env.now
if debug:
if env.now > bed_request_ts:
print("{} waited {} time units for OBS bed".format(name, env.now- bed_request_ts))
yield env.timeout(obp.planned_los_obs) # Stay in obs bed
if debug:
print("{} trying to get LDR at {}".format(name, env.now))
bed_request_ts = env.now
bed_request2 = ldr_unit.unit.request() # Request an obs bed
yield bed_request2
# Got LDR bed, release OBS bed
obs_unit.unit.release(bed_request1) # Release the obs bed
obs_unit.num_exits += 1
exit_ts = env.now
obs_unit.tot_occ_time += exit_ts - enter_ts
if debug:
print("{} leaving OBS at {}".format(name, env.now))
# LDR stay
if debug:
print("{} entering LDR at {}".format(name, env.now))
ldr_unit.num_entries += 1
enter_ts = env.now
if debug:
if env.now > bed_request_ts:
print("{} waited {} time units for LDR bed".format(name, env.now- bed_request_ts))
yield env.timeout(obp.planned_los_ldr) # Stay in LDR bed
if debug:
print("{} trying to get PP at {}".format(name, env.now))
bed_request_ts = env.now
bed_request3 = pp_unit.unit.request() # Request a PP bed
yield bed_request3
# Got PP bed, release LDR bed
ldr_unit.unit.release(bed_request2) # Release the ldr bed
ldr_unit.num_exits += 1
exit_ts = env.now
ldr_unit.tot_occ_time += exit_ts - enter_ts
if debug:
print("{} leaving LDR at {}".format(name, env.now))
# PP stay
if debug:
print("{} entering PP at {}".format(name, env.now))
pp_unit.num_entries += 1
enter_ts = env.now
if debug:
if env.now > bed_request_ts:
print("{} waited {} time units for PP bed".format(name, env.now- bed_request_ts))
yield env.timeout(obp.planned_los_pp) # Stay in LDR bed
pp_unit.unit.release(bed_request3) # Release the PP bed
pp_unit.num_exits += 1
exit_ts = env.now
pp_unit.tot_occ_time += exit_ts - enter_ts
if debug:
print("{} leaving PP and system at {}".format(name, env.now))
# Initialize a simulation environment
env = simpy.Environment()
rho_obs = ARR_RATE * MEAN_LOS_OBS / CAPACITY_OBS
rho_ldr = ARR_RATE * MEAN_LOS_LDR / CAPACITY_LDR
rho_pp = ARR_RATE * MEAN_LOS_PP / CAPACITY_PP
print("rho_obs: {:6.3f}, rho_ldr: {:6.3f}, rho_pp: {:6.3f}".format(rho_obs, rho_ldr, rho_pp))
# Declare a Resource to model OBS unit
obs_unit = OBunit(env, "OBS", CAPACITY_OBS, debug=True)
ldr_unit = OBunit(env, "LDR", CAPACITY_LDR, debug=True)
pp_unit = OBunit(env, "PP", CAPACITY_PP, debug=True)
# Run the simulation for a while
runtime = 100000
debug = False
obpat_gen = OBPatientGenerator(env, "Type1", ARR_RATE, 0, runtime, debug=debug)
env.run()
print("\nNum patients generated: {}\n".format(obpat_gen.num_patients_created))
print(obs_unit.basic_stats_msg())
print(ldr_unit.basic_stats_msg())
print(pp_unit.basic_stats_msg()) | 0.456894 | 0.176459 |
import os
import numpy as np
import torch.nn as nn
from torch.autograd import Variable
from torch.nn.utils.rnn import pack_padded_sequence
from implicit.als import AlternatingLeastSquares
from data import SeqTensor
class ImplicitALS(AlternatingLeastSquares):
""" Simple sub-class for `implicit`s ALS algorithm """
def __init__(self, n_components, regularization=1e-3, alpha=100,
n_iters=15, dtype=np.float32, use_gpu=False):
"""
Args:
n_components (int): n_factors
regularization (float): regularization term weight
alpha (float): confidence coefficient
"""
super(ImplicitALS, self).__init__(
factors=n_components, regularization=regularization,
use_gpu=use_gpu, iterations=n_iters, dtype=np.float32
)
self.alpha = alpha
def fit(self, X):
os.environ['OPENBLAS_NUM_THREADS'] = '1'
X.data = X.data * (self.alpha - 1.)
super(ImplicitALS, self).fit(X.T)
os.environ['OPENBLAS_NUM_THREADS'] = '8'
# fill the zero factors with random values ~ N(0, 0.01)
user_zeros = (np.sum(self.user_factors, axis=1) == 0)
self.user_factors[user_zeros] = (
np.random.randn(user_zeros.sum(), self.factors) * 0.01)
item_zeros = (np.sum(self.item_factors, axis=1) == 0)
self.item_factors[item_zeros] = (
np.random.randn(item_zeros.sum(), self.factors) * 0.01)
def predict_k(self, u, k=500):
""""""
if not hasattr(self, 'user_factors') or not hasattr(self, 'item_factors'):
raise ValueError('[Error] model first should be fit!')
r = -self.user_factors[u].dot(self.item_factors.T)
ix = np.argpartition(r, k)[:k]
return ix[r[ix].argsort()]
class UserRNN(nn.Module):
    """LSTM encoder that embeds a padded batch of id sequences and maps the
    final hidden state to a fixed-size output factor."""
    def __init__(self, n_components, n_users, n_hid=100, n_out=16,
                 user_train=True, n_layers=1, drop_out=0, sparse_embedding=True):
        """
        Args:
            n_components (int): embedding dimensionality
            n_users (int): embedding vocabulary size
            n_hid (int): LSTM hidden size
            n_out (int): output factor dimensionality
            user_train (bool): if False, freeze the embedding weights
            n_layers (int): number of stacked LSTM layers
            drop_out (float): inter-layer LSTM dropout
            sparse_embedding (bool): use sparse gradients for the embedding
        """
        super(UserRNN, self).__init__()
        self.n_components = n_components
        self.n_users = n_users
        self.n_layers = n_layers
        self.n_out = n_out
        self.is_cuda = False
        # setup learnable embedding layers
        self.emb = nn.Embedding(
            n_users, n_components, sparse=sparse_embedding)
        self.emb.weight.requires_grad = user_train
        self.user_rnn = nn.LSTM(n_components, n_hid, n_layers,
                                batch_first=True, dropout=drop_out)
        self.user_out = nn.Linear(n_hid, n_out)

    def forward(self, pid):
        """
        pid: SeqTensor instance for batch of playlist
        """
        # process seqs (SeqTensor sorts by length for packing; unsorted below)
        pid = SeqTensor(pid, None, None, is_gpu=self.is_cuda)
        # process rnn
        emb_pl = self.emb(Variable(pid.seq))
        emb_pl = pack_padded_sequence(emb_pl, pid.lengths.tolist(), batch_first=True)
        out_u, hid_u = self.user_rnn(emb_pl)
        # unpack & unsort batch order
        hid_u = pid.unsort(hid_u[0][-1])  # only take last rnn layer
        # obtain final estimation
        out_u = self.user_out(hid_u)
        return out_u

    def user_factor(self, pid):
        """Alias of `forward`, kept for API compatibility.

        The original implementation duplicated `forward` line for line;
        delegating keeps a single source of truth.
        """
        return self.forward(pid)
return out_u | model.py | import os
import numpy as np
import torch.nn as nn
from torch.autograd import Variable
from torch.nn.utils.rnn import pack_padded_sequence
from implicit.als import AlternatingLeastSquares
from data import SeqTensor
class ImplicitALS(AlternatingLeastSquares):
    """ Simple sub-class for `implicit`s ALS algorithm """
    def __init__(self, n_components, regularization=1e-3, alpha=100,
                 n_iters=15, dtype=np.float32, use_gpu=False):
        """
        Args:
            n_components (int): n_factors
            regularization (float): regularization term weight
            alpha (float): confidence coefficient
            n_iters (int): number of ALS iterations
            dtype: dtype of the learned factor matrices
            use_gpu (bool): fit on the GPU if available
        """
        super(ImplicitALS, self).__init__(
            factors=n_components, regularization=regularization,
            # BUG FIX: `dtype` used to be silently ignored (np.float32 was
            # hard-coded here); forward it so callers can actually choose it.
            use_gpu=use_gpu, iterations=n_iters, dtype=dtype
        )
        self.alpha = alpha

    def fit(self, X):
        """Fit the model on a sparse user-item interaction matrix `X`.

        NOTE: `X.data` is scaled in place by (alpha - 1) to form the
        confidence weights; this mutates the caller's matrix, exactly as
        the original implementation did.
        """
        # Run the solver single-threaded w.r.t. OpenBLAS, but restore the
        # caller's previous setting afterwards instead of clobbering it
        # with a hard-coded '8' (the old behavior).
        prev = os.environ.get('OPENBLAS_NUM_THREADS')
        os.environ['OPENBLAS_NUM_THREADS'] = '1'
        try:
            X.data = X.data * (self.alpha - 1.)
            # NOTE(review): the base class is handed X.T -- presumably it
            # expects an (item, user) matrix; confirm against the installed
            # `implicit` version.
            super(ImplicitALS, self).fit(X.T)
        finally:
            if prev is None:
                del os.environ['OPENBLAS_NUM_THREADS']
            else:
                os.environ['OPENBLAS_NUM_THREADS'] = prev
        # fill the zero factors with random values ~ N(0, 0.01) so cold
        # users/items still get (arbitrary) embeddings instead of all-zeros
        user_zeros = (np.sum(self.user_factors, axis=1) == 0)
        self.user_factors[user_zeros] = (
            np.random.randn(user_zeros.sum(), self.factors) * 0.01)
        item_zeros = (np.sum(self.item_factors, axis=1) == 0)
        self.item_factors[item_zeros] = (
            np.random.randn(item_zeros.sum(), self.factors) * 0.01)

    def predict_k(self, u, k=500):
        """Return indices of the top-`k` scored items for user `u`, best first.

        Raises:
            ValueError: if called before `fit`.
        """
        if not hasattr(self, 'user_factors') or not hasattr(self, 'item_factors'):
            raise ValueError('[Error] model first should be fit!')
        # negate scores so argpartition's k smallest == the k highest scores
        r = -self.user_factors[u].dot(self.item_factors.T)
        ix = np.argpartition(r, k)[:k]
        return ix[r[ix].argsort()]
class UserRNN(nn.Module):
    """LSTM encoder that embeds a padded batch of id sequences and maps the
    final hidden state to a fixed-size output factor."""
    def __init__(self, n_components, n_users, n_hid=100, n_out=16,
                 user_train=True, n_layers=1, drop_out=0, sparse_embedding=True):
        """
        Args:
            n_components (int): embedding dimensionality
            n_users (int): embedding vocabulary size
            n_hid (int): LSTM hidden size
            n_out (int): output factor dimensionality
            user_train (bool): if False, freeze the embedding weights
            n_layers (int): number of stacked LSTM layers
            drop_out (float): inter-layer LSTM dropout
            sparse_embedding (bool): use sparse gradients for the embedding
        """
        super(UserRNN, self).__init__()
        self.n_components = n_components
        self.n_users = n_users
        self.n_layers = n_layers
        self.n_out = n_out
        self.is_cuda = False
        # setup learnable embedding layers
        self.emb = nn.Embedding(
            n_users, n_components, sparse=sparse_embedding)
        self.emb.weight.requires_grad = user_train
        self.user_rnn = nn.LSTM(n_components, n_hid, n_layers,
                                batch_first=True, dropout=drop_out)
        self.user_out = nn.Linear(n_hid, n_out)

    def forward(self, pid):
        """
        pid: SeqTensor instance for batch of playlist
        """
        # process seqs (SeqTensor sorts by length for packing; unsorted below)
        pid = SeqTensor(pid, None, None, is_gpu=self.is_cuda)
        # process rnn
        emb_pl = self.emb(Variable(pid.seq))
        emb_pl = pack_padded_sequence(emb_pl, pid.lengths.tolist(), batch_first=True)
        out_u, hid_u = self.user_rnn(emb_pl)
        # unpack & unsort batch order
        hid_u = pid.unsort(hid_u[0][-1])  # only take last rnn layer
        # obtain final estimation
        out_u = self.user_out(hid_u)
        return out_u

    def user_factor(self, pid):
        """Alias of `forward`, kept for API compatibility.

        The original implementation duplicated `forward` line for line;
        delegating keeps a single source of truth.
        """
        return self.forward(pid)
return out_u | 0.90335 | 0.358774 |
import subprocess
import textwrap
import socket
import vim
import sys
import os
import imp
from ui import DebugUI
from dbgp import DBGP
def vim_init():
    '''put DBG specific keybindings here -- e.g F1, whatever'''
    # let ':dbg' work as a command-line abbreviation for ':Dbg'
    vim.command('ca dbg Dbg')
def vim_quit():
    '''remove DBG specific keybindings'''
    # drop the ':dbg' abbreviation installed by vim_init()
    vim.command('cuna dbg')
def get_vim(name, default, fn=str):
    '''Read vim variable `name`, falling back to `default` if it is unset.

    BUG FIX: the converter `fn` was accepted but never applied, so callers
    like ``get_vim(k, v, type(v))`` always received the raw string that
    ``vim.eval`` returns (e.g. the port setting stayed a str, never an int).
    `default` is returned untouched, as before.
    '''
    if vim.eval('exists("%s")' % name) == '1':
        return fn(vim.eval(name))
    return default
import types
class Registrar:
    """Decorator-based registry of handler functions.

    Functions are collected (optionally under a name, plus extra
    args/kwds) and later bound to a concrete instance via `bind`.
    """
    def __init__(self, args=(), kwds=(), named=True):
        # named=True -> dict keyed by the first decorator argument;
        # named=False -> plain list, in registration order.
        if named:
            self.reg = {}
        else:
            self.reg = []
        self.args = args
        self.kwds = kwds
        self.named = named
    def register(self, *args, **kwds):
        # decorator factory: @reg('name', extra..., opt=...) records func
        def meta(func):
            self.add(func, args, kwds)
            # BUG FIX: return the function so the decorated name keeps
            # pointing at it -- it used to be rebound to None, which broke
            # the standard decorator contract.
            return func
        return meta
    def add(self, func, args, kwds):
        if self.named:
            self.reg[args[0]] = {'function':func, 'args':args[1:], 'kwds':kwds}
        else:
            self.reg.append({'function':func, 'args':args, 'kwds':kwds})
        return func
    def bind(self, inst):
        """Return a copy of the registry with every stored callable turned
        into a bound method of `inst` (Python 2 three-arg MethodType)."""
        res = {}
        for key, value in self.reg.iteritems():
            value = value.copy()
            res[key] = value
            if callable(value['function']):
                value['function'] = types.MethodType(value['function'], inst, inst.__class__)
        return res
    __call__ = register
class CmdRegistrar(Registrar):
    """Registrar specialized for ':Dbg <name>' commands; optionally installs
    a <Leader> mapping per command (kwds['lead'])."""
    def add(self, func, args, kwds):
        lead = kwds.get('lead', '')
        disabled_mappings = False
        # g:vim_debug_disable_mappings lets users opt out of <Leader> maps
        if vim.eval("exists('g:vim_debug_disable_mappings')") != "0":
            disabled_mappings = vim.eval("g:vim_debug_disable_mappings") != "0"
        if lead and not disabled_mappings:
            vim.command('map <Leader>%s :Dbg %s<cr>' % (lead, args[0]))
        dct = {'function':func, 'options':kwds}
        # every alias in `args` points at the same registry entry
        for name in args:
            self.reg[name] = dct
class Debugger:
    ''' This is the main debugger class... '''
    # Defaults; each key can be overridden by a same-named vim variable
    # (see init_vim).
    options = {'port':9000, 'max_children':32, 'max_data':'1024', 'minbufexpl':0, 'max_depth':1}
    def __init__(self):
        self.started = False
        self.watching = {}
        self._type = None
    def init_vim(self):
        # Build self.settings from vim globals (falling back to `options`)
        # and install the :dbg command abbreviation.
        self.ui = DebugUI()
        self.settings = {}
        for k,v in self.options.iteritems():
            self.settings[k] = get_vim(k, v, type(v))
        vim_init()
    def start_url(self, url):
        # Open `url` in a browser with the Xdebug session trigger appended,
        # then wait for the PHP debug engine to connect back to us.
        if '?' in url:
            url += '&'
        else:
            url += '?'
        url += 'XDEBUG_SESSION_START=vim_phpdebug'
        self._type = 'php'
        # only linux and mac supported atm
        command = 'xdg-open' if sys.platform.startswith('linux') else 'open'
        try:
            subprocess.Popen((command, url), stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        except OSError:
            print 'failed to start a browser. aborting debug session'
            return
        return self.start()
    def start_py(self, fname):
        # Launch `fname` under pydbgp so it connects back to localhost:9000.
        if os.name == 'nt':
            # on Windows, locate pydbgp.py relative to the installed dbgp egg
            _,PYDBGP,_ = imp.find_module('dbgp')
            PYDBGP = PYDBGP + '/../EGG-INFO/scripts/pydbgp.py'
            subprocess.Popen(('python.exe',PYDBGP, '-d', 'localhost:9000', fname), stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        else:
            subprocess.Popen(('pydbgp.py', '-d', 'localhost:9000', fname), stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        self._type = 'python'
        return self.start()
    def start(self):
        # Wire up the DBGp backend, wait for the engine to connect, push the
        # initial feature settings and take the first step.
        ## self.breaks = BreakPointManager()
        self.started = True
        self.bend = DBGP(self.settings, self.ui.windows['log'].write, self._type)
        # register all @handle(...) callbacks, bound to this instance
        for key, value in self.handle.bind(self).iteritems():
            if callable(value['function']):
                fn = value['function']
            else:
                # dotted-name handlers ('a.b') are resolved against self
                tmp = self
                for item in value['function'].split('.'):
                    tmp = getattr(tmp, item)
                fn = tmp
            self.bend.addCommandHandler(key, fn)
        self.bend.addCommandHandler('<stream>', self.ui.windows['output'].add)
        if not self.bend.connect():
            print textwrap.dedent('''\
            Unable to connect to debug server. Things to check:
                - you refreshed the page during the 5 second
                    period
                - you have the xdebug extension installed (apt-get
                    install php5-xdebug on ubuntu)
                - you set the XDEBUG_SESSION_START cookie
                - "xdebug.remote_enable = 1" is in php.ini (not
                    enabled by default)
            If you have any questions, look at
            http://tech.blog.box.net/2007/06/20/how-to-debug-php-with-vim-and-xdebug-on-linux/
            ''')
            return False
        self.ui.startup()
        self.bend.get_packets(1)
        # NOTE(review): these three feature_set commands are re-sent by the
        # loop below (with suppress=True) -- looks redundant.
        self.bend.command('feature_set', 'n', 'max_children', 'v', self.settings['max_children'])
        self.bend.command('feature_set', 'n', 'max_data', 'v', self.settings['max_data'])
        self.bend.command('feature_set', 'n', 'max_depth', 'v', self.settings['max_depth'])
        self.bend.command('stdout', 'c', '1')
        self.bend.command('stderr', 'c', '1')
        for name in ('max_children', 'max_data', 'max_depth'):
            self.bend.command('feature_set', 'n', name, 'v', self.settings[name], suppress=True)
        self.bend.command('step_into')
        self.bend.command('context_get')
        self.bend.command('stack_get')
        self.bend.command('status')
        self.ui.go_srcview()
    def set_status(self, status):
        self.status = status
        # self.party
    ''' setup + register vim commands '''
    cmd = CmdRegistrar()
    # string "functions" are resolved to methods at bind() time
    cmd('over', help='step over next function call', lead='o')('step_over')
    cmd('into', help='step into next function call', lead='i')('step_into')
    cmd('out', help='step out of current function call', lead='t')('step_out')
    cmd('run', help='continue execution until a breakpoint is reached or the program ends', lead='r')('run')
    @cmd('eval', help='eval some code', plain=True)
    def eval_(self, code):
        # evaluate an expression in the debuggee, then refresh the scope view
        self.bend.command('eval', data=code)
        self.bend.command('context_get')
    @cmd('quit', 'stop', 'exit', help='exit the debugger')
    def quit(self):
        self.bend.close()
        self.ui.close()
        vim_quit()
    @cmd('up', help='go up the stack', lead='u')
    def up(self):
        self.ui.stack_up()
    @cmd('down', help='go down the stack', lead='d')
    def down(self):
        self.ui.stack_down()
    @cmd('watch', help='execute watch functions', lead='w')
    def watch(self):
        # re-evaluate every non-blank expression in the watch window;
        # results come back asynchronously and are matched up in _eval()
        lines = self.ui.windows['watch'].expressions.buffer
        self.watching = {}
        for i, line in enumerate(lines[1:]):
            if not line.strip():continue
            # self.ui.windows['log'].write('evalling:' + line)
            tid = self.bend.command('eval', data=line, suppress=True)
            self.watching[tid] = i+1
        self.bend.get_packets()
    @cmd('break', help='set a breakpoint', lead='b')
    def break_(self):
        # toggle a breakpoint on the current cursor line
        (row, col) = vim.current.window.cursor
        file = os.path.abspath(vim.current.buffer.name)
        if not os.path.exists(file):
            print 'Not in a file'
            return
        bid = self.ui.break_at(file, row)
        if bid == -1:
            tid = self.bend.cid + 1
            self.ui.queue_break(tid, file, row)
            self.bend.command('breakpoint_set', 't', 'line', 'f', 'file://' + file, 'n', row, data='')
        else:
            tid = self.bend.cid + 1
            self.ui.queue_break_remove(tid, bid)
            self.bend.command('breakpoint_remove', 'd', bid)
    @cmd('here', help='continue execution until the cursor (tmp breakpoint)', lead='h')
    def here(self):
        # run-to-cursor via a temporary ('r', '1') breakpoint
        (row, col) = vim.current.window.cursor
        file = os.path.abspath(vim.current.buffer.name)
        if not os.path.exists(file):
            print 'Not in a file'
            return
        tid = self.bend.cid + 1
        # self.ui.queue_break(tid, file, row)
        self.bend.command('breakpoint_set', 't', 'line', 'r', '1', 'f', 'file://' + file, 'n', row, data='')
        self.bend.command('run')
    def commands(self):
        # bind all registered ':Dbg' commands to this instance (cached)
        self._commands = self.cmd.bind(self)
        return self._commands
    handle = Registrar()
    @handle('stack_get')
    def _stack_get(self, node):
        line = self.ui.windows['stack'].refresh(node)
        self.ui.set_srcview(line[2], line[3])
    @handle('breakpoint_set')
    def _breakpoint_set(self, node):
        self.ui.set_break(int(node.getAttribute('transaction_id')), node.getAttribute('id'))
        self.ui.go_srcview()
    @handle('breakpoint_remove')
    def _breakpoint_remove(self, node):
        self.ui.clear_break(int(node.getAttribute('transaction_id')))
        self.ui.go_srcview()
    def _status(self, node):
        if node.getAttribute('reason') == 'ok':
            self.set_status(node.getAttribute('status'))
    def _change(self, node):
        # after a step/run command: refresh context + stack, or shut down
        # if the debuggee has finished
        if node.getAttribute('reason') == 'ok':
            self.set_status(node.getAttribute('status'))
            if self.status != 'stopping':
                try:
                    self.bend.command('context_get')
                    self.bend.command('stack_get')
                except (EOFError, socket.error):
                    self.disable()
            else:
                self.disable()
    def disable(self):
        # NOTE(review): iterating .keys() while popping is only safe on
        # Python 2, where keys() returns a list copy.
        print 'Execution has ended; connection closed. type :Dbg quit to exit debugger'
        self.ui.unhighlight()
        for cmd in self._commands.keys():
            if cmd not in ('quit', 'close'):
                self._commands.pop(cmd)
    @handle('<init>')
    def _init(self, node):
        file = node.getAttribute('fileuri')
        self.ui.set_srcview(file, 1)
    handle('status')(_status)
    handle('stdout')(_status)
    handle('stderr')(_status)
    handle('step_into')(_change)
    handle('step_out')(_change)
    handle('step_over')(_change)
    handle('run')(_change)
    def _log(self, node):
        # fallback handler: dump the raw DBGp response into the log window
        self.ui.windows['log'].write(node.toprettyxml(indent='  '))
        pass # print node
    @handle('eval')
    def _eval(self, node):
        # route async eval results back to the watch-window line that
        # requested them (see watch())
        id = int(node.getAttribute('transaction_id'))
        if id in self.watching:
            self.ui.windows['watch'].set_result(self.watching.pop(id), node)
            self.ui.windows['watch'].expressions.focus()
    handle('property_get')(_log)
    handle('property_set')(_log)
    @handle('context_get')
    def _context_get(self, node):
        self.ui.windows['scope'].refresh(node)
    handle('feature_set')(_log)
# vim: et sw=4 sts=4 | vim_debug/new_debugger.py | import subprocess
import textwrap
import socket
import vim
import sys
import os
import imp
from ui import DebugUI
from dbgp import DBGP
def vim_init():
    '''put DBG specific keybindings here -- e.g F1, whatever'''
    # let ':dbg' work as a command-line abbreviation for ':Dbg'
    vim.command('ca dbg Dbg')
def vim_quit():
    '''remove DBG specific keybindings'''
    # drop the ':dbg' abbreviation installed by vim_init()
    vim.command('cuna dbg')
def get_vim(name, default, fn=str):
    '''Read vim variable `name`, falling back to `default` if it is unset.

    BUG FIX: the converter `fn` was accepted but never applied, so callers
    like ``get_vim(k, v, type(v))`` always received the raw string that
    ``vim.eval`` returns (e.g. the port setting stayed a str, never an int).
    `default` is returned untouched, as before.
    '''
    if vim.eval('exists("%s")' % name) == '1':
        return fn(vim.eval(name))
    return default
import types
class Registrar:
    """Decorator-based registry of handler functions.

    Functions are collected (optionally under a name, plus extra
    args/kwds) and later bound to a concrete instance via `bind`.
    """
    def __init__(self, args=(), kwds=(), named=True):
        # named=True -> dict keyed by the first decorator argument;
        # named=False -> plain list, in registration order.
        if named:
            self.reg = {}
        else:
            self.reg = []
        self.args = args
        self.kwds = kwds
        self.named = named
    def register(self, *args, **kwds):
        # decorator factory: @reg('name', extra..., opt=...) records func
        def meta(func):
            self.add(func, args, kwds)
            # BUG FIX: return the function so the decorated name keeps
            # pointing at it -- it used to be rebound to None, which broke
            # the standard decorator contract.
            return func
        return meta
    def add(self, func, args, kwds):
        if self.named:
            self.reg[args[0]] = {'function':func, 'args':args[1:], 'kwds':kwds}
        else:
            self.reg.append({'function':func, 'args':args, 'kwds':kwds})
        return func
    def bind(self, inst):
        """Return a copy of the registry with every stored callable turned
        into a bound method of `inst` (Python 2 three-arg MethodType)."""
        res = {}
        for key, value in self.reg.iteritems():
            value = value.copy()
            res[key] = value
            if callable(value['function']):
                value['function'] = types.MethodType(value['function'], inst, inst.__class__)
        return res
    __call__ = register
class CmdRegistrar(Registrar):
    """Registrar specialized for ':Dbg <name>' commands; optionally installs
    a <Leader> mapping per command (kwds['lead'])."""
    def add(self, func, args, kwds):
        lead = kwds.get('lead', '')
        disabled_mappings = False
        # g:vim_debug_disable_mappings lets users opt out of <Leader> maps
        if vim.eval("exists('g:vim_debug_disable_mappings')") != "0":
            disabled_mappings = vim.eval("g:vim_debug_disable_mappings") != "0"
        if lead and not disabled_mappings:
            vim.command('map <Leader>%s :Dbg %s<cr>' % (lead, args[0]))
        dct = {'function':func, 'options':kwds}
        # every alias in `args` points at the same registry entry
        for name in args:
            self.reg[name] = dct
class Debugger:
    ''' This is the main debugger class... '''
    # Defaults; each key can be overridden by a same-named vim variable
    # (see init_vim).
    options = {'port':9000, 'max_children':32, 'max_data':'1024', 'minbufexpl':0, 'max_depth':1}
    def __init__(self):
        self.started = False
        self.watching = {}
        self._type = None
    def init_vim(self):
        # Build self.settings from vim globals (falling back to `options`)
        # and install the :dbg command abbreviation.
        self.ui = DebugUI()
        self.settings = {}
        for k,v in self.options.iteritems():
            self.settings[k] = get_vim(k, v, type(v))
        vim_init()
    def start_url(self, url):
        # Open `url` in a browser with the Xdebug session trigger appended,
        # then wait for the PHP debug engine to connect back to us.
        if '?' in url:
            url += '&'
        else:
            url += '?'
        url += 'XDEBUG_SESSION_START=vim_phpdebug'
        self._type = 'php'
        # only linux and mac supported atm
        command = 'xdg-open' if sys.platform.startswith('linux') else 'open'
        try:
            subprocess.Popen((command, url), stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        except OSError:
            print 'failed to start a browser. aborting debug session'
            return
        return self.start()
    def start_py(self, fname):
        # Launch `fname` under pydbgp so it connects back to localhost:9000.
        if os.name == 'nt':
            # on Windows, locate pydbgp.py relative to the installed dbgp egg
            _,PYDBGP,_ = imp.find_module('dbgp')
            PYDBGP = PYDBGP + '/../EGG-INFO/scripts/pydbgp.py'
            subprocess.Popen(('python.exe',PYDBGP, '-d', 'localhost:9000', fname), stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        else:
            subprocess.Popen(('pydbgp.py', '-d', 'localhost:9000', fname), stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        self._type = 'python'
        return self.start()
    def start(self):
        # Wire up the DBGp backend, wait for the engine to connect, push the
        # initial feature settings and take the first step.
        ## self.breaks = BreakPointManager()
        self.started = True
        self.bend = DBGP(self.settings, self.ui.windows['log'].write, self._type)
        # register all @handle(...) callbacks, bound to this instance
        for key, value in self.handle.bind(self).iteritems():
            if callable(value['function']):
                fn = value['function']
            else:
                # dotted-name handlers ('a.b') are resolved against self
                tmp = self
                for item in value['function'].split('.'):
                    tmp = getattr(tmp, item)
                fn = tmp
            self.bend.addCommandHandler(key, fn)
        self.bend.addCommandHandler('<stream>', self.ui.windows['output'].add)
        if not self.bend.connect():
            print textwrap.dedent('''\
            Unable to connect to debug server. Things to check:
                - you refreshed the page during the 5 second
                    period
                - you have the xdebug extension installed (apt-get
                    install php5-xdebug on ubuntu)
                - you set the XDEBUG_SESSION_START cookie
                - "xdebug.remote_enable = 1" is in php.ini (not
                    enabled by default)
            If you have any questions, look at
            http://tech.blog.box.net/2007/06/20/how-to-debug-php-with-vim-and-xdebug-on-linux/
            ''')
            return False
        self.ui.startup()
        self.bend.get_packets(1)
        # NOTE(review): these three feature_set commands are re-sent by the
        # loop below (with suppress=True) -- looks redundant.
        self.bend.command('feature_set', 'n', 'max_children', 'v', self.settings['max_children'])
        self.bend.command('feature_set', 'n', 'max_data', 'v', self.settings['max_data'])
        self.bend.command('feature_set', 'n', 'max_depth', 'v', self.settings['max_depth'])
        self.bend.command('stdout', 'c', '1')
        self.bend.command('stderr', 'c', '1')
        for name in ('max_children', 'max_data', 'max_depth'):
            self.bend.command('feature_set', 'n', name, 'v', self.settings[name], suppress=True)
        self.bend.command('step_into')
        self.bend.command('context_get')
        self.bend.command('stack_get')
        self.bend.command('status')
        self.ui.go_srcview()
    def set_status(self, status):
        self.status = status
        # self.party
    ''' setup + register vim commands '''
    cmd = CmdRegistrar()
    # string "functions" are resolved to methods at bind() time
    cmd('over', help='step over next function call', lead='o')('step_over')
    cmd('into', help='step into next function call', lead='i')('step_into')
    cmd('out', help='step out of current function call', lead='t')('step_out')
    cmd('run', help='continue execution until a breakpoint is reached or the program ends', lead='r')('run')
    @cmd('eval', help='eval some code', plain=True)
    def eval_(self, code):
        # evaluate an expression in the debuggee, then refresh the scope view
        self.bend.command('eval', data=code)
        self.bend.command('context_get')
    @cmd('quit', 'stop', 'exit', help='exit the debugger')
    def quit(self):
        self.bend.close()
        self.ui.close()
        vim_quit()
    @cmd('up', help='go up the stack', lead='u')
    def up(self):
        self.ui.stack_up()
    @cmd('down', help='go down the stack', lead='d')
    def down(self):
        self.ui.stack_down()
    @cmd('watch', help='execute watch functions', lead='w')
    def watch(self):
        # re-evaluate every non-blank expression in the watch window;
        # results come back asynchronously and are matched up in _eval()
        lines = self.ui.windows['watch'].expressions.buffer
        self.watching = {}
        for i, line in enumerate(lines[1:]):
            if not line.strip():continue
            # self.ui.windows['log'].write('evalling:' + line)
            tid = self.bend.command('eval', data=line, suppress=True)
            self.watching[tid] = i+1
        self.bend.get_packets()
    @cmd('break', help='set a breakpoint', lead='b')
    def break_(self):
        # toggle a breakpoint on the current cursor line
        (row, col) = vim.current.window.cursor
        file = os.path.abspath(vim.current.buffer.name)
        if not os.path.exists(file):
            print 'Not in a file'
            return
        bid = self.ui.break_at(file, row)
        if bid == -1:
            tid = self.bend.cid + 1
            self.ui.queue_break(tid, file, row)
            self.bend.command('breakpoint_set', 't', 'line', 'f', 'file://' + file, 'n', row, data='')
        else:
            tid = self.bend.cid + 1
            self.ui.queue_break_remove(tid, bid)
            self.bend.command('breakpoint_remove', 'd', bid)
    @cmd('here', help='continue execution until the cursor (tmp breakpoint)', lead='h')
    def here(self):
        # run-to-cursor via a temporary ('r', '1') breakpoint
        (row, col) = vim.current.window.cursor
        file = os.path.abspath(vim.current.buffer.name)
        if not os.path.exists(file):
            print 'Not in a file'
            return
        tid = self.bend.cid + 1
        # self.ui.queue_break(tid, file, row)
        self.bend.command('breakpoint_set', 't', 'line', 'r', '1', 'f', 'file://' + file, 'n', row, data='')
        self.bend.command('run')
    def commands(self):
        # bind all registered ':Dbg' commands to this instance (cached)
        self._commands = self.cmd.bind(self)
        return self._commands
    handle = Registrar()
    @handle('stack_get')
    def _stack_get(self, node):
        line = self.ui.windows['stack'].refresh(node)
        self.ui.set_srcview(line[2], line[3])
    @handle('breakpoint_set')
    def _breakpoint_set(self, node):
        self.ui.set_break(int(node.getAttribute('transaction_id')), node.getAttribute('id'))
        self.ui.go_srcview()
    @handle('breakpoint_remove')
    def _breakpoint_remove(self, node):
        self.ui.clear_break(int(node.getAttribute('transaction_id')))
        self.ui.go_srcview()
    def _status(self, node):
        if node.getAttribute('reason') == 'ok':
            self.set_status(node.getAttribute('status'))
    def _change(self, node):
        # after a step/run command: refresh context + stack, or shut down
        # if the debuggee has finished
        if node.getAttribute('reason') == 'ok':
            self.set_status(node.getAttribute('status'))
            if self.status != 'stopping':
                try:
                    self.bend.command('context_get')
                    self.bend.command('stack_get')
                except (EOFError, socket.error):
                    self.disable()
            else:
                self.disable()
    def disable(self):
        # NOTE(review): iterating .keys() while popping is only safe on
        # Python 2, where keys() returns a list copy.
        print 'Execution has ended; connection closed. type :Dbg quit to exit debugger'
        self.ui.unhighlight()
        for cmd in self._commands.keys():
            if cmd not in ('quit', 'close'):
                self._commands.pop(cmd)
    @handle('<init>')
    def _init(self, node):
        file = node.getAttribute('fileuri')
        self.ui.set_srcview(file, 1)
    handle('status')(_status)
    handle('stdout')(_status)
    handle('stderr')(_status)
    handle('step_into')(_change)
    handle('step_out')(_change)
    handle('step_over')(_change)
    handle('run')(_change)
    def _log(self, node):
        # fallback handler: dump the raw DBGp response into the log window
        self.ui.windows['log'].write(node.toprettyxml(indent='  '))
        pass # print node
    @handle('eval')
    def _eval(self, node):
        # route async eval results back to the watch-window line that
        # requested them (see watch())
        id = int(node.getAttribute('transaction_id'))
        if id in self.watching:
            self.ui.windows['watch'].set_result(self.watching.pop(id), node)
            self.ui.windows['watch'].expressions.focus()
    handle('property_get')(_log)
    handle('property_set')(_log)
    @handle('context_get')
    def _context_get(self, node):
        self.ui.windows['scope'].refresh(node)
    handle('feature_set')(_log)
# vim: et sw=4 sts=4 | 0.18543 | 0.067762 |
import random
from dataclasses import dataclass, Field
from typing import Tuple, List
import numpy as np
from PIL import ImageDraw
from text_renderer.utils.bbox import BBox
from text_renderer.utils.draw_utils import transparent_img
from text_renderer.utils.types import PILImage
from .base_effect import Effect
class Line(Effect):
    """Draws a straight line on one side of (or across) a text image.

    Except for the two *_middle variants, the canvas is enlarged so the
    line plus its offsets fit outside the original text, and `text_bbox`
    is updated to track the text's new position.
    """

    def __init__(
        self,
        p=0.5,
        thickness=(1, 2),
        lr_in_offset=(0, 10),
        lr_out_offset=(0, 5),
        tb_in_offset=(0, 3),
        tb_out_offset=(0, 3),
        line_pos_p=(0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1),
    ):
        """
        Draw lines around text
        Args:
            p (float): probability to apply effect
            thickness (int, int): line thickness
            lr_in_offset (int, int): left-right line inner offset
            lr_out_offset (int, int): left-right line outer offset
            tb_in_offset (int, int): top-bottom line inner offset
            tb_out_offset (int, int): top-bottom line outer offset
            line_pos_p (:obj:`tuple`) : Each value corresponds a line position. Must sum to 1.
                top, bottom, left, right, top_left, top_right, bottom_left, bottom_right, horizontal_middle, vertical_middle
        """
        super().__init__(p)
        self.thickness = thickness
        self.lr_in_offset = lr_in_offset
        self.lr_out_offset = lr_out_offset
        self.tb_in_offset = tb_in_offset
        self.tb_out_offset = tb_out_offset
        self.line_pos_p = line_pos_p

    def apply(self, img: PILImage, text_bbox: BBox) -> Tuple[PILImage, BBox]:
        """Apply one randomly chosen line position, weighted by `line_pos_p`."""
        # TODO: merge apply top/bottom/left.. to make it more efficient
        func = np.random.choice(
            [
                self.apply_top,
                self.apply_bottom,
                self.apply_left,
                self.apply_right,
                self.apply_top_left,
                self.apply_top_right,
                self.apply_bottom_left,
                self.apply_bottom_right,
                self.apply_horizontal_middle,
                self.apply_vertical_middle,
            ],
            p=self.line_pos_p,
        )
        return func(img, text_bbox)

    def apply_char_box(self, img: PILImage, text_bbox: BBox) -> Tuple[PILImage, BBox]:
        """Draw a rectangular box over the image in place (bbox unchanged)."""
        row = img.height - 1
        # NOTE(review): `row % 234 == 0` only matches heights 1 and 235 --
        # looks like a leftover heuristic; kept for compatibility.
        if row % 234 == 0:
            thickness = 3
        else:
            thickness = 2
        draw = ImageDraw.Draw(img)
        width = img.width
        draw.rectangle(
            [(int(width * 0.02), 0), (width // 1.1, row)],
            width=thickness,
            outline="#000",
        )
        return img, text_bbox

    def apply_horizontal_middle(
        self, img: PILImage, text_bbox: BBox
    ) -> Tuple[PILImage, BBox]:
        """Strike a horizontal line through a random row (requires height > 2)."""
        row = np.random.randint(1, img.height - 1)
        thickness = np.random.randint(*self.thickness)
        draw = ImageDraw.Draw(img)
        draw.line(
            (0, row, img.width, row),
            fill=self._get_line_color(img, text_bbox),
            width=thickness,
        )
        return img, text_bbox

    def apply_vertical_middle(
        self, img: PILImage, text_bbox: BBox
    ) -> Tuple[PILImage, BBox]:
        """Strike a vertical line through a random column (requires width > 2)."""
        col = np.random.randint(1, img.width - 1)
        thickness = np.random.randint(*self.thickness)
        draw = ImageDraw.Draw(img)
        draw.line(
            (col, 0, col, img.height),
            fill=self._get_line_color(img, text_bbox),
            width=thickness,
        )
        return img, text_bbox

    def apply_bottom(self, img: PILImage, text_bbox: BBox) -> Tuple[PILImage, BBox]:
        """Underline: grow the canvas downwards and draw below the text."""
        in_offset, thickness, out_offset = self._get_tb_param()
        new_w = img.width
        new_h = img.height + thickness + in_offset + out_offset
        new_img = transparent_img((new_w, new_h))
        new_img.paste(img, (0, 0))
        draw = ImageDraw.Draw(new_img)
        text_bbox.bottom += in_offset
        draw.line(
            list(text_bbox.left_bottom) + list(text_bbox.right_bottom),
            fill=self._get_line_color(img, text_bbox),
            width=thickness,
        )
        text_bbox.bottom += thickness
        text_bbox.bottom += out_offset
        return new_img, text_bbox

    def apply_top(self, img: PILImage, text_bbox: BBox) -> Tuple[PILImage, BBox]:
        """Overline: grow the canvas upwards and draw above the text."""
        in_offset, thickness, out_offset = self._get_tb_param()
        new_w = img.width
        # BUG FIX: `out_offset` was missing from new_h, so the canvas was
        # allocated out_offset pixels too short: the pasted text got clipped
        # and the bbox ended up misaligned (compare apply_bottom/apply_left,
        # which include all three terms).
        new_h = img.height + thickness + in_offset + out_offset
        new_img = transparent_img((new_w, new_h))
        new_img.paste(img, (0, thickness + in_offset + out_offset))
        draw = ImageDraw.Draw(new_img)
        text_bbox.offset_(text_bbox.left_bottom, (0, new_h))
        text_bbox.top -= in_offset
        draw.line(
            list(text_bbox.left_top) + list(text_bbox.right_top),
            fill=self._get_line_color(img, text_bbox),
            width=thickness,
        )
        text_bbox.top -= thickness
        text_bbox.top -= out_offset
        return new_img, text_bbox

    def apply_right(self, img: PILImage, text_bbox: BBox) -> Tuple[PILImage, BBox]:
        """Vertical line to the right of the text; canvas grows rightwards."""
        in_offset, thickness, out_offset = self._get_lr_param()
        new_w = img.width + thickness + in_offset + out_offset
        new_h = img.height
        new_img = transparent_img((new_w, new_h))
        new_img.paste(img, (0, 0))
        draw = ImageDraw.Draw(new_img)
        text_bbox.right += in_offset
        draw.line(
            list(text_bbox.right_top) + list(text_bbox.right_bottom),
            fill=self._get_line_color(img, text_bbox),
            width=thickness,
        )
        text_bbox.right += thickness
        text_bbox.right += out_offset
        return new_img, text_bbox

    def apply_left(self, img: PILImage, text_bbox: BBox) -> Tuple[PILImage, BBox]:
        """Vertical line to the left of the text; canvas grows leftwards."""
        in_offset, thickness, out_offset = self._get_lr_param()
        new_w = img.width + thickness + in_offset + out_offset
        new_h = img.height
        new_img = transparent_img((new_w, new_h))
        new_img.paste(img, (thickness + in_offset + out_offset, 0))
        draw = ImageDraw.Draw(new_img)
        text_bbox.offset_(text_bbox.right_top, (new_w, 0))
        text_bbox.left -= in_offset
        draw.line(
            list(text_bbox.left_top) + list(text_bbox.left_bottom),
            fill=self._get_line_color(img, text_bbox),
            width=thickness,
        )
        text_bbox.left -= thickness
        text_bbox.left -= out_offset
        return new_img, text_bbox

    def apply_top_left(self, img: PILImage, text_bbox: BBox) -> Tuple[PILImage, BBox]:
        """Top line plus left line."""
        ret = self.apply_top(img, text_bbox)
        return self.apply_left(*ret)

    def apply_top_right(self, img: PILImage, text_bbox: BBox) -> Tuple[PILImage, BBox]:
        """Top line plus right line."""
        ret = self.apply_top(img, text_bbox)
        return self.apply_right(*ret)

    def apply_bottom_left(
        self, img: PILImage, text_bbox: BBox
    ) -> Tuple[PILImage, BBox]:
        """Bottom line plus left line."""
        ret = self.apply_bottom(img, text_bbox)
        return self.apply_left(*ret)

    def apply_bottom_right(
        self, img: PILImage, text_bbox: BBox
    ) -> Tuple[PILImage, BBox]:
        """Bottom line plus right line."""
        ret = self.apply_bottom(img, text_bbox)
        return self.apply_right(*ret)

    def _get_lr_param(self) -> Tuple[int, int, int]:
        """Sample (inner offset, thickness, outer offset) for left/right lines."""
        in_offset = np.random.randint(*self.lr_in_offset)
        out_offset = np.random.randint(*self.lr_out_offset)
        thickness = np.random.randint(*self.thickness)
        return in_offset, thickness, out_offset

    def _get_tb_param(self) -> Tuple[int, int, int]:
        """Sample (inner offset, thickness, outer offset) for top/bottom lines."""
        in_offset = np.random.randint(*self.tb_in_offset)
        out_offset = np.random.randint(*self.tb_out_offset)
        thickness = np.random.randint(*self.thickness)
        return in_offset, thickness, out_offset

    def _get_line_color(self, img: PILImage, text_bbox: BBox):
        """Random darkish RGBA color (`img`/`text_bbox` currently unused)."""
        # TODO: make this configurable
        return (
            np.random.randint(0, 170),
            np.random.randint(0, 170),
            np.random.randint(0, 170),
            np.random.randint(90, 255),
        )
from dataclasses import dataclass, Field
from typing import Tuple, List
import numpy as np
from PIL import ImageDraw
from text_renderer.utils.bbox import BBox
from text_renderer.utils.draw_utils import transparent_img
from text_renderer.utils.types import PILImage
from .base_effect import Effect
class Line(Effect):
    """Draws a straight line on one side of (or across) a text image.

    Except for the two *_middle variants, the canvas is enlarged so the
    line plus its offsets fit outside the original text, and `text_bbox`
    is updated to track the text's new position.
    """

    def __init__(
        self,
        p=0.5,
        thickness=(1, 2),
        lr_in_offset=(0, 10),
        lr_out_offset=(0, 5),
        tb_in_offset=(0, 3),
        tb_out_offset=(0, 3),
        line_pos_p=(0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1),
    ):
        """
        Draw lines around text
        Args:
            p (float): probability to apply effect
            thickness (int, int): line thickness
            lr_in_offset (int, int): left-right line inner offset
            lr_out_offset (int, int): left-right line outer offset
            tb_in_offset (int, int): top-bottom line inner offset
            tb_out_offset (int, int): top-bottom line outer offset
            line_pos_p (:obj:`tuple`) : Each value corresponds a line position. Must sum to 1.
                top, bottom, left, right, top_left, top_right, bottom_left, bottom_right, horizontal_middle, vertical_middle
        """
        super().__init__(p)
        self.thickness = thickness
        self.lr_in_offset = lr_in_offset
        self.lr_out_offset = lr_out_offset
        self.tb_in_offset = tb_in_offset
        self.tb_out_offset = tb_out_offset
        self.line_pos_p = line_pos_p

    def apply(self, img: PILImage, text_bbox: BBox) -> Tuple[PILImage, BBox]:
        """Apply one randomly chosen line position, weighted by `line_pos_p`."""
        # TODO: merge apply top/bottom/left.. to make it more efficient
        func = np.random.choice(
            [
                self.apply_top,
                self.apply_bottom,
                self.apply_left,
                self.apply_right,
                self.apply_top_left,
                self.apply_top_right,
                self.apply_bottom_left,
                self.apply_bottom_right,
                self.apply_horizontal_middle,
                self.apply_vertical_middle,
            ],
            p=self.line_pos_p,
        )
        return func(img, text_bbox)

    def apply_char_box(self, img: PILImage, text_bbox: BBox) -> Tuple[PILImage, BBox]:
        """Draw a rectangular box over the image in place (bbox unchanged)."""
        row = img.height - 1
        # NOTE(review): `row % 234 == 0` only matches heights 1 and 235 --
        # looks like a leftover heuristic; kept for compatibility.
        if row % 234 == 0:
            thickness = 3
        else:
            thickness = 2
        draw = ImageDraw.Draw(img)
        width = img.width
        draw.rectangle(
            [(int(width * 0.02), 0), (width // 1.1, row)],
            width=thickness,
            outline="#000",
        )
        return img, text_bbox

    def apply_horizontal_middle(
        self, img: PILImage, text_bbox: BBox
    ) -> Tuple[PILImage, BBox]:
        """Strike a horizontal line through a random row (requires height > 2)."""
        row = np.random.randint(1, img.height - 1)
        thickness = np.random.randint(*self.thickness)
        draw = ImageDraw.Draw(img)
        draw.line(
            (0, row, img.width, row),
            fill=self._get_line_color(img, text_bbox),
            width=thickness,
        )
        return img, text_bbox

    def apply_vertical_middle(
        self, img: PILImage, text_bbox: BBox
    ) -> Tuple[PILImage, BBox]:
        """Strike a vertical line through a random column (requires width > 2)."""
        col = np.random.randint(1, img.width - 1)
        thickness = np.random.randint(*self.thickness)
        draw = ImageDraw.Draw(img)
        draw.line(
            (col, 0, col, img.height),
            fill=self._get_line_color(img, text_bbox),
            width=thickness,
        )
        return img, text_bbox

    def apply_bottom(self, img: PILImage, text_bbox: BBox) -> Tuple[PILImage, BBox]:
        """Underline: grow the canvas downwards and draw below the text."""
        in_offset, thickness, out_offset = self._get_tb_param()
        new_w = img.width
        new_h = img.height + thickness + in_offset + out_offset
        new_img = transparent_img((new_w, new_h))
        new_img.paste(img, (0, 0))
        draw = ImageDraw.Draw(new_img)
        text_bbox.bottom += in_offset
        draw.line(
            list(text_bbox.left_bottom) + list(text_bbox.right_bottom),
            fill=self._get_line_color(img, text_bbox),
            width=thickness,
        )
        text_bbox.bottom += thickness
        text_bbox.bottom += out_offset
        return new_img, text_bbox

    def apply_top(self, img: PILImage, text_bbox: BBox) -> Tuple[PILImage, BBox]:
        """Overline: grow the canvas upwards and draw above the text."""
        in_offset, thickness, out_offset = self._get_tb_param()
        new_w = img.width
        # BUG FIX: `out_offset` was missing from new_h, so the canvas was
        # allocated out_offset pixels too short: the pasted text got clipped
        # and the bbox ended up misaligned (compare apply_bottom/apply_left,
        # which include all three terms).
        new_h = img.height + thickness + in_offset + out_offset
        new_img = transparent_img((new_w, new_h))
        new_img.paste(img, (0, thickness + in_offset + out_offset))
        draw = ImageDraw.Draw(new_img)
        text_bbox.offset_(text_bbox.left_bottom, (0, new_h))
        text_bbox.top -= in_offset
        draw.line(
            list(text_bbox.left_top) + list(text_bbox.right_top),
            fill=self._get_line_color(img, text_bbox),
            width=thickness,
        )
        text_bbox.top -= thickness
        text_bbox.top -= out_offset
        return new_img, text_bbox

    def apply_right(self, img: PILImage, text_bbox: BBox) -> Tuple[PILImage, BBox]:
        """Vertical line to the right of the text; canvas grows rightwards."""
        in_offset, thickness, out_offset = self._get_lr_param()
        new_w = img.width + thickness + in_offset + out_offset
        new_h = img.height
        new_img = transparent_img((new_w, new_h))
        new_img.paste(img, (0, 0))
        draw = ImageDraw.Draw(new_img)
        text_bbox.right += in_offset
        draw.line(
            list(text_bbox.right_top) + list(text_bbox.right_bottom),
            fill=self._get_line_color(img, text_bbox),
            width=thickness,
        )
        text_bbox.right += thickness
        text_bbox.right += out_offset
        return new_img, text_bbox

    def apply_left(self, img: PILImage, text_bbox: BBox) -> Tuple[PILImage, BBox]:
        """Vertical line to the left of the text; canvas grows leftwards."""
        in_offset, thickness, out_offset = self._get_lr_param()
        new_w = img.width + thickness + in_offset + out_offset
        new_h = img.height
        new_img = transparent_img((new_w, new_h))
        new_img.paste(img, (thickness + in_offset + out_offset, 0))
        draw = ImageDraw.Draw(new_img)
        text_bbox.offset_(text_bbox.right_top, (new_w, 0))
        text_bbox.left -= in_offset
        draw.line(
            list(text_bbox.left_top) + list(text_bbox.left_bottom),
            fill=self._get_line_color(img, text_bbox),
            width=thickness,
        )
        text_bbox.left -= thickness
        text_bbox.left -= out_offset
        return new_img, text_bbox

    def apply_top_left(self, img: PILImage, text_bbox: BBox) -> Tuple[PILImage, BBox]:
        """Top line plus left line."""
        ret = self.apply_top(img, text_bbox)
        return self.apply_left(*ret)

    def apply_top_right(self, img: PILImage, text_bbox: BBox) -> Tuple[PILImage, BBox]:
        """Top line plus right line."""
        ret = self.apply_top(img, text_bbox)
        return self.apply_right(*ret)

    def apply_bottom_left(
        self, img: PILImage, text_bbox: BBox
    ) -> Tuple[PILImage, BBox]:
        """Bottom line plus left line."""
        ret = self.apply_bottom(img, text_bbox)
        return self.apply_left(*ret)

    def apply_bottom_right(
        self, img: PILImage, text_bbox: BBox
    ) -> Tuple[PILImage, BBox]:
        """Bottom line plus right line."""
        ret = self.apply_bottom(img, text_bbox)
        return self.apply_right(*ret)

    def _get_lr_param(self) -> Tuple[int, int, int]:
        """Sample (inner offset, thickness, outer offset) for left/right lines."""
        in_offset = np.random.randint(*self.lr_in_offset)
        out_offset = np.random.randint(*self.lr_out_offset)
        thickness = np.random.randint(*self.thickness)
        return in_offset, thickness, out_offset

    def _get_tb_param(self) -> Tuple[int, int, int]:
        """Sample (inner offset, thickness, outer offset) for top/bottom lines."""
        in_offset = np.random.randint(*self.tb_in_offset)
        out_offset = np.random.randint(*self.tb_out_offset)
        thickness = np.random.randint(*self.thickness)
        return in_offset, thickness, out_offset

    def _get_line_color(self, img: PILImage, text_bbox: BBox):
        """Random darkish RGBA color (`img`/`text_bbox` currently unused)."""
        # TODO: make this configurable
        return (
            np.random.randint(0, 170),
            np.random.randint(0, 170),
            np.random.randint(0, 170),
            np.random.randint(90, 255),
        )
import math
from robofab.objects.objectsRF import RPoint, RSegment
from fontbuild.convertCurves import replaceSegments
def getTangents(contours):
tmap = []
for c in contours:
clen = len(c)
for i in range(clen):
s = c[i]
p = s.points[-1]
ns = c[(i + 1) % clen]
ps = c[(clen + i - 1) % clen]
np = ns.points[1] if ns.type == 'curve' else ns.points[-1]
pp = s.points[2] if s.type == 'curve' else ps.points[-1]
tmap.append((pp - p, np - p))
return tmap
def normalizeVector(p):
m = getMagnitude(p);
if m != 0:
return p*(1/m)
else:
return RPoint(0,0)
def getMagnitude(p):
return math.sqrt(p.x*p.x + p.y*p.y)
def getDistance(v1,v2):
return getMagnitude(RPoint(v1.x - v2.x, v1.y - v2.y))
def getAngle(v1,v2):
angle = math.atan2(v1.y,v1.x) - math.atan2(v2.y,v2.x)
return (angle + (2*math.pi)) % (2*math.pi)
def angleDiff(a,b):
return math.pi - abs((abs(a - b) % (math.pi*2)) - math.pi)
def getAngle2(v1,v2):
return abs(angleDiff(math.atan2(v1.y, v1.x), math.atan2(v2.y, v2.x)))
def getMitreOffset(n,v1,v2,mitreSize=4,maxAngle=.9):
# dont mitre if segment is too short
if abs(getMagnitude(v1)) < mitreSize * 2 or abs(getMagnitude(v2)) < mitreSize * 2:
return
angle = getAngle2(v2,v1)
v1 = normalizeVector(v1)
v2 = normalizeVector(v2)
if v1.x == v2.x and v1.y == v2.y:
return
# only mitre corners sharper than maxAngle
if angle > maxAngle:
return
radius = mitreSize / abs(getDistance(v1,v2))
offset1 = RPoint(round(v1.x * radius), round(v1.y * radius))
offset2 = RPoint(round(v2.x * radius), round(v2.y * radius))
return offset1, offset2
def mitreGlyph(g,mitreSize,maxAngle):
if g == None:
return
tangents = getTangents(g.contours)
sid = -1
for c in g.contours:
segments = []
needsMitring = False
for s in c:
sid += 1
v1, v2 = tangents[sid]
off = getMitreOffset(s,v1,v2,mitreSize,maxAngle)
s1 = s.copy()
if off != None:
offset1, offset2 = off
p2 = s.points[-1] + offset2
s2 = RSegment('line', [(p2.x, p2.y)])
s1.points[0] += offset1
segments.append(s1)
segments.append(s2)
needsMitring = True
else:
segments.append(s1)
if needsMitring:
replaceSegments(c, segments) | scripts/lib/fontbuild/mitreGlyph.py | import math
from robofab.objects.objectsRF import RPoint, RSegment
from fontbuild.convertCurves import replaceSegments
def getTangents(contours):
tmap = []
for c in contours:
clen = len(c)
for i in range(clen):
s = c[i]
p = s.points[-1]
ns = c[(i + 1) % clen]
ps = c[(clen + i - 1) % clen]
np = ns.points[1] if ns.type == 'curve' else ns.points[-1]
pp = s.points[2] if s.type == 'curve' else ps.points[-1]
tmap.append((pp - p, np - p))
return tmap
def normalizeVector(p):
m = getMagnitude(p);
if m != 0:
return p*(1/m)
else:
return RPoint(0,0)
def getMagnitude(p):
return math.sqrt(p.x*p.x + p.y*p.y)
def getDistance(v1,v2):
return getMagnitude(RPoint(v1.x - v2.x, v1.y - v2.y))
def getAngle(v1,v2):
angle = math.atan2(v1.y,v1.x) - math.atan2(v2.y,v2.x)
return (angle + (2*math.pi)) % (2*math.pi)
def angleDiff(a,b):
return math.pi - abs((abs(a - b) % (math.pi*2)) - math.pi)
def getAngle2(v1,v2):
return abs(angleDiff(math.atan2(v1.y, v1.x), math.atan2(v2.y, v2.x)))
def getMitreOffset(n,v1,v2,mitreSize=4,maxAngle=.9):
# dont mitre if segment is too short
if abs(getMagnitude(v1)) < mitreSize * 2 or abs(getMagnitude(v2)) < mitreSize * 2:
return
angle = getAngle2(v2,v1)
v1 = normalizeVector(v1)
v2 = normalizeVector(v2)
if v1.x == v2.x and v1.y == v2.y:
return
# only mitre corners sharper than maxAngle
if angle > maxAngle:
return
radius = mitreSize / abs(getDistance(v1,v2))
offset1 = RPoint(round(v1.x * radius), round(v1.y * radius))
offset2 = RPoint(round(v2.x * radius), round(v2.y * radius))
return offset1, offset2
def mitreGlyph(g,mitreSize,maxAngle):
if g == None:
return
tangents = getTangents(g.contours)
sid = -1
for c in g.contours:
segments = []
needsMitring = False
for s in c:
sid += 1
v1, v2 = tangents[sid]
off = getMitreOffset(s,v1,v2,mitreSize,maxAngle)
s1 = s.copy()
if off != None:
offset1, offset2 = off
p2 = s.points[-1] + offset2
s2 = RSegment('line', [(p2.x, p2.y)])
s1.points[0] += offset1
segments.append(s1)
segments.append(s2)
needsMitring = True
else:
segments.append(s1)
if needsMitring:
replaceSegments(c, segments) | 0.316475 | 0.394259 |
"""Python binary to train COVID-19 epidemic model with single CPU."""
from typing import Text
from absl import app
from absl import flags
import io_utils
import json
import logging
import numpy as np
import os
import resample
# pylint: disable=import-not-at-top
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"
import combined_model
NUM_STATES = 58
flags.DEFINE_float("learning_rate", 0.01, "Initial learning rate.")
flags.DEFINE_integer("max_epochs", 2000, "Number of steps to run trainer.")
flags.DEFINE_integer("test_duration", 100, "Number of days to predict")
flags.DEFINE_string("metadata", None, "")
flags.DEFINE_string("residuals", None, "Only used in resample jobs.")
flags.DEFINE_string("output_path", None, "Path to store the output data.")
flags.DEFINE_integer("min_t0", 1, "value for which t0 iteration starts.")
flags.DEFINE_integer("max_t0", 21, "value for which t0 iteration ends.")
flags.DEFINE_string("file_index_plus_one", None, "Index for permutation.")
flags.DEFINE_bool(
"output_to_json", True, "If the export data are saved in json file.")
flags.DEFINE_bool(
"resample_jobs", False, "If the model estimation is on resampled data.")
flags.DEFINE_bool("flatten_future", True, "Indicates whether or not the "
"future prediction relies on the flat death and infection "
"rates.")
FLAGS = flags.FLAGS
def read_metadata(fpath: Text, n_states: int):
with open(fpath, "r") as f:
jdata = json.load(f)
if len(jdata.keys()) != n_states:
raise ValueError(f"The metadata should contains exactly {n_states} records.")
for k, v in jdata.items():
if set(v.keys()) != {"state", "knot", "data"}:
raise ValueError(f"At the record {k}, the json data don't follow the required format, "
"expected to have three fields: state, knot, and data.")
return jdata
def map_keys(file_index: int, n_states: int, resample_jobs: bool = False):
int_file_index = int(file_index)
if resample_jobs:
return str(int_file_index % n_states), str(int_file_index // n_states)
else:
if int_file_index >= n_states:
raise ValueError(f"The file index should be between 0 and {n_states}.")
return file_index, None
def main(unused_argv):
logger = logging.getLogger("Covid-19-estimation")
job_metadata = read_metadata(FLAGS.metadata, NUM_STATES)
job_file_index = str(int(FLAGS.file_index_plus_one) - 1)
state_key, resample_key = map_keys(
job_file_index, NUM_STATES, FLAGS.resample_jobs)
job_state = job_metadata[state_key]["state"]
logger.info(f"Create estimation job for the state {job_state}.")
# Parse the required data & args for running the estimation model.
job_knots = job_metadata[state_key]["knot"]
if FLAGS.resample_jobs:
job_residuals = resample.read_residuals(
FLAGS.residuals, NUM_STATES)
job_input_data = resample.get_resampled_input(
job_residuals[state_key]["fitted"],
job_residuals[state_key]["residual"],
int(resample_key))
else:
job_input_data = job_metadata[state_key]["data"]
job_knots_connect = [1] * len(job_knots)
job_initial_a = [0.2] * (len(job_knots) + 1)
# Step 1 estimation (infection cases only).
estimator = combined_model.Covid19CombinedEstimator(
knots=job_knots,
knots_connect=job_knots_connect,
loss_prop=0,
estimator_args={
"learning_rate": FLAGS.learning_rate,
"epochs": FLAGS.max_epochs},
initial_guess_a=np.array(job_initial_a),
variable_death_rate_trainable=False
)
estimator.fit(data=job_input_data, min_t0=FLAGS.min_t0, max_t0=FLAGS.max_t0)
# Save the estimated weights from step 1 (will be fixed in step 2).
stage1_estimated_a, stage1_estimated_t0 = estimator.final_model.a.numpy(), estimator.final_model.t0
logger.info("First stage estimation done.")
# Step 2 estimation (death only).
estimator = combined_model.Covid19CombinedEstimator(
knots=job_knots,
knots_connect=job_knots_connect,
loss_prop=1,
estimator_args={
"learning_rate": FLAGS.learning_rate,
"epochs": FLAGS.max_epochs},
initial_guess_a=stage1_estimated_a,
variable_a_trainable=False
)
estimator.fit(data=job_input_data, min_t0=stage1_estimated_t0, max_t0=stage1_estimated_t0)
io_utils.parse_estimated_model(estimator)
if FLAGS.resample_jobs:
job_suffix = "state_" + state_key + "resample_" + resample_key
else:
job_suffix = "state_" + state_key
io_utils.export_estimation_and_prediction(
estimator=estimator,
test_duration=FLAGS.test_duration,
output_path=FLAGS.output_path,
suffix=job_suffix,
flatten_future=FLAGS.flatten_future,
to_json=FLAGS.output_to_json
)
logger.info("Second stage estimation done.")
if __name__ == "__main__":
flags.mark_flag_as_required("metadata")
flags.mark_flag_as_required("output_path")
flags.mark_flag_as_required("file_index_plus_one")
app.run(main) | python/code state/run_two_stage_state_cluster.py | """Python binary to train COVID-19 epidemic model with single CPU."""
from typing import Text
from absl import app
from absl import flags
import io_utils
import json
import logging
import numpy as np
import os
import resample
# pylint: disable=import-not-at-top
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"
import combined_model
NUM_STATES = 58
flags.DEFINE_float("learning_rate", 0.01, "Initial learning rate.")
flags.DEFINE_integer("max_epochs", 2000, "Number of steps to run trainer.")
flags.DEFINE_integer("test_duration", 100, "Number of days to predict")
flags.DEFINE_string("metadata", None, "")
flags.DEFINE_string("residuals", None, "Only used in resample jobs.")
flags.DEFINE_string("output_path", None, "Path to store the output data.")
flags.DEFINE_integer("min_t0", 1, "value for which t0 iteration starts.")
flags.DEFINE_integer("max_t0", 21, "value for which t0 iteration ends.")
flags.DEFINE_string("file_index_plus_one", None, "Index for permutation.")
flags.DEFINE_bool(
"output_to_json", True, "If the export data are saved in json file.")
flags.DEFINE_bool(
"resample_jobs", False, "If the model estimation is on resampled data.")
flags.DEFINE_bool("flatten_future", True, "Indicates whether or not the "
"future prediction relies on the flat death and infection "
"rates.")
FLAGS = flags.FLAGS
def read_metadata(fpath: Text, n_states: int):
with open(fpath, "r") as f:
jdata = json.load(f)
if len(jdata.keys()) != n_states:
raise ValueError(f"The metadata should contains exactly {n_states} records.")
for k, v in jdata.items():
if set(v.keys()) != {"state", "knot", "data"}:
raise ValueError(f"At the record {k}, the json data don't follow the required format, "
"expected to have three fields: state, knot, and data.")
return jdata
def map_keys(file_index: int, n_states: int, resample_jobs: bool = False):
int_file_index = int(file_index)
if resample_jobs:
return str(int_file_index % n_states), str(int_file_index // n_states)
else:
if int_file_index >= n_states:
raise ValueError(f"The file index should be between 0 and {n_states}.")
return file_index, None
def main(unused_argv):
logger = logging.getLogger("Covid-19-estimation")
job_metadata = read_metadata(FLAGS.metadata, NUM_STATES)
job_file_index = str(int(FLAGS.file_index_plus_one) - 1)
state_key, resample_key = map_keys(
job_file_index, NUM_STATES, FLAGS.resample_jobs)
job_state = job_metadata[state_key]["state"]
logger.info(f"Create estimation job for the state {job_state}.")
# Parse the required data & args for running the estimation model.
job_knots = job_metadata[state_key]["knot"]
if FLAGS.resample_jobs:
job_residuals = resample.read_residuals(
FLAGS.residuals, NUM_STATES)
job_input_data = resample.get_resampled_input(
job_residuals[state_key]["fitted"],
job_residuals[state_key]["residual"],
int(resample_key))
else:
job_input_data = job_metadata[state_key]["data"]
job_knots_connect = [1] * len(job_knots)
job_initial_a = [0.2] * (len(job_knots) + 1)
# Step 1 estimation (infection cases only).
estimator = combined_model.Covid19CombinedEstimator(
knots=job_knots,
knots_connect=job_knots_connect,
loss_prop=0,
estimator_args={
"learning_rate": FLAGS.learning_rate,
"epochs": FLAGS.max_epochs},
initial_guess_a=np.array(job_initial_a),
variable_death_rate_trainable=False
)
estimator.fit(data=job_input_data, min_t0=FLAGS.min_t0, max_t0=FLAGS.max_t0)
# Save the estimated weights from step 1 (will be fixed in step 2).
stage1_estimated_a, stage1_estimated_t0 = estimator.final_model.a.numpy(), estimator.final_model.t0
logger.info("First stage estimation done.")
# Step 2 estimation (death only).
estimator = combined_model.Covid19CombinedEstimator(
knots=job_knots,
knots_connect=job_knots_connect,
loss_prop=1,
estimator_args={
"learning_rate": FLAGS.learning_rate,
"epochs": FLAGS.max_epochs},
initial_guess_a=stage1_estimated_a,
variable_a_trainable=False
)
estimator.fit(data=job_input_data, min_t0=stage1_estimated_t0, max_t0=stage1_estimated_t0)
io_utils.parse_estimated_model(estimator)
if FLAGS.resample_jobs:
job_suffix = "state_" + state_key + "resample_" + resample_key
else:
job_suffix = "state_" + state_key
io_utils.export_estimation_and_prediction(
estimator=estimator,
test_duration=FLAGS.test_duration,
output_path=FLAGS.output_path,
suffix=job_suffix,
flatten_future=FLAGS.flatten_future,
to_json=FLAGS.output_to_json
)
logger.info("Second stage estimation done.")
if __name__ == "__main__":
flags.mark_flag_as_required("metadata")
flags.mark_flag_as_required("output_path")
flags.mark_flag_as_required("file_index_plus_one")
app.run(main) | 0.850096 | 0.34403 |
from hydep.constants import SECONDS_PER_DAY
class TimeStep:
"""Class for modifying and storing time step information.
Parameters
----------
coarse : int, optional
Current coarse time step. Defaults to zero
substep : int, optional
Current substep index. Defaults to None
total : int, optional
Current total time step index, reflecting sum
of all coarse and substeps
currentTime : float, optional
Current point in calendar time [s]
Attributes
----------
coarse : int
Current coarse time step index
substep : int or None
Current substep index. Will only be ``None`` if
not actively in a substep regime, like in the
initial preliminary stages
total : int
Current total time step index, including substeps
currentTime : float
Current point in calendar time [s]
Examples
--------
>>> t = TimeStep()
>>> t.coarse, t.substep, t.total, t.currentTime
(0, None, 0, 0.0)
>>> t.increment(100000)
>>> t.coarse, t.substep, t.total, t.currentTime
(1, None, 1, 100000.0)
>>> t += 86400
>>> t.coarse, t.substep, t.total, t.currentTime
(1, 1, 2, 186400.0)
"""
__slots__ = ("coarse", "substep", "total", "currentTime")
def __init__(self, coarse=None, substep=None, total=None, currentTime=None):
self.coarse = 0 if coarse is None else int(coarse)
self.substep = None if substep is None else int(substep)
self.total = 0 if total is None else int(total)
self.currentTime = 0.0 if currentTime is None else float(currentTime)
def increment(self, delta, coarse=True):
"""Advance across a coarse time step or substep of length ``delta`` [s]"""
if coarse:
self.substep = None
self.coarse += 1
else:
self.substep = (self.substep + 1) if self.substep is not None else 1
self.total += 1
self.currentTime += delta
def __iadd__(self, delta):
"""Advance one substep of length ``delta`` [s]"""
self.increment(delta, coarse=False)
return self
def __repr__(self) -> str:
return (
f"{type(self).__name__}(coarse={self.coarse}, substep={self.substep}, "
f"total={self.total}, currentTime={self.currentTime / SECONDS_PER_DAY} [d])"
) | src/hydep/internal/timestep.py |
from hydep.constants import SECONDS_PER_DAY
class TimeStep:
"""Class for modifying and storing time step information.
Parameters
----------
coarse : int, optional
Current coarse time step. Defaults to zero
substep : int, optional
Current substep index. Defaults to None
total : int, optional
Current total time step index, reflecting sum
of all coarse and substeps
currentTime : float, optional
Current point in calendar time [s]
Attributes
----------
coarse : int
Current coarse time step index
substep : int or None
Current substep index. Will only be ``None`` if
not actively in a substep regime, like in the
initial preliminary stages
total : int
Current total time step index, including substeps
currentTime : float
Current point in calendar time [s]
Examples
--------
>>> t = TimeStep()
>>> t.coarse, t.substep, t.total, t.currentTime
(0, None, 0, 0.0)
>>> t.increment(100000)
>>> t.coarse, t.substep, t.total, t.currentTime
(1, None, 1, 100000.0)
>>> t += 86400
>>> t.coarse, t.substep, t.total, t.currentTime
(1, 1, 2, 186400.0)
"""
__slots__ = ("coarse", "substep", "total", "currentTime")
def __init__(self, coarse=None, substep=None, total=None, currentTime=None):
self.coarse = 0 if coarse is None else int(coarse)
self.substep = None if substep is None else int(substep)
self.total = 0 if total is None else int(total)
self.currentTime = 0.0 if currentTime is None else float(currentTime)
def increment(self, delta, coarse=True):
"""Advance across a coarse time step or substep of length ``delta`` [s]"""
if coarse:
self.substep = None
self.coarse += 1
else:
self.substep = (self.substep + 1) if self.substep is not None else 1
self.total += 1
self.currentTime += delta
def __iadd__(self, delta):
"""Advance one substep of length ``delta`` [s]"""
self.increment(delta, coarse=False)
return self
def __repr__(self) -> str:
return (
f"{type(self).__name__}(coarse={self.coarse}, substep={self.substep}, "
f"total={self.total}, currentTime={self.currentTime / SECONDS_PER_DAY} [d])"
) | 0.935817 | 0.518668 |
from CTL.causal_tree.nn_pehe.tree import *
from sklearn.model_selection import train_test_split
class ValNode(PEHENode):
def __init__(self, **kwargs):
super().__init__(**kwargs)
# self.obj = obj
# ----------------------------------------------------------------
# Base causal tree (ctl, base objective)
# ----------------------------------------------------------------
class ValPEHE(PEHETree):
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.root = ValNode()
def fit(self, x, y, t):
if x.shape[0] == 0:
return 0
# ----------------------------------------------------------------
# Seed
# ----------------------------------------------------------------
np.random.seed(self.seed)
# ----------------------------------------------------------------
# Split data
# ----------------------------------------------------------------
x, val_x, y, val_y, t, val_t = train_test_split(x, y, t, random_state=self.seed, shuffle=True,
test_size=self.val_split)
self.root.num_samples = y.shape[0]
self.num_training = y.shape[0]
# ----------------------------------------------------------------
# NN_effect estimates
# use the overall datasets for nearest neighbor for now
# ----------------------------------------------------------------
nn_effect = compute_nn_effect(x, y, t, k=self.k)
val_nn_effect = compute_nn_effect(val_x, val_y, val_t, k=self.k)
# ----------------------------------------------------------------
# effect and pvals
# ----------------------------------------------------------------
effect = tau_squared(y, t)
p_val = get_pval(y, t)
self.root.effect = effect
self.root.p_val = p_val
# ----------------------------------------------------------------
# Not sure if i should eval in root or not
# ----------------------------------------------------------------
obj, nn_pehe = self._eval(y, t, nn_effect, val_y, val_t, val_nn_effect)
self.root.obj = obj
self.obj = self.root.obj
self.root.pehe = nn_pehe
self.pehe = nn_pehe
# ----------------------------------------------------------------
# Add control/treatment means
# ----------------------------------------------------------------
self.root.control_mean = np.mean(y[t == 0])
self.root.treatment_mean = np.mean(y[t == 1])
self.root.num_samples = x.shape[0]
self._fit(self.root, x, y, t, nn_effect, val_x, val_y, val_t, val_nn_effect)
if self.num_leaves > 0:
self.obj = self.obj / self.num_leaves
def _eval(self, train_y, train_t, nn_effect, val_y, val_t, val_nn_effect):
total_train = train_y.shape[0]
total_val = val_y.shape[0]
# treated = np.where(train_t == 1)[0]
# control = np.where(train_t == 0)[0]
# pred_effect = np.mean(train_y[treated]) - np.mean(train_y[control])
pred_effect = ace(train_y, train_t)
# nn_pehe = np.mean((nn_effect - pred_effect) ** 2)
nn_pehe = np.sum((nn_effect - pred_effect) ** 2)
val_effect = ace(val_y, val_t)
val_nn_pehe = np.sum((val_nn_effect - pred_effect) ** 2)
val_train_ratio = total_train / total_val
val_nn_pehe = val_nn_pehe * val_train_ratio
pehe_diff = np.abs(nn_pehe - val_nn_pehe)
# cost = np.abs(total_train * pred_effect - total_train * val_effect)
cost = np.abs(pred_effect - val_effect)
# obj = nn_pehe + pehe_diff
obj = nn_pehe * cost
return obj, nn_pehe
def _fit(self, node: ValNode, train_x, train_y, train_t, nn_effect, val_x, val_y, val_t, val_nn_effect):
if train_x.shape[0] == 0:
return node
if node.node_depth > self.tree_depth:
self.tree_depth = node.node_depth
if self.max_depth == self.tree_depth:
if node.effect > self.max_effect:
self.max_effect = node.effect
if node.effect < self.min_effect:
self.min_effect = node.effect
self.num_leaves += 1
node.leaf_num = self.num_leaves
node.is_leaf = True
return node
# print(self.tree_depth, self.obj)
best_gain = 0.0
best_attributes = []
best_tb_obj, best_fb_obj = (0.0, 0.0)
best_tb_pehe, best_fb_pehe = (0.0, 0.0)
column_count = train_x.shape[1]
for col in range(0, column_count):
unique_vals = np.unique(train_x[:, col])
for value in unique_vals:
(val_x1, val_x2, val_y1, val_y2, val_t1, val_t2) \
= divide_set(val_x, val_y, val_t, col, value)
# check validation set size
val_size = self.val_split * self.min_size if self.val_split * self.min_size > 2 else 2
if check_min_size(val_size, val_t1) or check_min_size(val_size, val_t2):
continue
# check training data size
(train_x1, train_x2, train_y1, train_y2, train_t1, train_t2) \
= divide_set(train_x, train_y, train_t, col, value)
check1 = check_min_size(self.min_size, train_t1)
check2 = check_min_size(self.min_size, train_t2)
if check1 or check2:
continue
(_, _, nn_effect1, nn_effect2, _, _) \
= divide_set(train_x, nn_effect, train_t, col, value)
(_, _, val_nn_effect1, val_nn_effect2, _, _) \
= divide_set(val_x, val_nn_effect, val_t, col, value)
tb_eval, tb_nn_pehe = self._eval(train_y1, train_t1, nn_effect1, val_y1, val_t1, val_nn_effect1)
fb_eval, fb_nn_pehe = self._eval(train_y2, train_t2, nn_effect2, val_y2, val_t2, val_nn_effect2)
split_eval = (tb_eval + fb_eval)
gain = node.obj - split_eval
if gain > best_gain:
best_gain = gain
best_attributes = [col, value]
best_tb_obj, best_fb_obj = (tb_eval, fb_eval)
best_tb_pehe, best_fb_pehe = tb_nn_pehe, fb_nn_pehe
# print(tb_eval, fb_eval, gain, best_gain)
if best_gain > 0:
node.col = best_attributes[0]
node.value = best_attributes[1]
(train_x1, train_x2, train_y1, train_y2, train_t1, train_t2) \
= divide_set(train_x, train_y, train_t, node.col, node.value)
(val_x1, val_x2, val_y1, val_y2, val_t1, val_t2) \
= divide_set(val_x, val_y, val_t, node.col, node.value)
(_, _, nn_effect1, nn_effect2, _, _) \
= divide_set(train_x, nn_effect, train_t, node.col, node.value)
(_, _, val_nn_effect1, val_nn_effect2, _, _) \
= divide_set(val_x, val_nn_effect, val_t, node.col, node.value)
# y1 = train_y1
# y2 = train_y2
# t1 = train_t1
# t2 = train_t2
y1 = np.concatenate((train_y1, val_y1))
y2 = np.concatenate((train_y2, val_y2))
t1 = np.concatenate((train_t1, val_t1))
t2 = np.concatenate((train_t2, val_t2))
best_tb_effect = ace(y1, t1)
best_fb_effect = ace(y2, t2)
tb_p_val = get_pval(y1, t1)
fb_p_val = get_pval(y2, t2)
self.obj = self.obj - node.obj + best_tb_obj + best_fb_obj
self.pehe = self.pehe - node.pehe + best_tb_pehe + best_fb_pehe
tb = ValNode(obj=best_tb_obj, pehe=best_tb_pehe, effect=best_tb_effect, p_val=tb_p_val,
node_depth=node.node_depth + 1,
num_samples=train_y1.shape[0])
fb = ValNode(obj=best_fb_obj, pehe=best_fb_pehe, effect=best_fb_effect, p_val=fb_p_val,
node_depth=node.node_depth + 1,
num_samples=train_y2.shape[0])
node.true_branch = self._fit(tb, train_x1, train_y1, train_t1, nn_effect1, val_x1, val_y1, val_t1,
val_nn_effect1)
node.false_branch = self._fit(fb, train_x2, train_y2, train_t2, nn_effect2, val_x2, val_y2, val_t2,
val_nn_effect2)
if node.effect > self.max_effect:
self.max_effect = node.effect
if node.effect < self.min_effect:
self.min_effect = node.effect
return node
else:
if node.effect > self.max_effect:
self.max_effect = node.effect
if node.effect < self.min_effect:
self.min_effect = node.effect
self.num_leaves += 1
node.leaf_num = self.num_leaves
node.is_leaf = True
return node | CTL/causal_tree/nn_pehe/val.py | from CTL.causal_tree.nn_pehe.tree import *
from sklearn.model_selection import train_test_split
class ValNode(PEHENode):
def __init__(self, **kwargs):
super().__init__(**kwargs)
# self.obj = obj
# ----------------------------------------------------------------
# Base causal tree (ctl, base objective)
# ----------------------------------------------------------------
class ValPEHE(PEHETree):
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.root = ValNode()
def fit(self, x, y, t):
if x.shape[0] == 0:
return 0
# ----------------------------------------------------------------
# Seed
# ----------------------------------------------------------------
np.random.seed(self.seed)
# ----------------------------------------------------------------
# Split data
# ----------------------------------------------------------------
x, val_x, y, val_y, t, val_t = train_test_split(x, y, t, random_state=self.seed, shuffle=True,
test_size=self.val_split)
self.root.num_samples = y.shape[0]
self.num_training = y.shape[0]
# ----------------------------------------------------------------
# NN_effect estimates
# use the overall datasets for nearest neighbor for now
# ----------------------------------------------------------------
nn_effect = compute_nn_effect(x, y, t, k=self.k)
val_nn_effect = compute_nn_effect(val_x, val_y, val_t, k=self.k)
# ----------------------------------------------------------------
# effect and pvals
# ----------------------------------------------------------------
effect = tau_squared(y, t)
p_val = get_pval(y, t)
self.root.effect = effect
self.root.p_val = p_val
# ----------------------------------------------------------------
# Not sure if i should eval in root or not
# ----------------------------------------------------------------
obj, nn_pehe = self._eval(y, t, nn_effect, val_y, val_t, val_nn_effect)
self.root.obj = obj
self.obj = self.root.obj
self.root.pehe = nn_pehe
self.pehe = nn_pehe
# ----------------------------------------------------------------
# Add control/treatment means
# ----------------------------------------------------------------
self.root.control_mean = np.mean(y[t == 0])
self.root.treatment_mean = np.mean(y[t == 1])
self.root.num_samples = x.shape[0]
self._fit(self.root, x, y, t, nn_effect, val_x, val_y, val_t, val_nn_effect)
if self.num_leaves > 0:
self.obj = self.obj / self.num_leaves
def _eval(self, train_y, train_t, nn_effect, val_y, val_t, val_nn_effect):
total_train = train_y.shape[0]
total_val = val_y.shape[0]
# treated = np.where(train_t == 1)[0]
# control = np.where(train_t == 0)[0]
# pred_effect = np.mean(train_y[treated]) - np.mean(train_y[control])
pred_effect = ace(train_y, train_t)
# nn_pehe = np.mean((nn_effect - pred_effect) ** 2)
nn_pehe = np.sum((nn_effect - pred_effect) ** 2)
val_effect = ace(val_y, val_t)
val_nn_pehe = np.sum((val_nn_effect - pred_effect) ** 2)
val_train_ratio = total_train / total_val
val_nn_pehe = val_nn_pehe * val_train_ratio
pehe_diff = np.abs(nn_pehe - val_nn_pehe)
# cost = np.abs(total_train * pred_effect - total_train * val_effect)
cost = np.abs(pred_effect - val_effect)
# obj = nn_pehe + pehe_diff
obj = nn_pehe * cost
return obj, nn_pehe
def _fit(self, node: ValNode, train_x, train_y, train_t, nn_effect, val_x, val_y, val_t, val_nn_effect):
if train_x.shape[0] == 0:
return node
if node.node_depth > self.tree_depth:
self.tree_depth = node.node_depth
if self.max_depth == self.tree_depth:
if node.effect > self.max_effect:
self.max_effect = node.effect
if node.effect < self.min_effect:
self.min_effect = node.effect
self.num_leaves += 1
node.leaf_num = self.num_leaves
node.is_leaf = True
return node
# print(self.tree_depth, self.obj)
best_gain = 0.0
best_attributes = []
best_tb_obj, best_fb_obj = (0.0, 0.0)
best_tb_pehe, best_fb_pehe = (0.0, 0.0)
column_count = train_x.shape[1]
for col in range(0, column_count):
unique_vals = np.unique(train_x[:, col])
for value in unique_vals:
(val_x1, val_x2, val_y1, val_y2, val_t1, val_t2) \
= divide_set(val_x, val_y, val_t, col, value)
# check validation set size
val_size = self.val_split * self.min_size if self.val_split * self.min_size > 2 else 2
if check_min_size(val_size, val_t1) or check_min_size(val_size, val_t2):
continue
# check training data size
(train_x1, train_x2, train_y1, train_y2, train_t1, train_t2) \
= divide_set(train_x, train_y, train_t, col, value)
check1 = check_min_size(self.min_size, train_t1)
check2 = check_min_size(self.min_size, train_t2)
if check1 or check2:
continue
(_, _, nn_effect1, nn_effect2, _, _) \
= divide_set(train_x, nn_effect, train_t, col, value)
(_, _, val_nn_effect1, val_nn_effect2, _, _) \
= divide_set(val_x, val_nn_effect, val_t, col, value)
tb_eval, tb_nn_pehe = self._eval(train_y1, train_t1, nn_effect1, val_y1, val_t1, val_nn_effect1)
fb_eval, fb_nn_pehe = self._eval(train_y2, train_t2, nn_effect2, val_y2, val_t2, val_nn_effect2)
split_eval = (tb_eval + fb_eval)
gain = node.obj - split_eval
if gain > best_gain:
best_gain = gain
best_attributes = [col, value]
best_tb_obj, best_fb_obj = (tb_eval, fb_eval)
best_tb_pehe, best_fb_pehe = tb_nn_pehe, fb_nn_pehe
# print(tb_eval, fb_eval, gain, best_gain)
if best_gain > 0:
node.col = best_attributes[0]
node.value = best_attributes[1]
(train_x1, train_x2, train_y1, train_y2, train_t1, train_t2) \
= divide_set(train_x, train_y, train_t, node.col, node.value)
(val_x1, val_x2, val_y1, val_y2, val_t1, val_t2) \
= divide_set(val_x, val_y, val_t, node.col, node.value)
(_, _, nn_effect1, nn_effect2, _, _) \
= divide_set(train_x, nn_effect, train_t, node.col, node.value)
(_, _, val_nn_effect1, val_nn_effect2, _, _) \
= divide_set(val_x, val_nn_effect, val_t, node.col, node.value)
# y1 = train_y1
# y2 = train_y2
# t1 = train_t1
# t2 = train_t2
y1 = np.concatenate((train_y1, val_y1))
y2 = np.concatenate((train_y2, val_y2))
t1 = np.concatenate((train_t1, val_t1))
t2 = np.concatenate((train_t2, val_t2))
best_tb_effect = ace(y1, t1)
best_fb_effect = ace(y2, t2)
tb_p_val = get_pval(y1, t1)
fb_p_val = get_pval(y2, t2)
self.obj = self.obj - node.obj + best_tb_obj + best_fb_obj
self.pehe = self.pehe - node.pehe + best_tb_pehe + best_fb_pehe
tb = ValNode(obj=best_tb_obj, pehe=best_tb_pehe, effect=best_tb_effect, p_val=tb_p_val,
node_depth=node.node_depth + 1,
num_samples=train_y1.shape[0])
fb = ValNode(obj=best_fb_obj, pehe=best_fb_pehe, effect=best_fb_effect, p_val=fb_p_val,
node_depth=node.node_depth + 1,
num_samples=train_y2.shape[0])
node.true_branch = self._fit(tb, train_x1, train_y1, train_t1, nn_effect1, val_x1, val_y1, val_t1,
val_nn_effect1)
node.false_branch = self._fit(fb, train_x2, train_y2, train_t2, nn_effect2, val_x2, val_y2, val_t2,
val_nn_effect2)
if node.effect > self.max_effect:
self.max_effect = node.effect
if node.effect < self.min_effect:
self.min_effect = node.effect
return node
else:
if node.effect > self.max_effect:
self.max_effect = node.effect
if node.effect < self.min_effect:
self.min_effect = node.effect
self.num_leaves += 1
node.leaf_num = self.num_leaves
node.is_leaf = True
return node | 0.757256 | 0.289252 |
from django.db import models
from django.core.urlresolvers import reverse
from django.core.validators import MaxValueValidator, MinValueValidator
# Create your models here.
class Resource(models.Model):
    # A named resource (e.g. capacity unit) that providers own and that
    # decision variables / provider capacities refer to.
    id = models.AutoField(primary_key=True)
    name = models.CharField(max_length=60)
    def __unicode__(self):  # Python 2 display name (Python 3 would use __str__)
        return self.name
class CostFunction(models.Model):
    """Catalogue entry for a pluggable cost-function implementation.

    ``class_name`` names the concrete class loaded at runtime;
    ``range_function`` records whether its range is continuous or discrete.
    """
    # NOTE(review): the attribute name CONTINOUS is misspelled but kept as-is;
    # other code and stored rows may reference the 'C' constant through it.
    CONTINOUS = 'C'
    DISCRETE = 'D'
    RANGE_CHOICES = (
        (CONTINOUS, 'Continuous'),  # fixed user-facing label typo ('Continous')
        (DISCRETE, 'Discrete'),
    )
    id = models.AutoField(primary_key=True)
    name = models.CharField(max_length=60)
    class_name = models.CharField(max_length=60)
    range_function = models.CharField(max_length=2, choices=RANGE_CHOICES, default=CONTINOUS)
    def __unicode__(self):  # Python 2 display name (Python 3 would use __str__)
        return self.name
class ContinuousCostFunction(models.Model):
    # One named parameter/value pair of a continuous CostFunction
    # (a cost function owns zero or more of these rows).
    id = models.AutoField(primary_key=True)
    costfunction = models.ForeignKey('CostFunction')
    parameter = models.CharField(max_length=60)
    value = models.FloatField(default=0)
class ProbabilityDistribution(models.Model):
    """Catalogue entry for a pluggable probability distribution.

    ``class_name`` names the concrete implementation loaded at runtime and
    ``domain`` records whether it is continuous or discrete; its parameters /
    point masses are stored in Continuous-/DiscreteProbabilityDistribution
    rows pointing back here.
    """
    # NOTE(review): CONTINOUS is misspelled but kept — existing code/data use it.
    CONTINOUS = 'C'
    DISCRETE = 'D'
    DOMAIN_CHOICES = (
        (CONTINOUS, 'Continuous'),  # fixed user-facing label typo ('Continous')
        (DISCRETE, 'Discrete'),
    )
    id = models.AutoField(primary_key=True)
    name = models.CharField(max_length=60)
    class_name = models.CharField(max_length=60)
    domain = models.CharField(max_length=2, choices=DOMAIN_CHOICES, default=CONTINOUS)
    def get_absolute_url(self):
        # NOTE(review): django.core.urlresolvers was removed in Django 2.0;
        # migrate to django.urls.reverse when upgrading.
        return reverse('probabilities-view', kwargs={'pk': self.id})
    def __str__(self):
        return self.name
class DiscreteProbabilityDistribution(models.Model):
    # One point mass (value, probability) of a discrete ProbabilityDistribution.
    id = models.AutoField(primary_key=True)
    probability_id = models.ForeignKey('ProbabilityDistribution')
    value = models.FloatField(default=0)
    label = models.CharField(max_length=60, blank=True)
    # Probability in [0, 1]; note Django validators only run via
    # full_clean()/forms, not on plain save().
    probability = models.FloatField(default=0,
                              validators=[MaxValueValidator(1),
                                         MinValueValidator(0)])
class ContinuousProbabilityDistribution(models.Model):
    # One named parameter/value pair of a continuous ProbabilityDistribution.
    id = models.AutoField(primary_key=True)
    probability_id = models.ForeignKey('ProbabilityDistribution')
    parameter = models.CharField(max_length=60)
    value = models.FloatField(default=0)
class Unit(models.Model):
    # A measurement unit (name plus short symbol, e.g. up to 3 chars).
    id = models.AutoField(primary_key=True)
    name = models.CharField(max_length=60)
    symbol =models.CharField(max_length=3)
    def __unicode__(self):  # Python 2 display name (Python 3 would use __str__)
        return self.name
class DecisionVariable(models.Model):
    # A variable a provider optimizes over: bounded by [min_value, max_value],
    # optimized in a direction, modeled as quality or price, tied to a
    # resource/unit and to distributions and a cost function.
    MAXIMIZE = 'M'
    MINIMIZE = 'L'
    OPT_CHOICES = (
        (MAXIMIZE, 'Maximize'),
        (MINIMIZE, 'Minimize'),
    )
    QUALITY = 'Q'
    PRICE = 'P'
    MOD_CHOICES = (
        (QUALITY, 'Quality'),
        (PRICE, 'Price'),
    )
    id = models.AutoField(primary_key=True)
    name = models.CharField(max_length=60)
    optimization = models.CharField(max_length=2, choices=OPT_CHOICES, default=MAXIMIZE)
    min_value = models.FloatField(default=0)
    max_value = models.FloatField(default=0)
    modeling = models.CharField(max_length=2, choices=MOD_CHOICES, default=QUALITY)
    resource = models.ForeignKey('Resource')
    unit = models.ForeignKey('Unit')
    # related_name values distinguish the three ForeignKeys back from
    # ProbabilityDistribution / CostFunction.
    sensitivity_distribution = models.ForeignKey('ProbabilityDistribution', related_name='sensitivity')
    value_distribution = models.ForeignKey('ProbabilityDistribution', related_name='value')
    cost_function = models.ForeignKey('CostFunction', related_name='cost')
    def __unicode__(self):  # Python 2 display name (Python 3 would use __str__)
        return self.name
class Service(models.Model):
    # A marketable service: demand comes from a file, traffic conversion
    # from a file or the database, and it exposes a set of decision
    # variables via the Service_DecisionVariable through-table.
    FILE = 'F'
    DATABASE = 'D'
    CONVERTER_CHOICES = (
        (DATABASE, 'Database'),
        (FILE, 'File'),
    )
    id = models.AutoField(primary_key=True)
    name = models.CharField(max_length=60)
    file_name_demand = models.CharField(max_length=100, verbose_name= 'Demand file')
    converter_origin = models.CharField(max_length=1, choices=CONVERTER_CHOICES, default=DATABASE)
    file_name_converter = models.CharField(max_length=100, verbose_name= 'Traffic converter')
    decision_variables = models.ManyToManyField(DecisionVariable, through='Service_DecisionVariable')
    def __unicode__(self):  # Python 2 display name (Python 3 would use __str__)
        return self.name
class Service_DecisionVariable(models.Model):
    # Through-table for Service.decision_variables (M2M join row).
    id_service = models.ForeignKey(Service)
    id_decision_variable = models.ForeignKey(DecisionVariable)
    def __unicode__(self):  # Python 2 display name (Python 3 would use __str__)
        return str(self.id_decision_variable)
class Service_Relationship(models.Model):
    # Directed edge between (service, decision variable) pairs with an
    # aggregation function applied along the edge.
    MAX_AGGREGATION = 'M'
    MIN_AGGREGATION = 'N'
    SUM_AGGREGATION = 'S'
    NON_AGGREGATION = 'X'
    AGGREGATION_FUNC_CHOICES = (
        (MAX_AGGREGATION, 'Max Aggregation'),
        (MIN_AGGREGATION, 'Min Aggregation'),
        (SUM_AGGREGATION, 'Sum Aggregation'),
        (NON_AGGREGATION, 'Non Aggregation'),
    )
    id = models.AutoField(primary_key=True)
    service_from = models.ForeignKey(Service, related_name='service_from')
    decision_variable_from = models.ForeignKey(DecisionVariable, related_name='decision_variable_from')
    service_to = models.ForeignKey(Service, related_name='service_to')
    decision_variable_to = models.ForeignKey(DecisionVariable, related_name='decision_variable_to')
    aggregation = models.CharField(max_length=1,
                              choices=AGGREGATION_FUNC_CHOICES,
                              default=SUM_AGGREGATION)
    def __unicode__(self):  # Python 2 display name (Python 3 would use __str__)
        # Renders "(from_service,from_var) TO (to_service,to_var)".
        return '(' + self.service_from.name + ',' +self.decision_variable_from.name + ')' + ' TO ' + '(' + self.service_to.name + ',' + self.decision_variable_to.name + ')'
class Provider(models.Model):
    # An agent selling a Service in the simulated marketplace.  The
    # year..microsecond fields together with `seed` presumably fix the
    # provider's RNG seed — TODO confirm against the simulation code.
    ACTIVE = 'A'
    INACTIVE = 'I'
    PROV_STAT_CHOICES = (
        (ACTIVE, 'Active'),
        (INACTIVE, 'Inactive'),
    )
    BULK = 'G'
    BID_BY_BID = 'B'
    PROV_CAPC_CHOICES = (
        (BULK, 'Bulk Controlled'),
        (BID_BY_BID, 'Bid Controlled'),
    )
    id = models.AutoField(primary_key=True)
    name = models.CharField(max_length=60)
    # Fractions in [0, 1]; validators only run via full_clean()/forms.
    market_position = models.FloatField(default=0,
                              blank=False,
                              validators=[MaxValueValidator(1),
                                         MinValueValidator(0)])
    adaptation_factor = models.FloatField(default=0,
                              blank=False,
                              validators=[MaxValueValidator(1),
                                         MinValueValidator(0)])
    status = models.CharField(max_length=1,
                              choices=PROV_STAT_CHOICES,
                              default=ACTIVE)
    num_ancestors = models.IntegerField(default=1,
                              blank=False,
                              validators=[MinValueValidator(1),
                                          MaxValueValidator(10)]
                              )
    debug = models.BooleanField(default = False)
    service = models.ForeignKey(Service)
    monopolist_position = models.FloatField(default=0,
                              blank=False,
                              validators=[MaxValueValidator(1),
                                         MinValueValidator(0)])
    seed = models.BooleanField(default = False)
    # NOTE(review): default=0 violates MinValueValidator(1) on the date
    # fields below and on start_from_period; since validators only run on
    # full_clean(), 0 apparently acts as an "unset" marker — confirm intent.
    year = models.IntegerField(default=0,
                              blank=False,
                              validators=[MinValueValidator(1),
                                          MaxValueValidator(9999)]
                              )
    month = models.IntegerField(default=0,
                              blank=False,
                              validators=[MinValueValidator(1),
                                          MaxValueValidator(12)
                                          ]
                              )
    day = models.IntegerField(default=0,
                              blank=False,
                              validators=[MinValueValidator(1),
                                          MaxValueValidator(31) ]
                              )
    hour = models.IntegerField(default=0,
                              blank=False,
                              validators=[MinValueValidator(0),
                                          MaxValueValidator(24) ]
                              )
    minute = models.IntegerField(default=0,
                              blank=False,
                              validators=[MinValueValidator(0),
                                          MaxValueValidator(59) ]
                              )
    second = models.IntegerField(default=0,
                              blank=False,
                              validators=[MinValueValidator(0),
                                          MaxValueValidator(59) ]
                              )
    microsecond = models.IntegerField(default=0,
                              blank=False,
                              validators=[MinValueValidator(0),
                                          MaxValueValidator(999999) ]
                              )
    # Concrete provider-agent class loaded at runtime.
    class_name = models.CharField(max_length=60)
    start_from_period = models.IntegerField(default=0,
                              blank=False,
                              validators=[MinValueValidator(1),
                                          MaxValueValidator(9999) ]
                              )
    buying_marketplace_address = models.CharField(max_length=45)
    selling_marketplace_address = models.CharField(max_length=45)
    capacity_controlled_at = models.CharField(max_length=1,
                              choices=PROV_CAPC_CHOICES,
                              default=BULK)
    # Optional upstream service this provider itself purchases.
    purchase_service = models.ForeignKey(Service, related_name='purchase_service', blank=True, null=True)
    def __unicode__(self):  # Python 2 display name (Python 3 would use __str__)
        return self.name
class Provider_Resource(models.Model):
    # Per-(provider, service) capacity and cost of one resource.
    provider = models.ForeignKey(Provider)
    resource = models.ForeignKey(Resource)
    capacity = models.FloatField(default=0)
    cost = models.FloatField(default=0)
    service = models.ForeignKey(Service)
class offeringData(models.Model):
    # A plottable datum of an offering: either a decision variable directly
    # or a calculated field defined by `function`.
    # NOTE(review): class name breaks PascalCase and `type` shadows the
    # builtin as a field name; renaming either would touch migrations/callers.
    DECISION_VARIABLES = 'D'
    CALCULATED_FIELD = 'C'
    OFF_CHOICES = (
        (DECISION_VARIABLES, 'Decision Variable'),
        (CALCULATED_FIELD, 'Calculated Field'),
    )
    id = models.AutoField(primary_key=True)
    name = models.CharField(max_length=60)
    type = models.CharField(max_length=1, choices=OFF_CHOICES, default=DECISION_VARIABLES)
    decision_variable = models.ForeignKey(DecisionVariable, blank=True, null=True)
    function = models.CharField(max_length=100, blank=True, null=True)
    def __unicode__(self):  # Python 2 display name (Python 3 would use __str__)
        return self.name
class Graphic(models.Model):
    # A named chart/graphic definition; its axes live in Axis_Graphic rows.
    id = models.AutoField(primary_key=True)
    name = models.CharField(max_length=60)
    description = models.TextField(blank=True)
    def __unicode__(self):  # Python 2 display name (Python 3 would use __str__)
        return self.name
class Axis_Graphic(models.Model):
    # Binds offeringData fields to the axes and optional decorations
    # (label, color, extra columns) of one Graphic.
    id = models.AutoField(primary_key=True)
    graphic = models.ForeignKey(Graphic)
    x_axis = models.ForeignKey(offeringData, related_name='x_axis')
    y_axis = models.ForeignKey(offeringData, related_name='y_axis')
    detail = models.BooleanField(default = True)
    label = models.ForeignKey(offeringData, related_name='label', blank=True, null=True)
    color = models.ForeignKey(offeringData, related_name='color', blank=True, null=True)
    column1 = models.ForeignKey(offeringData, related_name='column1', blank=True, null=True)
    column2 = models.ForeignKey(offeringData, related_name='column2', blank=True, null=True)
    column3 = models.ForeignKey(offeringData, related_name='column3', blank=True, null=True)
    column4 = models.ForeignKey(offeringData, related_name='column4', blank=True, null=True)
class Provider_Graphic(models.Model):
    # Associates a Graphic with a provider-agent class (by class name).
    id = models.AutoField(primary_key=True)
    graphic = models.ForeignKey(Graphic)
    class_name = models.CharField(max_length=60)
class Presenter(models.Model):
    # A named presenter that groups Graphics via Presenter_Graphic.
    id = models.AutoField(primary_key=True)
    name = models.CharField(max_length=60)
    def __unicode__(self):  # Python 2 display name (Python 3 would use __str__)
        return self.name
class Presenter_Graphic(models.Model):
    # Join row between Presenter and Graphic.
    presenter = models.ForeignKey(Presenter)
    graphic = models.ForeignKey(Graphic)
class Consumer(models.Model):
    # Configuration of the consumer side of a simulation run.  As with
    # Provider, `seed` plus the year..microsecond fields presumably fix the
    # RNG seed — TODO confirm against the simulation code.
    id = models.AutoField(primary_key=True)
    # NOTE(review): field name "observartions" is a typo ("observations");
    # renaming requires a schema migration, so it is only flagged here.
    observartions = models.TextField(blank=True)
    number_execute = models.IntegerField(default=1,
                              blank=False,
                              validators=[MinValueValidator(1),
                                          MaxValueValidator(9999)]
                              )
    seed = models.BooleanField(default = False)
    # NOTE(review): default=0 violates MinValueValidator(1) on year/month/day;
    # validators only run on full_clean(), so 0 apparently means "unset".
    year = models.IntegerField(default=0,
                              blank=False,
                              validators=[MinValueValidator(1),
                                          MaxValueValidator(9999)]
                              )
    month = models.IntegerField(default=0,
                              blank=False,
                              validators=[MinValueValidator(1),
                                          MaxValueValidator(12)
                                          ]
                              )
    day = models.IntegerField(default=0,
                              blank=False,
                              validators=[MinValueValidator(1),
                                          MaxValueValidator(31) ]
                              )
    hour = models.IntegerField(default=0,
                              blank=False,
                              validators=[MinValueValidator(0),
                                          MaxValueValidator(24) ]
                              )
    minute = models.IntegerField(default=0,
                              blank=False,
                              validators=[MinValueValidator(0),
                                          MaxValueValidator(59) ]
                              )
    second = models.IntegerField(default=0,
                              blank=False,
                              validators=[MinValueValidator(0),
                                          MaxValueValidator(59) ]
                              )
    microsecond = models.IntegerField(default=0,
                              blank=False,
                              validators=[MinValueValidator(0),
                                          MaxValueValidator(999999) ]
                              )
class ConsumerService(models.Model):
    # Demand parameters of one Service for a Consumer run.
    id = models.AutoField(primary_key=True)
    consumer = models.ForeignKey(Consumer)
    service = models.ForeignKey(Service)
    average = models.FloatField(default=0)
    variance = models.FloatField(default=0)
    market_potential = models.FloatField(default=0)
    execute = models.BooleanField(default = False)
class ExecutionGroup(models.Model):
    # A named group of ExecutionConfigurations with an active/inactive flag.
    ACTIVE = 'A'
    INACTIVE = 'I'
    PROV_STAT_CHOICES = (
        (ACTIVE, 'Active'),
        (INACTIVE, 'Inactive'),
    )
    id = models.AutoField(primary_key=True)
    name = models.CharField(max_length=60)
    status = models.CharField(max_length=1,
                              choices=PROV_STAT_CHOICES,
                              default=ACTIVE)
    description = models.TextField(blank=True)
    def __unicode__(self):  # Python 2 display name (Python 3 would use __str__)
        return self.name
class ExecutionConfiguration(models.Model):
    # One runnable configuration inside an ExecutionGroup: how many
    # consumers and periods to simulate, plus an active/inactive flag.
    ACTIVE = 'A'
    INACTIVE = 'I'
    PROV_STAT_CHOICES = (
        (ACTIVE, 'Active'),
        (INACTIVE, 'Inactive'),
    )
    id = models.AutoField(primary_key=True)
    status = models.CharField(max_length=1,
                              choices=PROV_STAT_CHOICES,
                              default=ACTIVE)
    description = models.TextField(blank=True)
    execution_group = models.ForeignKey(ExecutionGroup)
    number_consumers = models.IntegerField(default=1, blank=False)
    number_periods = models.IntegerField(default=1, blank=False)
    def __unicode__(self):  # Python 2 display name (Python 3 would use __str__)
        return self.execution_group.name + ' ' + str(self.id)
class ExecutionConfigurationProviders(models.Model):
    # Join row: which Providers take part in an ExecutionConfiguration.
    id = models.AutoField(primary_key=True)
    execution_configuration = models.ForeignKey(ExecutionConfiguration)
    provider = models.ForeignKey(Provider)
class GeneralParameters(models.Model):
    # Global knobs of the simulation (bid periods, Pareto-front exchange,
    # initial offers, market-share window).
    id = models.AutoField(primary_key=True)
    bid_periods = models.IntegerField(default=10, blank=False)
    pareto_fronts_to_exchange = models.IntegerField(default=3,
                              blank=False,
                              validators=[MinValueValidator(1)]
                              )
    initial_offer_number = models.IntegerField(default=1,
                              blank=False,
                              validators=[MinValueValidator(1), MaxValueValidator(10)]
                              )
    num_periods_market_share = models.IntegerField(default=3,
                              blank=False,
                              validators=[MinValueValidator(1), MaxValueValidator(10)]
                              )
) | simulation_site/simulation/models.py | from django.db import models
from django.core.urlresolvers import reverse
from django.core.validators import MaxValueValidator, MinValueValidator
# Create your models here.
class Resource(models.Model):
id = models.AutoField(primary_key=True)
name = models.CharField(max_length=60)
def __unicode__(self): # Python 3: def __str__(self):
return self.name
class CostFunction(models.Model):
CONTINOUS = 'C'
DISCRETE = 'D'
RANGE_CHOICES = (
(CONTINOUS, 'Continous'),
(DISCRETE, 'Discrete'),
)
id = models.AutoField(primary_key=True)
name = models.CharField(max_length=60)
class_name = models.CharField(max_length=60)
range_function = models.CharField(max_length=2, choices=RANGE_CHOICES, default=CONTINOUS)
def __unicode__(self): # Python 3: def __str__(self):
return self.name
class ContinuousCostFunction(models.Model):
id = models.AutoField(primary_key=True)
costfunction = models.ForeignKey('CostFunction')
parameter = models.CharField(max_length=60)
value = models.FloatField(default=0)
class ProbabilityDistribution(models.Model):
CONTINOUS = 'C'
DISCRETE = 'D'
DOMAIN_CHOICES = (
(CONTINOUS, 'Continous'),
(DISCRETE, 'Discrete'),
)
id = models.AutoField(primary_key=True)
name = models.CharField(max_length=60)
class_name = models.CharField(max_length=60)
domain = models.CharField(max_length=2, choices=DOMAIN_CHOICES, default=CONTINOUS)
def get_absolute_url(self):
return reverse('probabilities-view', kwargs={'pk': self.id})
def __str__(self): # Python 3: def __str__(self):
return self.name
class DiscreteProbabilityDistribution(models.Model):
id = models.AutoField(primary_key=True)
probability_id = models.ForeignKey('ProbabilityDistribution')
value = models.FloatField(default=0)
label = models.CharField(max_length=60, blank=True)
probability = models.FloatField(default=0,
validators=[MaxValueValidator(1),
MinValueValidator(0)])
class ContinuousProbabilityDistribution(models.Model):
id = models.AutoField(primary_key=True)
probability_id = models.ForeignKey('ProbabilityDistribution')
parameter = models.CharField(max_length=60)
value = models.FloatField(default=0)
class Unit(models.Model):
id = models.AutoField(primary_key=True)
name = models.CharField(max_length=60)
symbol =models.CharField(max_length=3)
def __unicode__(self): # Python 3: def __str__(self):
return self.name
class DecisionVariable(models.Model):
MAXIMIZE = 'M'
MINIMIZE = 'L'
OPT_CHOICES = (
(MAXIMIZE, 'Maximize'),
(MINIMIZE, 'Minimize'),
)
QUALITY = 'Q'
PRICE = 'P'
MOD_CHOICES = (
(QUALITY, 'Quality'),
(PRICE, 'Price'),
)
id = models.AutoField(primary_key=True)
name = models.CharField(max_length=60)
optimization = models.CharField(max_length=2, choices=OPT_CHOICES, default=MAXIMIZE)
min_value = models.FloatField(default=0)
max_value = models.FloatField(default=0)
modeling = models.CharField(max_length=2, choices=MOD_CHOICES, default=QUALITY)
resource = models.ForeignKey('Resource')
unit = models.ForeignKey('Unit')
sensitivity_distribution = models.ForeignKey('ProbabilityDistribution', related_name='sensitivity')
value_distribution = models.ForeignKey('ProbabilityDistribution', related_name='value')
cost_function = models.ForeignKey('CostFunction', related_name='cost')
def __unicode__(self): # Python 3: def __str__(self):
return self.name
class Service(models.Model):
FILE = 'F'
DATABASE = 'D'
CONVERTER_CHOICES = (
(DATABASE, 'Database'),
(FILE, 'File'),
)
id = models.AutoField(primary_key=True)
name = models.CharField(max_length=60)
file_name_demand = models.CharField(max_length=100, verbose_name= 'Demand file')
converter_origin = models.CharField(max_length=1, choices=CONVERTER_CHOICES, default=DATABASE)
file_name_converter = models.CharField(max_length=100, verbose_name= 'Traffic converter')
decision_variables = models.ManyToManyField(DecisionVariable, through='Service_DecisionVariable')
def __unicode__(self): # Python 3: def __str__(self):
return self.name
class Service_DecisionVariable(models.Model):
id_service = models.ForeignKey(Service)
id_decision_variable = models.ForeignKey(DecisionVariable)
def __unicode__(self): # Python 3: def __str__(self):
return str(self.id_decision_variable)
class Service_Relationship(models.Model):
MAX_AGGREGATION = 'M'
MIN_AGGREGATION = 'N'
SUM_AGGREGATION = 'S'
NON_AGGREGATION = 'X'
AGGREGATION_FUNC_CHOICES = (
(MAX_AGGREGATION, 'Max Aggregation'),
(MIN_AGGREGATION, 'Min Aggregation'),
(SUM_AGGREGATION, 'Sum Aggregation'),
(NON_AGGREGATION, 'Non Aggregation'),
)
id = models.AutoField(primary_key=True)
service_from = models.ForeignKey(Service, related_name='service_from')
decision_variable_from = models.ForeignKey(DecisionVariable, related_name='decision_variable_from')
service_to = models.ForeignKey(Service, related_name='service_to')
decision_variable_to = models.ForeignKey(DecisionVariable, related_name='decision_variable_to')
aggregation = models.CharField(max_length=1,
choices=AGGREGATION_FUNC_CHOICES,
default=SUM_AGGREGATION)
def __unicode__(self): # Python 3: def __str__(self):
return '(' + self.service_from.name + ',' +self.decision_variable_from.name + ')' + ' TO ' + '(' + self.service_to.name + ',' + self.decision_variable_to.name + ')'
class Provider(models.Model):
ACTIVE = 'A'
INACTIVE = 'I'
PROV_STAT_CHOICES = (
(ACTIVE, 'Active'),
(INACTIVE, 'Inactive'),
)
BULK = 'G'
BID_BY_BID = 'B'
PROV_CAPC_CHOICES = (
(BULK, 'Bulk Controlled'),
(BID_BY_BID, 'Bid Controlled'),
)
id = models.AutoField(primary_key=True)
name = models.CharField(max_length=60)
market_position = models.FloatField(default=0,
blank=False,
validators=[MaxValueValidator(1),
MinValueValidator(0)])
adaptation_factor = models.FloatField(default=0,
blank=False,
validators=[MaxValueValidator(1),
MinValueValidator(0)])
status = models.CharField(max_length=1,
choices=PROV_STAT_CHOICES,
default=ACTIVE)
num_ancestors = models.IntegerField(default=1,
blank=False,
validators=[MinValueValidator(1),
MaxValueValidator(10)]
)
debug = models.BooleanField(default = False)
service = models.ForeignKey(Service)
monopolist_position = models.FloatField(default=0,
blank=False,
validators=[MaxValueValidator(1),
MinValueValidator(0)])
seed = models.BooleanField(default = False)
year = models.IntegerField(default=0,
blank=False,
validators=[MinValueValidator(1),
MaxValueValidator(9999)]
)
month = models.IntegerField(default=0,
blank=False,
validators=[MinValueValidator(1),
MaxValueValidator(12)
]
)
day = models.IntegerField(default=0,
blank=False,
validators=[MinValueValidator(1),
MaxValueValidator(31) ]
)
hour = models.IntegerField(default=0,
blank=False,
validators=[MinValueValidator(0),
MaxValueValidator(24) ]
)
minute = models.IntegerField(default=0,
blank=False,
validators=[MinValueValidator(0),
MaxValueValidator(59) ]
)
second = models.IntegerField(default=0,
blank=False,
validators=[MinValueValidator(0),
MaxValueValidator(59) ]
)
microsecond = models.IntegerField(default=0,
blank=False,
validators=[MinValueValidator(0),
MaxValueValidator(999999) ]
)
class_name = models.CharField(max_length=60)
start_from_period = models.IntegerField(default=0,
blank=False,
validators=[MinValueValidator(1),
MaxValueValidator(9999) ]
)
buying_marketplace_address = models.CharField(max_length=45)
selling_marketplace_address = models.CharField(max_length=45)
capacity_controlled_at = models.CharField(max_length=1,
choices=PROV_CAPC_CHOICES,
default=BULK)
purchase_service = models.ForeignKey(Service, related_name='purchase_service', blank=True, null=True)
def __unicode__(self): # Python 3: def __str__(self):
return self.name
class Provider_Resource(models.Model):
provider = models.ForeignKey(Provider)
resource = models.ForeignKey(Resource)
capacity = models.FloatField(default=0)
cost = models.FloatField(default=0)
service = models.ForeignKey(Service)
class offeringData(models.Model):
DECISION_VARIABLES = 'D'
CALCULATED_FIELD = 'C'
OFF_CHOICES = (
(DECISION_VARIABLES, 'Decision Variable'),
(CALCULATED_FIELD, 'Calculated Field'),
)
id = models.AutoField(primary_key=True)
name = models.CharField(max_length=60)
type = models.CharField(max_length=1, choices=OFF_CHOICES, default=DECISION_VARIABLES)
decision_variable = models.ForeignKey(DecisionVariable, blank=True, null=True)
function = models.CharField(max_length=100, blank=True, null=True)
def __unicode__(self): # Python 3: def __str__(self):
return self.name
class Graphic(models.Model):
id = models.AutoField(primary_key=True)
name = models.CharField(max_length=60)
description = models.TextField(blank=True)
def __unicode__(self): # Python 3: def __str__(self):
return self.name
class Axis_Graphic(models.Model):
id = models.AutoField(primary_key=True)
graphic = models.ForeignKey(Graphic)
x_axis = models.ForeignKey(offeringData, related_name='x_axis')
y_axis = models.ForeignKey(offeringData, related_name='y_axis')
detail = models.BooleanField(default = True)
label = models.ForeignKey(offeringData, related_name='label', blank=True, null=True)
color = models.ForeignKey(offeringData, related_name='color', blank=True, null=True)
column1 = models.ForeignKey(offeringData, related_name='column1', blank=True, null=True)
column2 = models.ForeignKey(offeringData, related_name='column2', blank=True, null=True)
column3 = models.ForeignKey(offeringData, related_name='column3', blank=True, null=True)
column4 = models.ForeignKey(offeringData, related_name='column4', blank=True, null=True)
class Provider_Graphic(models.Model):
id = models.AutoField(primary_key=True)
graphic = models.ForeignKey(Graphic)
class_name = models.CharField(max_length=60)
class Presenter(models.Model):
id = models.AutoField(primary_key=True)
name = models.CharField(max_length=60)
def __unicode__(self): # Python 3: def __str__(self):
return self.name
class Presenter_Graphic(models.Model):
presenter = models.ForeignKey(Presenter)
graphic = models.ForeignKey(Graphic)
class Consumer(models.Model):
id = models.AutoField(primary_key=True)
observartions = models.TextField(blank=True)
number_execute = models.IntegerField(default=1,
blank=False,
validators=[MinValueValidator(1),
MaxValueValidator(9999)]
)
seed = models.BooleanField(default = False)
year = models.IntegerField(default=0,
blank=False,
validators=[MinValueValidator(1),
MaxValueValidator(9999)]
)
month = models.IntegerField(default=0,
blank=False,
validators=[MinValueValidator(1),
MaxValueValidator(12)
]
)
day = models.IntegerField(default=0,
blank=False,
validators=[MinValueValidator(1),
MaxValueValidator(31) ]
)
hour = models.IntegerField(default=0,
blank=False,
validators=[MinValueValidator(0),
MaxValueValidator(24) ]
)
minute = models.IntegerField(default=0,
blank=False,
validators=[MinValueValidator(0),
MaxValueValidator(59) ]
)
second = models.IntegerField(default=0,
blank=False,
validators=[MinValueValidator(0),
MaxValueValidator(59) ]
)
microsecond = models.IntegerField(default=0,
blank=False,
validators=[MinValueValidator(0),
MaxValueValidator(999999) ]
)
class ConsumerService(models.Model):
id = models.AutoField(primary_key=True)
consumer = models.ForeignKey(Consumer)
service = models.ForeignKey(Service)
average = models.FloatField(default=0)
variance = models.FloatField(default=0)
market_potential = models.FloatField(default=0)
execute = models.BooleanField(default = False)
class ExecutionGroup(models.Model):
ACTIVE = 'A'
INACTIVE = 'I'
PROV_STAT_CHOICES = (
(ACTIVE, 'Active'),
(INACTIVE, 'Inactive'),
)
id = models.AutoField(primary_key=True)
name = models.CharField(max_length=60)
status = models.CharField(max_length=1,
choices=PROV_STAT_CHOICES,
default=ACTIVE)
description = models.TextField(blank=True)
def __unicode__(self): # Python 3: def __str__(self):
return self.name
class ExecutionConfiguration(models.Model):
ACTIVE = 'A'
INACTIVE = 'I'
PROV_STAT_CHOICES = (
(ACTIVE, 'Active'),
(INACTIVE, 'Inactive'),
)
id = models.AutoField(primary_key=True)
status = models.CharField(max_length=1,
choices=PROV_STAT_CHOICES,
default=ACTIVE)
description = models.TextField(blank=True)
execution_group = models.ForeignKey(ExecutionGroup)
number_consumers = models.IntegerField(default=1, blank=False)
number_periods = models.IntegerField(default=1, blank=False)
def __unicode__(self): # Python 3: def __str__(self):
return self.execution_group.name + ' ' + str(self.id)
class ExecutionConfigurationProviders(models.Model):
id = models.AutoField(primary_key=True)
execution_configuration = models.ForeignKey(ExecutionConfiguration)
provider = models.ForeignKey(Provider)
class GeneralParameters(models.Model):
id = models.AutoField(primary_key=True)
bid_periods = models.IntegerField(default=10, blank=False)
pareto_fronts_to_exchange = models.IntegerField(default=3,
blank=False,
validators=[MinValueValidator(1)]
)
initial_offer_number = models.IntegerField(default=1,
blank=False,
validators=[MinValueValidator(1), MaxValueValidator(10)]
)
num_periods_market_share = models.IntegerField(default=3,
blank=False,
validators=[MinValueValidator(1), MaxValueValidator(10)]
) | 0.466359 | 0.096578 |