# --- 1650052 ---
import sublime
import sublime_plugin
import re
import datetime
from time import time
from urllib import parse
from collections import namedtuple
from ..deps.jsonschema import validate, ValidationError
from ..core import RequestCommandMixin
from ..core.parsers import parse_tests
from ..core.responses import prepare_request
Error = namedtuple('Error', 'prop, expected, got, error')
Result = namedtuple('Result', 'result, assertions, errors')
RequestAssertion = namedtuple('RequestAssertion', 'request, assertion')
class TestParserMixin:
def get_requests(self):
"""Parses only first highlighted selection.
"""
view = self.view
self._tests = []
for region in view.sel():
if not region.empty():
selection = view.substr(region)
try:
self._tests = parse_tests(selection)
except Exception as e:
sublime.error_message('Parse Error: there may be unbalanced brackets in tests')
print(e)
break # only parse first selection
return [test.request for test in self._tests]
def eval_assertion(self, s):
"""Includes `env` that was parsed by `RequestCommandMixin`. Raises an
exception that should be caught by client code if assertion can't be
eval'ed or there's anything wrong with assertion.
"""
dict_string = s.split('assert', 1)[1]
try:
assertion = eval(dict_string, self._env)
except Exception as e:
raise Exception('{}, {}'.format(dict_string.strip(), e))
if not isinstance(assertion, dict):
raise TypeError('assertion {} is not a dictionary'.format(assertion))
return assertion
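# Illustrative sketch (hypothetical test selection): a test pair in a requester
# file looks roughly like
#   get('https://httpbin.org/get')
#   assert {'status_code': 200}
# eval_assertion receives the "assert {...}" line, strips the keyword, and
# evals the trailing dict literal against self._env, so names defined in the
# env block can appear inside the assertion.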
class RequesterRunTestsCommand(TestParserMixin, RequestCommandMixin, sublime_plugin.TextCommand):
"""Execute requests from requester file concurrently. For each request with a
corresponding assertions dictionary, compare response with assertions and
display all results in new tab.
    Doesn't support multiple selections: only the first selection is parsed, so
    the absolute order of (request, assertion) test pairs is preserved in the
    results tab.
"""
def run(self, edit, concurrency=10):
"""Allow user to specify concurrency.
"""
self.MAX_WORKERS = max(1, concurrency)
super().run(edit)
def handle_responses(self, responses):
"""Compares response objects with assertions dictionaries and displays a
test run view that includes all discrepancies.
"""
if len(self._tests) != len(responses):
sublime.error_message('Parse Error: something went wrong')
return
results, errors = [], []
count_assertions, count_errors = 0, 0
for i, response in enumerate(responses):
try:
assertion = self.eval_assertion(self._tests[i].assertion)
except Exception as e:
errors.append('{}: {}'.format('Assertion Error', e))
else:
result = self.get_result(response, assertion)
count_assertions += result.assertions
count_errors += result.errors
results.append(result.result)
if errors:
sublime.error_message('\n\n'.join(errors))
if not results: # don't open test view if no tests were run
return
view = self.view.window().new_file()
view.set_scratch(True)
view.settings().set('requester.test_view', True)
self.set_env_on_view(view)
header = '-- {} assertion{}, {} error{} --\n'.format(
count_assertions, '' if count_assertions == 1 else 's',
count_errors, '' if count_errors == 1 else 's',
)
view.run_command('requester_replace_view_text',
{'text': header + '\n\n' + '\n\n'.join(results), 'point': 0})
view.set_read_only(True)
view.set_name('Requester Test Run')
view.set_syntax_file('Packages/Requester/syntax/requester-test.sublime-syntax')
def get_result(self, response, assertion):
"""Get result of comparing response with assertion dict. Ignores keys in
assertion dict that don't correspond to a valid property or method of
response.
"""
req, res, err = response
result = '{}\nassert {}\n'.format(req.request, assertion)
errors = []
count = len(assertion)
assertion = {str(k): v for k, v in assertion.items()} # make sure keys can be ordered
for prop, expected in sorted(assertion.items()):
            if prop.startswith('function'):
                # resolve the name up front so it is defined in the except block
                name = getattr(expected, '__name__', repr(expected))
                try:
                    value = expected(res)
                except Exception as e:
                    error = 'Function Error "{}": {}'.format(name, e)
sublime.error_message(error)
errors.append(Error('', '', '', error))
continue
if not isinstance(value, bool):
error = 'Function Error: "{}" must return "True" or "False"'.format(name)
sublime.error_message(error)
errors.append(Error('', '', '', error))
continue
if value is False:
errors.append(Error(prop, True, False, 'function "{}" validation failed'.format(name)))
elif prop in ('cookies_schema', 'json_schema', 'headers_schema'): # jsonschema validation
if prop == 'cookies_schema':
got = res.cookies.get_dict()
if prop == 'json_schema':
got = res.json()
if prop == 'headers_schema':
got = res.headers
try:
validate(got, expected)
except ValidationError as e:
errors.append(Error(prop, expected, got, e))
except Exception as e:
error = 'Schema Error: {}'.format(e)
errors.append(Error('', '', '', error))
sublime.error_message(error)
elif prop in ('cookies', 'json'): # method equality validation
if prop == 'cookies':
got = res.cookies.get_dict()
if prop == 'json':
got = res.json()
if got != expected:
errors.append(Error(prop, expected, got, 'not equal'))
else: # prop equality validation
if not hasattr(res, prop):
errors.append(Error('', '', '', '"{}" prop does not exist on response object'.format(prop)))
else:
got = getattr(res, prop)
if got != expected:
errors.append(Error(prop, expected, got, 'not equal'))
result = result + '{} assertion{}, {} error{}\n'.format(
count, '' if count == 1 else 's',
len(errors), '' if len(errors) == 1 else 's',
)
for error in errors:
result = result + self.get_error_string(error) + '\n'
return Result(result, count, len(errors))
def get_error_string(self, error, max_len=150):
"""Return a one-line string representation of validation error. Attributes
exceeding `max_len` are truncated.
"""
error_details = []
for attr in ['prop', 'expected', 'got', 'error']:
val = str(getattr(error, attr))
            if len(val) > max_len and attr != 'error':
val = '...'
error_details.append('{}: {}'.format(attr, val))
return '; '.join(error_details)
def persist_requests(self, responses):
"""Requests shouldn't be persisted for test runs.
"""
TEST_MODULE = """\"\"\"
RUN TESTS: `python -m unittest requester_tests`
MORE INFO: https://docs.python.org/3/library/unittest.html#command-line-interface
{date}
\"\"\"
import unittest
import requests
{imp}
# --- ENV --- #
{env}
# --- ENV --- #
class TestResponses(unittest.TestCase):
{body}
if __name__ == '__main__':
unittest.main()
"""
INDENT = ' ' * 4
class RequesterExportTestsCommand(TestParserMixin, RequestCommandMixin, sublime_plugin.TextCommand):
"""Parses selected (request, assertion) test pairs and exports them to a
runnable test script that includes the combined env string built from the env
file and the env block.
"""
def make_requests(self, requests, env):
self.jsi = False # jsonschema imports necessary?
tests = []
for i, test in enumerate(self._tests):
req = prepare_request(test.request, self._env, i)
if req.error:
sublime.error_message('Export Tests Request Error: {}'.format(req.error))
continue
try:
assertion = self.eval_assertion(test.assertion)
except Exception as e:
sublime.error_message('Export Tests Assertion Error: {}'.format(e))
continue
tests.append(RequestAssertion(req, assertion))
names = set()
methods = []
for test in tests:
name = self.get_test_name(test, names)
names.add(name)
methods.append(self.get_test_method(test, name))
body = '\n\n'.join(methods)
body = '\n'.join('{}{}'.format(INDENT, line) for line in body.split('\n'))
body = '\n'.join('' if line.isspace() else line for line in body.split('\n'))
date = datetime.datetime.fromtimestamp(time()).strftime('%Y-%m-%d %H:%M:%S')
jsonschema_import = 'from jsonschema import validate, ValidationError\n'
view = self.view.window().new_file()
view.run_command('requester_replace_view_text', {
'text': TEST_MODULE.format(
date=date, imp=jsonschema_import if self.jsi else '', env=self._env_string.strip(), body=body
), 'point': 0
})
view.set_syntax_file('Packages/Python/Python.sublime-syntax')
view.set_name('requester_tests.py')
view.set_scratch(True)
def get_test_method(self, test, name):
"""Return a unittest method string that starts with "def"...
"""
req, assertion = test
method = ['def {}(self):'.format(name), 'res = {}'.format(req.request)]
assertion = {str(k): v for k, v in test.assertion.items()} # make sure keys can be ordered
for prop, expected in sorted(assertion.items()):
            if prop.startswith('function'):
                if not hasattr(expected, '__name__'):
                    sublime.error_message('"{}" is not a function'.format(expected))
                    continue  # skip this assertion instead of returning None mid-method
s = 'self.assertTrue({}(res))'.format(expected.__name__)
elif prop in ('cookies_schema', 'json_schema', 'headers_schema'): # jsonschema validation
self.jsi = True
if prop == 'cookies_schema':
got = 'res.cookies.get_dict()'
if prop == 'json_schema':
got = 'res.json()'
if prop == 'headers_schema':
got = 'res.headers'
s = """try:\n{indent}validate({}, {!r})
except ValidationError as e:\n{indent}self.fail(str(e))""".format(
got, expected, indent=INDENT
)
elif prop in ('cookies', 'json'): # method equality validation
if prop == 'cookies':
s = 'self.assertEqual(res.cookies.get_dict(), {!r})'.format(expected)
if prop == 'json':
s = 'self.assertEqual(res.json(), {!r})'.format(expected)
else: # prop equality validation
s = 'self.assertEqual(res.{}, {!r})'.format(prop, expected)
method.append(s)
return '\n{}'.format(INDENT).join(line for s in method for line in s.split('\n'))
@staticmethod
def get_test_name(test, names):
"""Get a unique name for a test method, passing in an iterable of all
names that have been assigned so far.
"""
req, assertion = test
path = parse.urlparse(req.url).path.replace('/', '_')
method = req.method.lower()
count = 0
while True:
            name = 'test_{}{}{}'.format(
                method, clean_var_name(path), '_{}'.format(count) if count else ''
            )
if name not in names:
return name
count += 1
def clean_var_name(s):
"""Clean `s` so that it's a valid Python variable name.
"""
s = re.sub('[^0-9a-zA-Z_]', '', s)
s = re.sub('^[^a-zA-Z_]+', '', s)
return s
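# Example (assumed inputs): clean_var_name drops characters that are invalid in
# a Python identifier, then strips any leading non-letter characters:
#   clean_var_name('api_v1_items')  # -> 'api_v1_items'
#   clean_var_name('99-bottles!')   # -> 'bottles'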
# --- 1650075 ---
from kivy.compat import text_type
from kivy.core.window import Window
from kivy.metrics import dp
from kivy.properties import ListProperty, ObjectProperty, StringProperty
from kivy.uix.boxlayout import BoxLayout
from kivy.uix.button import Button
from kivy.uix.popup import Popup
from kivy.uix.settings import SettingItem, SettingSpacer
from kivy.uix.textinput import TextInput
from kivy.uix.widget import Widget
import re
class SettingString(SettingItem):
'''Implementation of a string setting on top of a :class:`SettingItem`.
It is visualized with a :class:`~kivy.uix.label.Label` widget that, when
clicked, will open a :class:`~kivy.uix.popup.Popup` with a
    :class:`~kivy.uix.textinput.TextInput` so the user can enter a custom
value.
'''
popup = ObjectProperty(None, allownone=True)
'''(internal) Used to store the current popup when it's shown.
:attr:`popup` is an :class:`~kivy.properties.ObjectProperty` and defaults
to None.
'''
textinput = ObjectProperty(None)
'''(internal) Used to store the current textinput from the popup and
to listen for changes.
:attr:`textinput` is an :class:`~kivy.properties.ObjectProperty` and
defaults to None.
'''
validation_regex = StringProperty(".*")
'''Regular expression for determining a valid entry.
:attr:`validation_regex` is an :class:`~kivy.properties.StringProperty`
and defaults to '.*'.
'''
ok_button_text = StringProperty("Ok")
'''Used to store the desired text for the popup Ok button.
:attr:`ok_button_text` is an :class:`~kivy.properties.StringProperty`
and defaults to 'Ok'.
'''
cancel_button_text = StringProperty("Cancel")
'''Used to store the desired text for the popup Cancel button.
:attr:`cancel_button_text` is an :class:`~kivy.properties.StringProperty`
and defaults to 'Cancel'.
'''
def on_panel(self, instance, value):
if value is None:
return
self.bind(on_release=self._create_popup)
def _dismiss(self, *largs):
if self.textinput:
self.textinput.focus = False
if self.popup:
self.popup.dismiss()
self.popup = None
def _write(self, instance):
self._dismiss()
value = self.textinput.text.strip()
self.value = value
def _valid_input(self, value):
        # Controls allowable characters, e.g. digits only
return True
def _valid_entry(self, value):
regex = re.compile(self.validation_regex)
match = re.match(regex, value)
return match
def _on_text(self, instance, value):
if self._valid_input(value):
self.last_value = value
else:
self.textinput.text = self.last_value
return
self._ok_button.disabled = not self._valid_entry(value)
def _create_popup(self, instance):
# create popup layout
content = BoxLayout(orientation='vertical', spacing='5dp')
popup_width = min(0.95 * Window.width, dp(500))
self.popup = popup = Popup(
title=self.title, content=content, size_hint=(None, None),
size=(popup_width, '250dp'))
        # create the textinput used for entering the value
self.textinput = textinput = TextInput(
text=self.value, font_size='24sp', multiline=False,
size_hint_y=None, height='42sp')
        self.last_value = self.value
        textinput.bind(text=self._on_text)
        # construct the content; plain widgets are used as spacers
content.add_widget(Widget())
content.add_widget(textinput)
content.add_widget(Widget())
content.add_widget(SettingSpacer())
        # two buttons are created to accept or cancel the current value
btnlayout = BoxLayout(size_hint_y=None, height='50dp', spacing='5dp')
self._ok_button = Button(text=self.ok_button_text)
self._ok_button.bind(on_release=self._write)
btnlayout.add_widget(self._ok_button)
btn = Button(text=self.cancel_button_text)
btn.bind(on_release=self._dismiss)
btnlayout.add_widget(btn)
content.add_widget(btnlayout)
        # all done, open the popup!
popup.open()
class SettingNumeric(SettingString):
'''Implementation of a numeric setting on top of a :class:`SettingString`.
It is visualized with a :class:`~kivy.uix.label.Label` widget that, when
clicked, will open a :class:`~kivy.uix.popup.Popup` with a
    :class:`~kivy.uix.textinput.TextInput` so the user can enter a custom
value.
'''
value_range = ListProperty([None, None])
'''Values used to represent the minimum and maximum values inclusive. None
can be specified for no limit. If you want to use positive values only in
your ConfigParser instance::
SettingNumeric(..., value_range=[0, None])
    .. warning::

        You need exactly two values; index 0 will be used as the minimum and
        index 1 as the maximum.

    :attr:`value_range` is a :class:`~kivy.properties.ListProperty` and
    defaults to [None, None].
'''
def _valid_input(self, value):
is_float = '.' in str(self.value)
try:
if is_float:
float(value)
else:
int(value)
except ValueError:
return False
return True
def _in_value_range(self, value):
inlow = (self.value_range[0] is None) or (value >= self.value_range[0])
inhigh = (self.value_range[1] is None) or (value <= self.value_range[1])
return inlow and inhigh
def _valid_entry(self, value):
is_float = '.' in str(self.value)
try:
if is_float:
return self._in_value_range(float(value))
else:
return self._in_value_range(int(value))
except ValueError:
return False
def _write(self, instance):
is_float = '.' in str(self.value)
self._dismiss()
try:
if is_float:
self.value = text_type(float(self.textinput.text))
else:
self.value = text_type(int(self.textinput.text))
except ValueError:
return
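# Minimal usage sketch (assumed settings panel): the validation hooks above let
# subclasses restrict input, e.g. a string setting limited to lowercase names
# via SettingString(..., validation_regex=r'^[a-z_]+$'), or a numeric setting
# clamped with SettingNumeric(..., value_range=[0, None]); the Ok button stays
# disabled while _valid_entry returns a falsy value.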
# --- 1650083 ---
from os import listdir
from gensim.models import Word2Vec
from tqdm import tqdm
import numpy as np
from sklearn.feature_extraction.text import TfidfVectorizer
def tfid(text_vector):
    """Fit a TF-IDF vectorizer on tokenized texts and return dense vectors."""
    vectorizer = TfidfVectorizer()
    untokenized_data = [' '.join(tweet) for tweet in tqdm(text_vector, "Vectorizing...")]
    vectorizer = vectorizer.fit(untokenized_data)
    vectors = vectorizer.transform(untokenized_data).toarray()
    return vectors
def tfid_test(train_vectors, test_vectors):
    """Fit on the training texts, then transform the test texts with the same vocabulary."""
    vectorizer = TfidfVectorizer()
    untokenized_data = [' '.join(tweet) for tweet in train_vectors]
    vectorizer = vectorizer.fit(untokenized_data)
    untokenized_data = [' '.join(tweet) for tweet in test_vectors]
    vectors = vectorizer.transform(untokenized_data).toarray()
    return vectors
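# Example usage (hypothetical tokenized tweets): fit on the training corpus
# only, then project the test corpus into the same vocabulary:
#   train = [['good', 'day'], ['bad', 'day']]
#   test = [['good', 'night']]
#   X_train = tfid(train)            # fit + transform on train
#   X_test = tfid_test(train, test)  # fit on train, transform test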
# --- 1650125 ---
import os
import requests
from dotenv import load_dotenv, find_dotenv
from setuptools import setup, find_packages
load_dotenv(find_dotenv('config.env'))
def get_version():
url = 'https://api.github.com/repos/suchak1/hyperdrive/releases/latest'
token = os.environ.get('GITHUB')
headers = {'Authorization': f'token {token}'}
response = requests.get(url, headers=headers if token else None)
data = response.json()
version = data['tag_name'].replace('v', '')
return version
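# The GitHub "latest release" endpoint returns JSON like {"tag_name": "v1.2.3", ...},
# so get_version() yields '1.2.3'. Supplying the GITHUB token (read from
# config.env) raises the unauthenticated API rate limit.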
def get_requirements():
    with open('requirements.txt', 'r') as file:
        return [line.strip() for line in file if line.strip()]
def get_readme():
with open("README.md", "r") as file:
return file.read()
setup(
name='hyperdrive',
version=get_version(),
description='An algorithmic trading platform',
long_description=get_readme(),
long_description_content_type="text/markdown",
url='https://github.com/suchak1/hyperdrive',
author='<NAME>',
author_email='<EMAIL>',
packages=find_packages(),
python_requires='>=3.7',
install_requires=get_requirements(),
project_urls={
'Bug Reports': 'https://github.com/suchak1/hyperdrive/issues',
'Source': 'https://github.com/suchak1/hyperdrive'
}
)
# --- 1650133 ---
import django.test
import django.shortcuts
from django.utils import timezone
import django.core.signing
import pytest
import accounts.models
from .. import models
@pytest.mark.django_db
class TestRegenerate(django.test.TestCase):
def setUp(self):
self.user = accounts.models.User.objects.create_user(
username="fred", email="<EMAIL>", password="<PASSWORD>", email_verified=True, twofa_enabled=True
)
self.user._test_agree_all_tos()
self.dead_backup_device = models.PaperDevice(
owner=self.user, activated_at=timezone.now(), deleted_at=timezone.now()
)
self.dead_backup_device.save()
self.backup_device = models.PaperDevice(owner=self.user, activated_at=timezone.now())
self.backup_device.save()
self.totp_device = models.TOTPDevice(owner=self.user, activated_at=timezone.now(), last_t=0)
self.totp_device.save()
self.client = django.test.Client()
self.login(self.client)
def login(self, c, username="fred"):
assert c.login(username=username, password="<PASSWORD>")
def path(self, device_id=None, device=None):
return django.shortcuts.reverse("twofa:regenerate", kwargs={"device_id": device_id or device.id})
def test_requires_login(self):
client = django.test.Client()
resp = client.get(self.path(device_id=1))
assert resp.status_code == 302
def test_rejects_get(self):
resp = self.client.get(self.path(device=self.backup_device))
assert resp.status_code == 405
def test_regenerate_someone_elses_device(self):
bob = accounts.models.User.objects.create_user(
username="bob", email="<EMAIL>", password="<PASSWORD>", email_verified=True
)
bob._test_agree_all_tos()
self.login(self.client, username="bob")
resp = self.client.post(self.path(device=self.backup_device))
assert resp.status_code == 404
def test_regenerate_deleted_device(self):
resp = self.client.post(self.path(device=self.dead_backup_device))
assert resp.status_code == 404
def test_regenerate_unregeneratable_device(self):
resp = self.client.post(self.path(device=self.totp_device))
assert resp.status_code == 302
def test_happy_path(self):
assert not models.PaperCode.objects.exists()
resp = self.client.post(self.path(device=self.backup_device))
assert resp.status_code == 302
assert models.PaperCode.objects.exists()
# --- 1650194 ---
import crosscat.cython_code.CyclicComponentModel as ccm
import math
import random
import numpy
import six
from scipy.stats import vonmises
from crosscat.utils.general_utils import logmeanexp
pi = math.pi
next_seed = lambda rng: rng.randrange(2147483647)
default_hyperparameters = dict(a=1.0, b=pi, kappa=4.0)
default_data_parameters = dict(mu=pi, kappa=4.0)
###############################################################################
# Input-checking and exception-handling functions
###############################################################################
def check_type_force_float(x, name):
"""
If an int is passed, convert it to a float. If some other type is passed,
raise an exception.
"""
if type(x) is int:
return float(x)
elif not isinstance(x, (float, numpy.float64)):
raise TypeError("%r should be a float" % (name,))
else:
return x
def check_data_type_column_data(X):
"""
Makes sure that X is a numpy array and that it is a column vector
"""
if type(X) is not numpy.ndarray:
raise TypeError("X should be type numpy.ndarray")
if len(X.shape) == 2 and X.shape[1] > 1:
raise TypeError("X should have a single column.")
def check_hyperparams_dict(hypers):
if type(hypers) is not dict:
raise TypeError("hypers should be a dict")
keys = ['a', 'b', 'kappa']
for key in keys:
if key not in hypers:
raise KeyError("missing key in hypers: %r" % (key,))
for key, value in six.iteritems(hypers):
if key not in keys:
raise KeyError("invalid hypers key: %r" % (key,))
if not isinstance(value, (float, numpy.float64)):
raise TypeError("%r should be float" % (key,))
if key in ['a', 'kappa']:
if value <= 0.0:
raise ValueError("hypers[%r] should be greater than 0" % (key,))
if key == 'b':
if value <= 0.0 or value >= 2*pi:
raise ValueError("hypers[%r] should be in [0,2*pi]" % (key,))
def check_model_params_dict(params):
if type(params) is not dict:
raise TypeError("params should be a dict")
keys = ['mu', 'kappa']
for key in keys:
if key not in params:
raise KeyError("missing key in params: %r" % (key,))
for key, value in six.iteritems(params):
if key not in keys:
raise KeyError("invalid params key: %r" % (key,))
if not isinstance(value, (float, numpy.float64)):
raise TypeError("%r should be float" % (key,))
if key == "kappa":
if value <= 0.0:
raise ValueError("kappa should be greater than 0")
elif key != "mu":
raise KeyError("Invalid params key: %r" % (key,))
else:
if value < 0.0 or value > 2*pi:
raise ValueError("mu should be in [0,2*pi]")
###############################################################################
# The class extension
###############################################################################
class p_CyclicComponentModel(ccm.p_CyclicComponentModel):
model_type = 'vonmises'
cctype = 'cyclic'
@classmethod
def from_parameters(cls, N, data_params=default_data_parameters, hypers=None, gen_seed=0):
"""
Initialize a continuous component model with sufficient statistics
generated from random data.
Inputs:
N: the number of data points
data_params: a dict with the following keys
mu: the mean of the data
kappa: the precision of the data
hypers: a dict with the following keys
                a: the prior precision of the mean
                b: the prior mean
                kappa: the precision parameter
gen_seed: an integer from which the rng is seeded
"""
check_model_params_dict(data_params)
data_kappa = data_params['kappa']
data_mean = data_params['mu']
rng = random.Random(gen_seed)
X = [ [rng.vonmisesvariate(data_mean-math.pi, data_kappa)+math.pi] for i in range(N)]
X = numpy.array(X)
check_data_type_column_data(X)
if hypers is None:
hypers = cls.draw_hyperparameters(X, n_draws=1, gen_seed=next_seed(rng))[0]
check_hyperparams_dict(hypers)
sum_sin_x = numpy.sum(numpy.sin(X))
sum_cos_x = numpy.sum(numpy.cos(X))
hypers['fixed'] = 0.0
return cls(hypers, float(N), sum_sin_x, sum_cos_x)
@classmethod
def from_data(cls, X, hypers=None, gen_seed=0):
"""
Initialize a continuous component model with sufficient statistics
generated from data X
Inputs:
X: a column of data (numpy)
hypers: dict with the following entries
                a: the prior precision of the mean
                b: the prior mean
                kappa: the precision parameter
gen_seed: a int to seed the rng
"""
check_data_type_column_data(X)
if type(gen_seed) is not int:
raise TypeError("gen_seed should be an int")
rng = random.Random(gen_seed)
if hypers is None:
hypers = cls.draw_hyperparameters(X, gen_seed=next_seed(rng))[0]
check_hyperparams_dict(hypers)
N = len(X)
sum_sin_x = numpy.sum(numpy.sin(X))
sum_cos_x = numpy.sum(numpy.cos(X))
hypers['fixed'] = 0.0
return cls(hypers, float(N), sum_sin_x, sum_cos_x)
def sample_parameters_given_hyper(self, gen_seed=0):
"""
        Samples von Mises parameters given the current hyperparameters.
Inputs:
gen_seed: integer used to seed the rng
"""
if type(gen_seed) is not int:
raise TypeError("gen_seed should be an int")
nprng = numpy.random.RandomState(gen_seed)
hypers = self.get_hypers()
a = hypers['a']
b = hypers['b']
        kappa = hypers['kappa']
        mu = nprng.vonmises(b-math.pi, a)+math.pi
assert(kappa > 0)
assert(mu >= 0 and mu <= 2*pi)
params = {'mu': mu, 'kappa': kappa}
return params
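    # Note on the pi shift used throughout this class: the numpy von Mises
    # sampler returns angles in [-pi, pi), while this model keeps data in
    # [0, 2*pi). Sampling with loc shifted by -pi and adding pi back maps
    # between the two conventions, e.g.
    #   x = nprng.vonmises(mu - math.pi, kappa) + math.pi  # x in [0, 2*pi)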
def uncollapsed_likelihood(self, X, parameters):
"""
Calculates the score of the data X under this component model with mean
mu and precision kappa.
Inputs:
X: A column of data (numpy)
parameters: a dict with the following keys
mu: the Von Mises mean
kappa: the precision of the Von Mises
"""
check_data_type_column_data(X)
check_model_params_dict(parameters)
mu = parameters['mu']
kappa = parameters['kappa']
N = float(len(X))
hypers = self.get_hypers()
a = hypers['a']
b = hypers['b']
kappa = hypers['kappa']
        log_likelihood = self.log_likelihood(X, {'mu': mu, 'kappa': kappa})
        # prior on mu is von Mises with mean b and concentration a
        log_prior_mu = vonmises.logpdf(mu - math.pi, a, loc=b - math.pi)
        log_p = log_likelihood + log_prior_mu
return log_p
@staticmethod
def log_likelihood(X, parameters):
"""
Calculates the log likelihood of the data X given mean mu and precision
kappa.
Inputs:
X: a column of data (numpy)
parameters: a dict with the following keys
mu: the Von Mises mean
kappa: the precision of the Von Mises
"""
check_data_type_column_data(X)
check_model_params_dict(parameters)
        log_likelihood = numpy.sum(vonmises.logpdf(X - math.pi, parameters['kappa'], loc=parameters['mu'] - math.pi))
return log_likelihood
@staticmethod
def log_pdf(X, parameters):
"""
Calculates the pdf for each point in the data X given mean mu and
precision kappa.
Inputs:
X: a column of data (numpy)
parameters: a dict with the following keys
mu: the Von Mises mean
kappa: the precision of the Von Mises
"""
check_data_type_column_data(X)
check_model_params_dict(parameters)
        return vonmises.logpdf(X - math.pi, parameters['kappa'], loc=parameters['mu'] - math.pi)
@staticmethod
def cdf(X, parameters):
"""
Calculates the cdf for each point in the data X given mean mu and
precision kappa.
Inputs:
X: a column of data (numpy)
parameters: a dict with the following keys
mu: the Von Mises mean
kappa: the precision of the Von Mises
"""
check_data_type_column_data(X)
check_model_params_dict(parameters)
        return vonmises.cdf(X - math.pi, parameters['kappa'], loc=parameters['mu'] - math.pi)
def brute_force_marginal_likelihood(self, X, n_samples=10000, gen_seed=0):
"""
Calculates the log marginal likelihood via brute force method in which
parameters (mu and kappa) are repeatedly drawn from the prior, the
likelihood is calculated for each set of parameters, then the average is
taken.
Inputs:
X: A column of data (numpy)
n_samples: the number of draws
            gen_seed: seed for the rng
"""
check_data_type_column_data(X)
if type(n_samples) is not int:
raise TypeError("n_samples should be an int")
if n_samples <= 0:
raise ValueError("n_samples should be greater than 0")
if type(gen_seed) is not int:
raise TypeError("gen_seed should be an int")
N = float(len(X))
rng = random.Random(gen_seed)
log_likelihoods = [0]*n_samples
for i in range(n_samples):
params = self.sample_parameters_given_hyper(gen_seed=next_seed(rng))
log_likelihoods[i] = self.log_likelihood(X, params)
log_marginal_likelihood = logmeanexp(log_likelihoods)
return log_marginal_likelihood
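    # logmeanexp averages the sampled likelihoods in log space,
    #   log( (1/n) * sum_i exp(l_i) ),
    # which is the Monte Carlo estimate of the marginal likelihood
    # p(X) = E_{params ~ prior}[ p(X | params) ].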
@staticmethod
def generate_discrete_support(params, support=0.95, nbins=100):
"""
returns a set of intervals over which the component model pdf is
supported.
Inputs:
params: a dict with entries 'mu' and 'kappa'
nbins: cardinality of the set or the number of grid points in the
approximation
support: a float in (0,1) that describes the amount of probability
we want in the range of support
"""
if type(nbins) is not int:
raise TypeError("nbins should be an int")
if nbins <= 0:
raise ValueError("nbins should be greater than 0")
support = check_type_force_float(support, "support")
if support <= 0.0 or support >= 1.0:
raise ValueError("support is a float st: 0 < support < 1")
check_model_params_dict(params)
mu = params['mu']
kappa = params['kappa']
assert(mu >= 0 and mu <= 2*math.pi)
a, b = vonmises.interval(support, kappa)
a += mu
b += mu
assert -math.pi <= a < b <= 3*math.pi
assert b - a <= 2*math.pi
        support_range = b - a
support_bin_size = support_range/(nbins-1.0)
bins = [a+i*support_bin_size for i in range(nbins)]
return bins
@staticmethod
def draw_hyperparameters(X, n_draws=1, gen_seed=0):
"""
Draws hyperparameters a, b, and kappa from the same distribution that
generates the grid in the C++ code.
Inputs:
X: a column of data (numpy)
n_draws: the number of draws
gen_seed: seed the rng
Output:
A list of dicts of draws where each entry has keys 'a', 'b', 'kappa'.
"""
check_data_type_column_data(X)
if type(n_draws) is not int:
raise TypeError("n_draws should be an int")
if type(gen_seed) is not int:
raise TypeError("gen_seed should be an int")
rng = random.Random(gen_seed)
samples = []
N = float(len(X))
vx = numpy.var(X)
a_kappa_draw_range = (vx, vx/N)
mu_draw_range = (0, 2*pi)
for i in range(n_draws):
a = math.exp(rng.uniform(a_kappa_draw_range[0], a_kappa_draw_range[1]))
kappa = math.exp(rng.uniform(a_kappa_draw_range[0], a_kappa_draw_range[1]))
b = rng.uniform(mu_draw_range[0], mu_draw_range[1])
this_draw = dict(a=a, b=b, kappa=kappa)
samples.append(this_draw)
assert len(samples) == n_draws
return samples
@staticmethod
def generate_data_from_parameters(params, N, gen_seed=0):
"""
        Generates data from a von Mises distribution
Inputs:
params: a dict with entries 'mu' and 'kappa'
N: number of data points
"""
if type(N) is not int:
raise TypeError("N should be an int")
if N <= 0:
raise ValueError("N should be greater than 0")
nprng = numpy.random.RandomState(gen_seed)
check_model_params_dict(params)
mu = params['mu']
kappa = params['kappa']
X = numpy.array([[nprng.vonmises(mu-math.pi, kappa)+math.pi] for i in range(N)])
        assert numpy.all((X >= 0.) & (X <= 2.*math.pi))
assert len(X) == N
return X
@staticmethod
def get_model_parameter_bounds():
"""
Returns a dict where each key-value pair is a model parameter and a
tuple with the lower and upper bounds
"""
inf = float("inf")
        params = dict(mu=(0.0, 2*pi), kappa=(0.0, inf))
return params
# --- 1650291 ---
from pycompss.api.constraint import constraint
from pycompss.api.task import task
@task(returns=1, numba=True)
def external(value):
return value / 2
@constraint(computing_units="2")
@task(returns=1, numba=True)
def externalc(value):
return (value - 10) / 2
class example(object):
def __init__(self, v):
self.v = v
@task(numba=True)
def increment(self, value):
self.v = self.v + value
@constraint(computing_units="2")
@task(numba=True)
def subtract(self, value):
self.v = self.v - value
@task(numba=True)
def calcul(self, value):
self.v = self.v + external(value)
@constraint(computing_units="2")
@task(numba=True)
def calcul_c(self, value):
self.v = self.v + externalc(value)
def get_v(self):
return self.v
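# Usage sketch (assumed PyCOMPSs runtime): task invocations return immediately,
# so objects must be synchronized before reading results, e.g.
#   from pycompss.api.api import compss_wait_on
#   e = example(10)
#   e.increment(5)
#   e = compss_wait_on(e)  # block until the task graph for e has run
#   print(e.get_v())       # 15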
# --- 1650319 ---
import multiprocessing
from concurrent import futures
from functools import partial
from tqdm import tqdm
import numpy as np
from .common import get_blocking
def copy(data, out,
block_shape=None, n_threads=None,
mask=None, verbose=False, roi=None):
""" Copy a dataset in parallel.
Arguments:
data [array_like] - input data, numpy array or similar like h5py or zarr dataset
out [array_like] - output dataset
block_shape [tuple] - shape of the blocks used for parallelisation,
by default chunks of the output will be used, if available (default: None)
n_threads [int] - number of threads, by default all are used (default: None)
mask [array_like] - mask to exclude data from the computation (default: None)
verbose [bool] - verbosity flag (default: False)
roi [tuple[slice]] - region of interest for this computation (default: None)
Returns:
array_like - the copied dataset
"""
n_threads = multiprocessing.cpu_count() if n_threads is None else n_threads
if out.shape != data.shape:
raise ValueError(f"Output shape {out.shape} does not match input shape {data.shape}")
if mask is not None and mask.shape != data.shape:
raise ValueError(f"Invalid mask shape, got {mask.shape}, expected {data.shape} (= shape of first operand)")
if block_shape is None:
block_shape = out.chunks
blocking = get_blocking(data, block_shape, roi)
def _copy_block(block_id):
block = blocking.getBlock(blockIndex=block_id)
bb = tuple(slice(beg, end) for beg, end in zip(block.begin, block.end))
if mask is not None:
m = mask[bb]
if m.sum() == 0:
return
block_data = data[bb]
        if mask is not None:
            # zero out voxels that fall outside the mask
            block_data[np.logical_not(m)] = 0
out[bb] = block_data
n_blocks = blocking.numberOfBlocks
with futures.ThreadPoolExecutor(n_threads) as tp:
if verbose:
list(tqdm(tp.map(_copy_block, range(n_blocks)), total=n_blocks))
else:
list(tp.map(_copy_block, range(n_blocks)))
return out
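# Example (hypothetical zarr datasets): copy `ds_in` into `ds_out`, using the
# output's chunks as the parallelisation grid:
#   out = copy(ds_in, ds_out, n_threads=8, verbose=True)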
def _ds_block_reduce(data, out_shape, func):
pass
mean_downscaling = partial(_ds_block_reduce, func=np.mean)
max_downscaling = partial(_ds_block_reduce, func=np.max)
min_downscaling = partial(_ds_block_reduce, func=np.min)
def _ds_interpolate(data, out_shape, order):
pass
nearest_downscaling = partial(_ds_interpolate, order=0)
linear_downscaling = partial(_ds_interpolate, order=1)
quadratic_downscaling = partial(_ds_interpolate, order=2)
cubic_downscaling = partial(_ds_interpolate, order=3)
def downscale(data, out, downscaling_function=None,
block_shape=None, n_threads=None,
mask=None, verbose=False, roi=None):
""" Downscale a dataset in parallel.
Arguments:
data [array_like] - input data, numpy array or similar like h5py or zarr dataset
out [array_like] - output dataset
        downscaling_function [str or callable] - the function used for downscaling the blocks;
            by default mean downscaling is used (default: None)
block_shape [tuple] - shape of the blocks used for parallelisation,
by default chunks of the output will be used, if available (default: None)
n_threads [int] - number of threads, by default all are used (default: None)
mask [array_like] - mask to exclude data from the computation (default: None)
verbose [bool] - verbosity flag (default: False)
roi [tuple[slice]] - region of interest for this computation (default: None)
Returns:
array_like - the downscaled dataset
"""
ds_function_dict = {'mean_downscaling': mean_downscaling,
'max_downscaling': max_downscaling,
'min_downscaling': min_downscaling,
'nearest_downscaling': nearest_downscaling,
'linear_downscaling': linear_downscaling,
'quadratic_downscaling': quadratic_downscaling,
'cubic_downscaling': cubic_downscaling}
ds_function_dict.update({name.replace('_downscaling', ''): func
for name, func in ds_function_dict.items()})
if downscaling_function is None:
downscaling_function = mean_downscaling
elif isinstance(downscaling_function, str):
downscaling_function = ds_function_dict[downscaling_function]
elif not callable(downscaling_function):
raise ValueError(f"Invalid downscaling function of type {type(downscaling_function)}")
if block_shape is None:
block_shape = out.chunks
blocking = get_blocking(data, block_shape, roi)
def _downscale_block(block_id):
pass
n_blocks = blocking.numberOfBlocks
with futures.ThreadPoolExecutor(n_threads) as tp:
if verbose:
list(tqdm(tp.map(_downscale_block, range(n_blocks)), total=n_blocks))
else:
list(tp.map(_downscale_block, range(n_blocks)))
return out
# --- 1650327 ---
class SentenceInList:
"""A sentence from the Tatoeba corpus which is in a list"""
def __init__(self, list_id, sentence_id):
self._lid = list_id
self._sid = sentence_id
@property
def list_id(self):
"""Get the id of the list"""
return int(self._lid)
@property
def sentence_id(self):
"""Get the id of the sentence"""
return int(self._sid)
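# Example: ids are stored as strings (assuming they arrive as raw corpus
# fields) and exposed as ints:
#   s = SentenceInList('42', '12345')
#   s.list_id       # 42
#   s.sentence_id   # 12345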
# --- 1650337 ---
import unittest
from Ejercicio1 import PaginaWeb, SitioWeb
# Tests for the first and second exercises. The first could be implemented
# without problems; the others print their output and return None, so the
# expected value is None, but running the tests prints what it should print.
class PaginaTest(unittest.TestCase):
def test_pagina_web(self):
pagina = PaginaWeb(url="www.simon.com",ruta="/https",formato="HTML",contenido="<p>Esta es la pagina de Simon</p>",titulo="<h1>La pagina de simon</h1>",slug="La pagina de simon",metatags="<meta name =description content = This is the description. It should be about 155 characters long.>)")
self.assertEqual(
str(pagina),
"Url de la pagina: www.simon.com, ruta de la pagina: /https, formato de la pagina: HTML, contenido de la pagina: <p>Esta es la pagina de Simon</p>, titulo de la pagina: <h1>La pagina de simon</h1>, slug de la pagina: la-pagina-de-simon y metatags de la pagina: <meta name =description content = This is the description. It should be about 155 characters long.>)"
)
def test_buscar_pagina(self):
Pag2 = PaginaWeb("www.pedro.com","/https","XML","<contenido>Esta es la pagina de Pedro</contenido>","La pagina de pedro","La pagina de pedro","No se")
paginas = [Pag2]
sitio = SitioWeb(".net","informatica",paginas)
self.assertEqual(
sitio.buscador(Pag2),
None
)
def test_buscar_pagina_no_existe(self):
Pagdummy = PaginaWeb("x","x","x","x","x","x","x")
paginas = [Pagdummy]
sitio = SitioWeb(".net","informatica",paginas)
self.assertEqual(
sitio.buscador(Pagdummy),
None
)
if __name__ == "__main__":
unittest.main()
# --- 1650358 ---
from __future__ import division
from collections import OrderedDict
from functools import partial
import gzip
import io
import os
import logging
import os.path
import h5py
import numpy
from picklable_itertools.extras import equizip
from progressbar import ProgressBar
from PIL import Image
from scipy.io.matlab import loadmat
from six.moves import zip, xrange
import zmq
from fuel.converters.base import check_exists
from fuel.datasets import H5PYDataset
from fuel.utils.formats import tar_open
from fuel.utils.parallel import producer_consumer
from fuel import config
log = logging.getLogger(__name__)
DEVKIT_ARCHIVE = 'ILSVRC2010_devkit-1.0.tar.gz'
DEVKIT_META_PATH = 'devkit-1.0/data/meta.mat'
DEVKIT_VALID_GROUNDTRUTH_PATH = ('devkit-1.0/data/'
'ILSVRC2010_validation_ground_truth.txt')
PATCH_IMAGES_TAR = 'patch_images.tar'
TEST_GROUNDTRUTH = 'ILSVRC2010_test_ground_truth.txt'
TRAIN_IMAGES_TAR = 'ILSVRC2010_images_train.tar'
VALID_IMAGES_TAR = 'ILSVRC2010_images_val.tar'
TEST_IMAGES_TAR = 'ILSVRC2010_images_test.tar'
IMAGE_TARS = (TRAIN_IMAGES_TAR, VALID_IMAGES_TAR, TEST_IMAGES_TAR,
PATCH_IMAGES_TAR)
PUBLIC_FILES = TEST_GROUNDTRUTH, DEVKIT_ARCHIVE
ALL_FILES = PUBLIC_FILES + IMAGE_TARS
@check_exists(required_files=ALL_FILES)
def convert_ilsvrc2010(directory, output_directory,
output_filename='ilsvrc2010.hdf5',
shuffle_seed=config.default_seed):
"""Converter for data from the ILSVRC 2010 competition.
Source files for this dataset can be obtained by registering at
[ILSVRC2010WEB].
Parameters
----------
input_directory : str
Path from which to read raw data files.
output_directory : str
Path to which to save the HDF5 file.
output_filename : str, optional
The output filename for the HDF5 file. Default: 'ilsvrc2010.hdf5'.
shuffle_seed : int or sequence, optional
Seed for a random number generator used to shuffle the order
of the training set on disk, so that sequential reads will not
be ordered by class.
.. [ILSVRC2010WEB] http://image-net.org/challenges/LSVRC/2010/index
"""
devkit_path = os.path.join(directory, DEVKIT_ARCHIVE)
test_groundtruth_path = os.path.join(directory, TEST_GROUNDTRUTH)
train, valid, test, patch = [os.path.join(directory, fn)
for fn in IMAGE_TARS]
n_train, valid_groundtruth, test_groundtruth, wnid_map = \
prepare_metadata(devkit_path, test_groundtruth_path)
n_valid, n_test = len(valid_groundtruth), len(test_groundtruth)
output_path = os.path.join(output_directory, output_filename)
with h5py.File(output_path, 'w') as f:
log.info('Creating HDF5 datasets...')
prepare_hdf5_file(f, n_train, n_valid, n_test)
log.info('Processing training set...')
process_train_set(f, train, patch, n_train, wnid_map, shuffle_seed)
log.info('Processing validation set...')
process_other_set(f, 'valid', valid, patch, valid_groundtruth, n_train)
log.info('Processing test set...')
process_other_set(f, 'test', test, patch, test_groundtruth,
n_train + n_valid)
log.info('Done.')
return (output_path,)
def fill_subparser(subparser):
"""Sets up a subparser to convert the ILSVRC2010 dataset files.
Parameters
----------
subparser : :class:`argparse.ArgumentParser`
Subparser handling the `ilsvrc2010` command.
"""
subparser.add_argument(
"--shuffle-seed", help="Seed to use for randomizing order of the "
"training set on disk.",
default=config.default_seed, type=int, required=False)
return convert_ilsvrc2010
def prepare_metadata(devkit_archive, test_groundtruth_path):
"""Extract dataset metadata required for HDF5 file setup.
Parameters
----------
devkit_archive : str or file-like object
The filename or file-handle for the gzipped TAR archive
containing the ILSVRC2010 development kit.
test_groundtruth_path : str or file-like object
The filename or file-handle for the text file containing
the ILSVRC2010 test set ground truth.
Returns
-------
n_train : int
The number of examples in the training set.
valid_groundtruth : ndarray, 1-dimensional
An ndarray containing the validation set groundtruth in terms of
0-based class indices.
test_groundtruth : ndarray, 1-dimensional
An ndarray containing the test groundtruth in terms of 0-based
class indices.
wnid_map : dict
A dictionary that maps WordNet IDs to 0-based class indices.
"""
# Read what's necessary from the development kit.
synsets, cost_matrix, raw_valid_groundtruth = read_devkit(devkit_archive)
# Mapping to take WordNet IDs to our internal 0-999 encoding.
wnid_map = dict(zip((s.decode('utf8') for s in synsets['WNID']),
xrange(1000)))
# Map the 'ILSVRC2010 ID' to our zero-based ID.
ilsvrc_id_to_zero_based = dict(zip(synsets['ILSVRC2010_ID'],
xrange(len(synsets))))
# Map the validation set groundtruth to 0-999 labels.
valid_groundtruth = [ilsvrc_id_to_zero_based[id_]
for id_ in raw_valid_groundtruth]
# Raw test data groundtruth, ILSVRC2010 IDs.
raw_test_groundtruth = numpy.loadtxt(test_groundtruth_path,
dtype=numpy.int16)
# Map the test set groundtruth to 0-999 labels.
test_groundtruth = [ilsvrc_id_to_zero_based[id_]
for id_ in raw_test_groundtruth]
# Ascertain the number of filenames to prepare appropriate sized
# arrays.
n_train = int(synsets['num_train_images'].sum())
log.info('Training set: {} images'.format(n_train))
log.info('Validation set: {} images'.format(len(valid_groundtruth)))
log.info('Test set: {} images'.format(len(test_groundtruth)))
n_total = n_train + len(valid_groundtruth) + len(test_groundtruth)
log.info('Total (train/valid/test): {} images'.format(n_total))
return n_train, valid_groundtruth, test_groundtruth, wnid_map
def create_splits(n_train, n_valid, n_test):
n_total = n_train + n_valid + n_test
tuples = {}
tuples['train'] = (0, n_train)
tuples['valid'] = (n_train, n_train + n_valid)
tuples['test'] = (n_train + n_valid, n_total)
sources = ['encoded_images', 'targets', 'filenames']
return OrderedDict(
(split, OrderedDict((source, tuples[split]) for source in sources))
for split in ('train', 'valid', 'test')
)
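# For example, with n_train=5, n_valid=2, n_test=3 the split mapping is
#   train -> (0, 5), valid -> (5, 7), test -> (7, 10)
# for each of the 'encoded_images', 'targets' and 'filenames' sources.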
def prepare_hdf5_file(hdf5_file, n_train, n_valid, n_test):
"""Create datasets within a given HDF5 file.
Parameters
----------
hdf5_file : :class:`h5py.File` instance
HDF5 file handle to which to write.
n_train : int
The number of training set examples.
n_valid : int
The number of validation set examples.
n_test : int
The number of test set examples.
"""
n_total = n_train + n_valid + n_test
splits = create_splits(n_train, n_valid, n_test)
hdf5_file.attrs['split'] = H5PYDataset.create_split_array(splits)
vlen_dtype = h5py.special_dtype(vlen=numpy.dtype('uint8'))
hdf5_file.create_dataset('encoded_images', shape=(n_total,),
dtype=vlen_dtype)
hdf5_file.create_dataset('targets', shape=(n_total, 1), dtype=numpy.int16)
hdf5_file.create_dataset('filenames', shape=(n_total, 1), dtype='S32')
def process_train_set(hdf5_file, train_archive, patch_archive, n_train,
wnid_map, shuffle_seed=None):
"""Process the ILSVRC2010 training set.
Parameters
----------
hdf5_file : :class:`h5py.File` instance
HDF5 file handle to which to write. Assumes `features`, `targets`
and `filenames` already exist and have first dimension larger than
`n_train`.
train_archive : str or file-like object
Filename or file handle for the TAR archive of training images.
patch_archive : str or file-like object
Filename or file handle for the TAR archive of patch images.
n_train : int
The number of items in the training set.
wnid_map : dict
A dictionary mapping WordNet IDs to class indices.
shuffle_seed : int or sequence, optional
Seed for a NumPy random number generator that permutes the
training set on disk. If `None`, no permutation is performed
(this is the default).
"""
producer = partial(train_set_producer, train_archive=train_archive,
patch_archive=patch_archive, wnid_map=wnid_map)
consumer = partial(image_consumer, hdf5_file=hdf5_file,
num_expected=n_train, shuffle_seed=shuffle_seed)
producer_consumer(producer, consumer)
def _write_to_hdf5(hdf5_file, index, image_filename, image_data,
class_index):
hdf5_file['filenames'][index] = image_filename.encode('ascii')
hdf5_file['encoded_images'][index] = image_data
hdf5_file['targets'][index] = class_index
def train_set_producer(socket, train_archive, patch_archive, wnid_map):
"""Load/send images from the training set TAR file or patch images.
Parameters
----------
socket : :class:`zmq.Socket`
PUSH socket on which to send loaded images.
train_archive : str or file-like object
Filename or file handle for the TAR archive of training images.
patch_archive : str or file-like object
Filename or file handle for the TAR archive of patch images.
wnid_map : dict
A dictionary that maps WordNet IDs to 0-based class indices.
Used to decode the filenames of the inner TAR files.
"""
patch_images = extract_patch_images(patch_archive, 'train')
num_patched = 0
with tar_open(train_archive) as tar:
for inner_tar_info in tar:
with tar_open(tar.extractfile(inner_tar_info.name)) as inner:
wnid = inner_tar_info.name.split('.')[0]
class_index = wnid_map[wnid]
filenames = sorted(info.name for info in inner
if info.isfile())
images_gen = (load_from_tar_or_patch(inner, filename,
patch_images)
for filename in filenames)
pathless_filenames = (os.path.split(fn)[-1]
for fn in filenames)
stream = equizip(pathless_filenames, images_gen)
for image_fn, (image_data, patched) in stream:
if patched:
num_patched += 1
socket.send_pyobj((image_fn, class_index), zmq.SNDMORE)
socket.send(image_data)
if num_patched != len(patch_images):
raise ValueError('not all patch images were used')
def image_consumer(socket, hdf5_file, num_expected, shuffle_seed=None,
offset=0):
"""Fill an HDF5 file with incoming images from a socket.
Parameters
----------
socket : :class:`zmq.Socket`
PULL socket on which to receive images.
hdf5_file : :class:`h5py.File` instance
HDF5 file handle to which to write. Assumes `features`, `targets`
and `filenames` already exist and have first dimension larger than
`sum(images_per_class)`.
num_expected : int
The number of items we expect to be sent over the socket.
shuffle_seed : int or sequence, optional
Seed for a NumPy random number generator that permutes the
images on disk.
offset : int, optional
The offset in the HDF5 datasets at which to start writing
received examples. Defaults to 0.
"""
with ProgressBar(maxval=num_expected) as pb:
if shuffle_seed is None:
index_gen = iter(xrange(num_expected))
else:
rng = numpy.random.RandomState(shuffle_seed)
index_gen = iter(rng.permutation(num_expected))
for i, num in enumerate(index_gen):
image_filename, class_index = socket.recv_pyobj(zmq.SNDMORE)
image_data = numpy.fromstring(socket.recv(), dtype='uint8')
_write_to_hdf5(hdf5_file, num + offset, image_filename,
image_data, class_index)
pb.update(i + 1)
def process_other_set(hdf5_file, which_set, image_archive, patch_archive,
groundtruth, offset):
"""Process the validation or test set.
Parameters
----------
hdf5_file : :class:`h5py.File` instance
HDF5 file handle to which to write. Assumes `features`, `targets`
and `filenames` already exist and have first dimension larger than
`sum(images_per_class)`.
which_set : str
Which set of images is being processed. One of 'train', 'valid',
'test'. Used for extracting the appropriate images from the patch
archive.
image_archive : str or file-like object
The filename or file-handle for the TAR archive containing images.
patch_archive : str or file-like object
Filename or file handle for the TAR archive of patch images.
groundtruth : iterable
Iterable container containing scalar 0-based class index for each
image, sorted by filename.
offset : int
The offset in the HDF5 datasets at which to start writing.
"""
producer = partial(other_set_producer, image_archive=image_archive,
patch_archive=patch_archive,
groundtruth=groundtruth, which_set=which_set)
consumer = partial(image_consumer, hdf5_file=hdf5_file,
num_expected=len(groundtruth), offset=offset)
producer_consumer(producer, consumer)
def other_set_producer(socket, which_set, image_archive, patch_archive,
groundtruth):
"""Push image files read from the valid/test set TAR to a socket.
Parameters
----------
socket : :class:`zmq.Socket`
PUSH socket on which to send images.
which_set : str
Which set of images is being processed. One of 'train', 'valid',
'test'. Used for extracting the appropriate images from the patch
archive.
image_archive : str or file-like object
The filename or file-handle for the TAR archive containing images.
patch_archive : str or file-like object
Filename or file handle for the TAR archive of patch images.
groundtruth : iterable
Iterable container containing scalar 0-based class index for each
image, sorted by filename.
"""
patch_images = extract_patch_images(patch_archive, which_set)
num_patched = 0
with tar_open(image_archive) as tar:
filenames = sorted(info.name for info in tar if info.isfile())
images = (load_from_tar_or_patch(tar, filename, patch_images)
for filename in filenames)
pathless_filenames = (os.path.split(fn)[-1] for fn in filenames)
image_iterator = equizip(images, pathless_filenames, groundtruth)
for (image_data, patched), filename, class_index in image_iterator:
if patched:
num_patched += 1
socket.send_pyobj((filename, class_index), zmq.SNDMORE)
socket.send(image_data, copy=False)
    if num_patched != len(patch_images):
        raise ValueError('not all patch images were used')
def load_from_tar_or_patch(tar, image_filename, patch_images):
"""Do everything necessary to process an image inside a TAR.
Parameters
----------
tar : `TarFile` instance
The tar from which to read `image_filename`.
image_filename : str
Fully-qualified path inside of `tar` from which to read an
image file.
patch_images : dict
A dictionary containing filenames (without path) of replacements
to be substituted in place of the version of the same file found
in `tar`.
Returns
-------
image_data : bytes
The JPEG bytes representing either the image from the TAR archive
or its replacement from the patch dictionary.
patched : bool
True if the image was retrieved from the patch dictionary. False
if it was retrieved from the TAR file.
"""
patched = True
image_bytes = patch_images.get(os.path.basename(image_filename), None)
if image_bytes is None:
patched = False
        try:
            image_bytes = tar.extractfile(image_filename).read()
            numpy.array(Image.open(io.BytesIO(image_bytes)))
        except (IOError, OSError):
            # some files in the archives are stored as gzipped JPEGs;
            # decompress and verify the image again
            with gzip.GzipFile(fileobj=tar.extractfile(image_filename)) as gz:
                image_bytes = gz.read()
                numpy.array(Image.open(io.BytesIO(image_bytes)))
return image_bytes, patched
def read_devkit(f):
"""Read relevant information from the development kit archive.
Parameters
----------
f : str or file-like object
The filename or file-handle for the gzipped TAR archive
containing the ILSVRC2010 development kit.
Returns
-------
synsets : ndarray, 1-dimensional, compound dtype
See :func:`read_metadata_mat_file` for details.
cost_matrix : ndarray, 2-dimensional, uint8
See :func:`read_metadata_mat_file` for details.
raw_valid_groundtruth : ndarray, 1-dimensional, int16
The labels for the ILSVRC2010 validation set,
distributed with the development kit code.
"""
with tar_open(f) as tar:
# Metadata table containing class hierarchy, textual descriptions, etc.
meta_mat = tar.extractfile(DEVKIT_META_PATH)
synsets, cost_matrix = read_metadata_mat_file(meta_mat)
# Raw validation data groundtruth, ILSVRC2010 IDs. Confusingly
# distributed inside the development kit archive.
raw_valid_groundtruth = numpy.loadtxt(tar.extractfile(
DEVKIT_VALID_GROUNDTRUTH_PATH), dtype=numpy.int16)
return synsets, cost_matrix, raw_valid_groundtruth
def read_metadata_mat_file(meta_mat):
"""Read ILSVRC2010 metadata from the distributed MAT file.
Parameters
----------
meta_mat : str or file-like object
The filename or file-handle for `meta.mat` from the
ILSVRC2010 development kit.
Returns
-------
synsets : ndarray, 1-dimensional, compound dtype
A table containing ILSVRC2010 metadata for the "synonym sets"
or "synsets" that comprise the classes and superclasses,
including the following fields:
* `ILSVRC2010_ID`: the integer ID used in the original
competition data.
* `WNID`: A string identifier that uniquely identifies
a synset in ImageNet and WordNet.
* `wordnet_height`: The length of the longest path to
a leaf node in the FULL ImageNet/WordNet hierarchy
(leaf nodes in the FULL ImageNet/WordNet hierarchy
have `wordnet_height` 0).
* `gloss`: A string representation of an English
textual description of the concept represented by
this synset.
* `num_children`: The number of children in the hierarchy
for this synset.
* `words`: A string representation, comma separated,
of different synoym words or phrases for the concept
represented by this synset.
* `children`: A vector of `ILSVRC2010_ID`s of children
of this synset, padded with -1. Note that these refer
to `ILSVRC2010_ID`s from the original data and *not*
the zero-based index in the table.
* `num_train_images`: The number of training images for
this synset.
cost_matrix : ndarray, 2-dimensional, uint8
A 1000x1000 matrix containing the precomputed pairwise
cost (based on distance in the hierarchy) for all
low-level synsets (i.e. the thousand possible output
classes with training data associated).
"""
mat = loadmat(meta_mat, squeeze_me=True)
synsets = mat['synsets']
cost_matrix = mat['cost_matrix']
new_dtype = numpy.dtype([
('ILSVRC2010_ID', numpy.int16),
('WNID', ('S', max(map(len, synsets['WNID'])))),
('wordnet_height', numpy.int8),
('gloss', ('S', max(map(len, synsets['gloss'])))),
('num_children', numpy.int8),
('words', ('S', max(map(len, synsets['words'])))),
        ('children', (numpy.int16, max(synsets['num_children']))),
('num_train_images', numpy.uint16)
])
new_synsets = numpy.empty(synsets.shape, dtype=new_dtype)
for attr in ['ILSVRC2010_ID', 'WNID', 'wordnet_height', 'gloss',
'num_children', 'words', 'num_train_images']:
new_synsets[attr] = synsets[attr]
children = [numpy.atleast_1d(ch) for ch in synsets['children']]
padded_children = [
numpy.concatenate((c,
-numpy.ones(new_dtype['children'].shape[0] - len(c),
dtype=numpy.int16)))
for c in children
]
new_synsets['children'] = padded_children
return new_synsets, cost_matrix
def extract_patch_images(f, which_set):
"""Extracts a dict of the "patch images" for ILSVRC2010.
Parameters
----------
f : str or file-like object
The filename or file-handle to the patch images TAR file.
which_set : str
Which set of images to extract. One of 'train', 'valid', 'test'.
Returns
-------
dict
A dictionary contains a mapping of filenames (without path) to a
bytes object containing the replacement image.
Notes
-----
Certain images in the distributed archives are blank, or display
an "image not available" banner. A separate TAR file of
"patch images" is distributed with the corrected versions of
these. It is this archive that this function is intended to read.
"""
if which_set not in ('train', 'valid', 'test'):
raise ValueError('which_set must be one of train, valid, or test')
which_set = 'val' if which_set == 'valid' else which_set
patch_images = {}
with tar_open(f) as tar:
for info_obj in tar:
if not info_obj.name.endswith('.JPEG'):
continue
# Pretty sure that '/' is used for tarfile regardless of
# os.path.sep, but I officially don't care about Windows.
tokens = info_obj.name.split('/')
file_which_set = tokens[-2]
if file_which_set != which_set:
continue
filename = tokens[-1]
patch_images[filename] = tar.extractfile(info_obj.name).read()
return patch_images
# --- 1650372 ---
from django.http import HttpResponse
from django.contrib.auth.decorators import login_required
@login_required
def index(request):
"""
    Overview
:param request:
:return:
"""
return HttpResponse("This is loonflow's api server, please view frontend page, reference: http://loonflow.readthedocs.io/")
# --- 1650397 ---
import inspect
import locale
import markdown as md
import os
import re
from django import template
from django.conf import settings
from django.contrib.auth import get_user_model
from django.utils import timezone
from django.utils.translation import gettext as _
from django.template.defaultfilters import stringfilter
from django_form_builder import dynamic_fields
from uni_ticket.models import (Ticket,
TicketAssignment,
TicketCategory,
TicketCategoryCondition,
TicketCategoryModule)
from uni_ticket import settings as uni_ticket_settings
from uni_ticket.utils import (download_file,
format_slugged_name,
get_user_type,
office_can_be_deleted,
user_is_in_default_office,
user_is_operator,
user_manage_something)
register = template.Library()
@register.simple_tag
def not_a_simple_user(user):
return user_manage_something(user)
@register.filter
def filename(value):
if os.path.exists(value.path):
return os.path.basename(value.file.name)
return _("Risorsa non più disponibile")
@register.filter
def no_slugged(value):
return format_slugged_name(value)
@register.simple_tag
def year_list():
return range(2019, timezone.localdate().year+1)
@register.simple_tag
def categories_list(structure, user):
if user_is_in_default_office(user, structure):
return TicketCategory.objects.filter(organizational_structure=structure)
categories = []
employee_offices = user_is_operator(user, structure)
for eo in employee_offices:
office_categories = TicketCategory.objects.filter(organizational_office=eo.office)
for oc in office_categories:
categories.append(oc)
return categories
@register.simple_tag
def current_date():
return timezone.localtime().date()
@register.simple_tag
def conditions_in_category(category):
conditions = TicketCategoryCondition.objects.filter(category=category,
is_active=True).count()
return conditions
@register.simple_tag
def tasks_in_category(category):
tasks = category.get_tasks().count()
return tasks
@register.simple_tag
def simple_user_context_name():
return uni_ticket_settings.CONTEXT_SIMPLE_USER
@register.simple_tag
def get_usertype(user, structure, label_value_tuple=False):
label = get_user_type(user, structure)
value = uni_ticket_settings.MANAGEMENT_URL_PREFIX[label]
if label_value_tuple: return (label, value)
return value
@register.simple_tag
def get_label_from_form(form, field_name):
field = form.fields.get(field_name)
if field:
return (field.label, getattr(field, 'pre_text', False))
# return False
# formset (we need the parent field label)
formset_field_name_parts = field_name.rsplit("-0-", 1)
# parent formset field
field = form.fields.get(formset_field_name_parts[0])
if field:
# toDo: better reference to django_form_builder for regex and methods
_regexp = '(?P<colname>[a-zA-Z0-9_ ]*)'
content = re.search(_regexp, field.choices[0]) if field.choices else False
###
if content and content.groupdict()['colname'] == formset_field_name_parts[1]:
# get formset field pre_text
return (False, getattr(field, 'pre_text', False))
return False
@register.filter
def get_dyn_field_name(value):
for m in inspect.getmembers(dynamic_fields, inspect.isclass):
if m[0]==value: return getattr(m[1], 'field_type')
return value
@register.simple_tag
def get_unread_messages(ticket, by_operator=True):
return ticket.get_messages_count(by_operator)[1]
@register.simple_tag
def user_from_pk(user_id):
if not user_id: return False
user_model = get_user_model()
user = user_model.objects.get(pk=user_id)
if not user: return False
return user
@register.simple_tag
def user_operator_chat(user, structure):
return user_is_in_default_office(user, structure)
@register.simple_tag
def settings_value(name, **kwargs):
value = getattr(settings, name, None) or getattr(uni_ticket_settings, name, None)
if value and kwargs: return value.format(**kwargs)
return value
@register.simple_tag
def obj_get_attr(obj, attr, **kwargs):
return getattr(obj, attr, None)
@register.filter()
@stringfilter
def markdown(value):
return md.markdown(value, extensions=['markdown.extensions.fenced_code'])
@register.simple_tag
def ticket_has_been_taken(ticket, user=None, structure=None, exclude_readonly=False):
return ticket.has_been_taken(user=user,
structure=structure,
exclude_readonly=exclude_readonly)
@register.simple_tag
def ticket_is_open(ticket, user=None):
return ticket.is_open(user)
@register.simple_tag
def app_is_installed(name):
return name in settings.INSTALLED_APPS
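# Template usage sketch; the {% load %} name is assumed to match this
# module's filename, and the setting name and ticket field are illustrative:
#
#   {% load uni_ticket_tags %}
#   {% not_a_simple_user request.user as is_manager %}
#   {% settings_value "HOSTNAME" as hostname %}
#   {{ ticket.description|markdown|safe }}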
|
1650436
|
import os, sys
import numpy as np
import torch
# Pre-defined collaters
class RayBatchCollater:
def __init__(self):
pass
def __call__(self, xs):
batch_rays = torch.stack([torch.as_tensor(x['rays']) for x in xs], 0)
batch_rays = torch.transpose(batch_rays, 0, 1)
batch_rgbs = None
if 'target_s' in xs[0]:
batch_rgbs = torch.stack([torch.as_tensor(x['target_s']) for x in xs], 0)
batch_cam_ids = None
if 'cam_id' in xs[0]:
batch_cam_ids = torch.stack([torch.as_tensor(x['cam_id']) for x in xs], 0)
return batch_rays, batch_rgbs, batch_cam_ids
return batch_rays, batch_rgbs
class ViewBatchCollater:
def __init__(self):
pass
def __call__(self, xs):
batch_rays = torch.cat([torch.as_tensor(x['rays']) for x in xs], 0)
batch_rays = torch.transpose(batch_rays, 0, 1)
# When under exhibit mode, no groundtruth will be given
batch_rgbs = None
if 'target_s' in xs[0]:
batch_rgbs = torch.cat([torch.as_tensor(x['target_s']) for x in xs], 0)
batch_cam_ids = None
if 'cam_id' in xs[0]:
batch_cam_ids = torch.cat([
torch.full((batch_rays.shape[0],), x['cam_id'], dtype=torch.int64) for x in xs
], 0)
return batch_rays, batch_rgbs, batch_cam_ids
return batch_rays, batch_rgbs
class ExhibitCollater:
def __init__(self, H, W):
self.H, self.W = H, W
def __call__(self, xs):
batch_rays = torch.stack([torch.as_tensor(x['rays']) for x in xs], 0)
batch_rays = torch.transpose(batch_rays, 0, 1)
batch_rays = batch_rays.reshape((batch_rays.shape[0], self.H, self.W, batch_rays.shape[-1]))
# When under exhibit mode, no groundtruth will be given
batch_rgbs = None
if 'target_s' in xs[0]:
batch_rgbs = torch.stack([torch.as_tensor(x['target_s']) for x in xs], 0)
batch_rgbs = batch_rgbs.reshape((self.H, self.W, batch_rgbs.shape[-1]))
return batch_rays, batch_rgbs
class PointBatchCollater:
def __init__(self):
pass
def __call__(self, xs):
batch_pts = torch.stack([torch.as_tensor(x['pts']) for x in xs], 0)
batch_rgbs = torch.stack([torch.as_tensor(x['target_s']) for x in xs], 0)
# batch_rgbs = F.relu(batch_rgbs)
return batch_pts, batch_rgbs
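# A minimal sketch of plugging a collater into torch.utils.data.DataLoader;
# the dataset is a stand-in that yields the dict layout the collaters expect
# ({'rays': ..., 'target_s': ...}).
if __name__ == '__main__':
    from torch.utils.data import DataLoader, Dataset

    class _DummyRayDataset(Dataset):
        def __len__(self):
            return 8
        def __getitem__(self, idx):
            return {'rays': torch.randn(2, 3), 'target_s': torch.randn(3)}

    loader = DataLoader(_DummyRayDataset(), batch_size=4,
                        collate_fn=RayBatchCollater())
    rays, rgbs = next(iter(loader))
    print(rays.shape, rgbs.shape)  # torch.Size([2, 4, 3]) torch.Size([4, 3])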
|
1650443
|
from .classification_metrics import expected_calibration_error, area_under_risk_rejection_rate_curve, \
compute_classification_metrics, entropy_based_uncertainty_decomposition, multiclass_brier_score
from .regression_metrics import picp, mpiw, compute_regression_metrics, plot_uncertainty_distribution, \
plot_uncertainty_by_feature, plot_picp_by_feature
from .uncertainty_characteristics_curve import UncertaintyCharacteristicsCurve
|
1650464
|
from TLV import *
from bson.binary import Binary
RFMT_IPV4 = 1 # Match IPv4 Destination
RFMT_IPV6 = 2 # Match IPv6 Destination
RFMT_ETHERNET = 3 # Match Ethernet Destination
RFMT_MPLS = 4 # Match MPLS label_in
RFMT_ETHERTYPE = 5 # Match Ethernet type
RFMT_NW_PROTO = 6 # Match Network Protocol
RFMT_TP_SRC = 7 # Match Transport Layer Src Port
RFMT_TP_DST = 8 # Match Transport Layer Dest Port
# MSB = 1; Indicates optional feature.
RFMT_IN_PORT = 254 # Match incoming port (Unimplemented)
RFMT_VLAN = 255 # Match incoming VLAN (Unimplemented)
typeStrings = {
RFMT_IPV4 : "RFMT_IPV4",
RFMT_IPV6 : "RFMT_IPV6",
RFMT_ETHERNET : "RFMT_ETHERNET",
RFMT_MPLS : "RFMT_MPLS",
RFMT_ETHERTYPE : "RFMT_ETHERTYPE",
RFMT_NW_PROTO : "RFMT_NW_PROTO",
RFMT_TP_SRC : "RFMT_TP_SRC",
RFMT_TP_DST : "RFMT_TP_DST"
}
class Match(TLV):
def __init__(self, matchType=None, value=None):
super(Match, self).__init__(matchType, self.type_to_bin(matchType, value))
def __str__(self):
return "%s : %s" % (self.type_to_str(self._type), self.get_value())
@classmethod
def IPV4(cls, address, netmask):
return cls(RFMT_IPV4, (address, netmask))
@classmethod
def IPV6(cls, address, netmask):
return cls(RFMT_IPV6, (address, netmask))
@classmethod
def ETHERNET(cls, ethernet_dst):
return cls(RFMT_ETHERNET, ethernet_dst)
@classmethod
def MPLS(cls, label):
return cls(RFMT_MPLS, label)
@classmethod
def IN_PORT(cls, port):
return cls(RFMT_IN_PORT, port)
@classmethod
def VLAN(cls, tag):
return cls(RFMT_VLAN, tag)
@classmethod
def ETHERTYPE(cls, ethertype):
return cls(RFMT_ETHERTYPE, ethertype)
@classmethod
def NW_PROTO(cls, nwproto):
return cls(RFMT_NW_PROTO, nwproto)
@classmethod
def TP_SRC(cls, port):
return cls(RFMT_TP_SRC, port)
@classmethod
def TP_DST(cls, port):
return cls(RFMT_TP_DST, port)
@classmethod
def from_dict(cls, dic):
ma = cls()
ma._type = dic['type']
ma._value = dic['value']
return ma
@staticmethod
def type_to_bin(matchType, value):
if matchType == RFMT_IPV4:
return inet_pton(AF_INET, value[0]) + inet_pton(AF_INET, value[1])
elif matchType == RFMT_IPV6:
return inet_pton(AF_INET6, value[0]) + inet_pton(AF_INET6, value[1])
elif matchType == RFMT_ETHERNET:
return ether_to_bin(value)
elif matchType in (RFMT_MPLS, RFMT_IN_PORT):
return int_to_bin(value, 32)
elif matchType in (RFMT_VLAN, RFMT_ETHERTYPE, RFMT_TP_SRC, RFMT_TP_DST):
return int_to_bin(value, 16)
elif matchType == RFMT_NW_PROTO:
return int_to_bin(value, 8)
else:
return None
@staticmethod
def type_to_str(matchType):
if matchType in typeStrings:
return typeStrings[matchType]
else:
return str(matchType)
def get_value(self):
if self._type == RFMT_IPV4:
return (inet_ntop(AF_INET, self._value[:4]), inet_ntop(AF_INET, self._value[4:]))
elif self._type == RFMT_IPV6:
return (inet_ntop(AF_INET6, self._value[:16]), inet_ntop(AF_INET6, self._value[16:]))
elif self._type == RFMT_ETHERNET:
return bin_to_ether(self._value)
elif self._type in (RFMT_MPLS, RFMT_IN_PORT, RFMT_VLAN, RFMT_ETHERTYPE,
RFMT_NW_PROTO, RFMT_TP_SRC, RFMT_TP_DST):
return bin_to_int(self._value)
else:
return None
    def set_value(self, value):
        self._value = Binary(self.type_to_bin(self._type, value), 0)
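# A short construction sketch; addresses are illustrative. This assumes the
# wildcard TLV import above re-exports the socket helpers (inet_pton etc.)
# used by type_to_bin.
if __name__ == '__main__':
    match = Match.IPV4('10.0.0.0', '255.0.0.0')
    print(match)  # -> RFMT_IPV4 : ('10.0.0.0', '255.0.0.0')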
|
1650590
|
from hoplite.utils import server_logging
import os
import sys
import platform
from tests.api import HopliteApiTestCase
import tempfile
import shutil
import pip
import pkg_resources
logger = server_logging.get_server_logger(__name__)
class RootSiteTestCase(HopliteApiTestCase):
def setUp(self):
super(RootSiteTestCase, self).setUp()
'''
This test is failing intermittently. I think it has to do with how exactly things are installed into the
virtual environment and when/how the packages are reloaded.
def test_reload_site_packages(self):
opsys = platform.system().lower()
python_exe_path = sys.executable
bin_path = os.path.split(python_exe_path)[0]
venv_path = bin_path
site_path = ""
if opsys == 'windows':
if hasattr(sys, 'real_prefix'): # If running in virtual environment, we must account for different folder structure
site_path = os.path.join(venv_path, "..", "Lib", "site-packages")
else:
site_path = os.path.join(venv_path, "Lib", "site-packages")
elif opsys == 'linux' or opsys == 'darwin':
if hasattr(sys, 'real_prefix'): # If running in virtual environment, we must account for different folder structure
site_path = os.path.join(venv_path, "..", "lib", "python{0}.{1}".format(sys.version_info[0], sys.version_info[1]),
"site-packages")
else:
site_path = os.path.join(venv_path, "lib", "python{0}.{1}".format(sys.version_info[0], sys.version_info[1]),
"site-packages")
pth_file_path = os.path.join(site_path, "test.pth")
try:
pth_file = open(pth_file_path, 'w+')
pth_file.write(os.path.abspath("./tests"))
pth_file.close()
r = self.client.put('/reload')
self.assertOk(r)
except Exception, e:
os.remove(pth_file_path)
raise e
# This will raise if the path is not in sys.path
sys.path.remove(os.path.abspath("./tests"))
'''
def test_loads_new_entry_points(self):
setup_str = "from setuptools import setup, find_packages;setup(name='TestEntryPointPackage', version='0.1', packages=find_packages(), entry_points={'test.entry': ['job1=tepp.myjob']})"
tempdir = tempfile.mkdtemp()
try:
setup_py = open(os.path.join(tempdir, "setup.py"), 'w')
setup_py.write(setup_str)
setup_py.close()
package_path = os.path.join(tempdir, "tepp")
os.mkdir(package_path)
init_file = open(os.path.join(package_path, "__init__.py"), 'w')
init_file.close()
myjob = open(os.path.join(package_path, "myjob.py"), 'w')
myjob.close()
pip.main(['install', tempdir])
r = self.client.put('/reload')
self.assertOk(r)
count = 0
for entry_point in pkg_resources.iter_entry_points(group="test.entry"):
self.assertEqual(entry_point.name, "job1")
count += 1
if count > 1:
self.fail("test.entry entry point has more than one definition")
finally:
pip.main(['uninstall', '-y', "TestEntryPointPackage"])
shutil.rmtree(tempdir)
|
1650615
|
from django.contrib import admin
from libya_elections.admin_site import admin_site
from .models import Station
from .utils import GENDER_NAMES
def gender(station):
return GENDER_NAMES[station.gender]
class StationAdmin(admin.ModelAdmin):
list_display = ['election', 'center', 'number', gender, 'n_registrants', ]
search_fields = ['election__id', 'center__center_id', ]
# Station records are not editable.
readonly_fields = [field.name for field in Station._meta.local_fields]
# Station records are only created programmatically via rollgen. No one is allowed to create
# them via the admin.
def has_add_permission(self, request):
return False
admin_site.register(Station, StationAdmin)
|
1650626
|
import bigflow.build.reflect
spec = bigflow.build.reflect.get_project_spec()
if spec.name != 'bf-selfbuild-project':
raise AssertionError(spec.name)
|
1650629
|
import re
from .errors import FieldLookupError, BadValueError
FORBIDDEN_VALUE_REGEX = re.compile(ur'([^_.@ \w-]+)', re.UNICODE)
class GeoQueryArguments(object):
def __init__(self, lat, lon, radius):
self.lat, self.lon, self.radius = lat, lon, radius
class FilterExpr(object):
# Default separator between field name and lookup type in the left hand
# side of the filter expression
SEPARATOR = '__'
    # If the parsed comparison operator type from the left hand side of the
# filter expression is invalid, fall back to this
DEFAULT_OP = 'exact'
# Only these values are valid after `SEPARATOR` in property names. Each
# comparison operator string is mapped to its equivalent query syntax
# template
OPS = {
'contains': u'%s:(%s)',
'exact': u'%s:"%s"',
'lt': u'%s < %s',
'lte': u'%s <= %s',
'gt': u'%s > %s',
'gte': u'%s >= %s',
'geo': u'distance(%s, geopoint(%f, %f)) < %d',
'geo_lt': u'distance(%s, geopoint(%f, %f)) < %d',
'geo_lte': u'distance(%s, geopoint(%f, %f)) <= %d',
'geo_gt': u'distance(%s, geopoint(%f, %f)) > %d',
'geo_gte': u'distance(%s, geopoint(%f, %f)) >= %d'
}
def __init__(self, k, v, valid_ops=None):
self.prop_expr, self.value = k, v
self.prop_name, self.op = self._split_filter(self.prop_expr)
self.valid_ops = valid_ops or self.OPS
    def __str__(self):
        """`str()`ing a `FilterExpr` returns the string for this filter
        formatted in the search API syntax.
        """
        # Delegate to __unicode__ so geo operators (which take four template
        # arguments) are handled in one place.
        return self.__unicode__()
def get_value(self):
return self.__unicode__()
def __unicode__(self):
template = self.OPS[self.op]
if self.op.startswith('geo'):
if not isinstance(self.value, GeoQueryArguments):
raise TypeError(self.value)
return template % (
self.prop_name,
self.value.lat,
self.value.lon,
self.value.radius
)
return template % (self.prop_name, self.value)
def __debug(self):
"""Enable debugging features"""
# This is handy for testing: see Q.__debug for why
self.OPS['is'] = ('%s == %s')
def __undebug(self):
if 'is' in self.OPS:
del self.OPS['is']
def _split_filter(self, prop_expr):
"""Splits `prop_expr` by `self.SEPARATOR` and returns the parts,
with the comparison operator defaulting to a sensible value if it's not
in `self.OPS` or if it's missing.
>>> prop_expr = 'rating__lte'
>>> self._split_filter(prop_expr)
['rating', 'lte']
>>> prop_expr = 'rating'
>>> self._split_filter(prop_expr)
['rating', self.DEFAULT_OP]
"""
        op_name = self.DEFAULT_OP
        if self.SEPARATOR in prop_expr:
            # rsplit tolerates field names that themselves contain the separator
            prop_name, op_name = prop_expr.rsplit(self.SEPARATOR, 1)
if op_name not in self.OPS:
# XXX: raise an error here?
op_name = self.DEFAULT_OP
else:
prop_name = prop_expr
return [prop_name, op_name]
class Q(object):
AND = u'AND'
OR = u'OR'
NOT = u'NOT'
DEFAULT = AND
def __init__(self, **kwargs):
self.kwargs = kwargs
children = kwargs.items()
self.children = []
for k, v in children:
try:
v_is_list = bool(iter(v)) and not issubclass(type(v), basestring)
except TypeError:
v_is_list = False
if v_is_list:
q = Q(**{k:v[0]})
for value in v[1:]:
q |= Q(**{k:value})
self.children.append(q)
else:
self.children.append((k, v))
self.conn = self.DEFAULT
self.inverted = False
def __and__(self, other):
return self._combine(other, self.AND)
def __or__(self, other):
return self._combine(other, self.OR)
def __invert__(self):
obj = type(self)(**self.kwargs)
obj.inverted = not self.inverted
return obj
def __str__(self):
"""Recursively stringify this expression and its children."""
        if self.inverted:
            tmpl = u"({0} {{0}})".format(self.NOT)
        else:
            tmpl = u'({0})'
conn_fmt = u' {0} '.format(self.conn)
joined_nodes = conn_fmt.join([str(c) for c in self.children])
return tmpl.format(joined_nodes)
def __debug(self):
"""Enable debugging features. Handy for testing with stuff like:
>>> q = Q(True__is=True) | Q(True__is=False) & Q(False__is=False)
>>> q._Q__debug()
>>> str(q)
"(True == True) or ((True == False) and (False == False))"
>>> eval(_) == True
True
"""
for c in self.children:
getattr(c, '_%s__debug' % c.__class__.__name__)()
        # strings are immutable, so map() cannot lower them in place; reassign instead
        self.NOT, self.AND, self.OR = self.NOT.lower(), self.AND.lower(), self.OR.lower()
        self.conn = self.conn.lower()
def __undebug(self):
"""Undo `__debug()`"""
for c in self.children:
getattr(c, '_%s__undebug' % c.__class__.__name__)()
        self.NOT, self.AND, self.OR = self.NOT.upper(), self.AND.upper(), self.OR.upper()
        self.conn = self.conn.upper()
def _combine(self, other, conn):
"""Return a new Q object with `self` and `other` as children joined
by `conn`.
"""
obj = type(self)()
obj.add(self)
obj.add(other)
obj.conn = conn
return obj
def add(self, child):
self.children.append(child)
def get_filters(self):
filters = []
for q in self.children:
if type(q) == Q:
filters.extend(q.get_filters())
else:
filters.append(q)
return filters
class Query(object):
"""Represents a search API query language string.
>>> query = Query()
>>> query.add_keywords('hello I am things')
>>> query.add_q(Q(things__lte=3))
>>> unicode(q)
'hello I am things AND (things <= 3)'
"""
AND = 'AND'
OR = 'OR'
NOT = 'NOT'
def __init__(self, document_class):
self.document_class = document_class
self._gathered_q = None
self._keywords = []
    def __str__(self):
        return self.__unicode__().encode('utf-8')
    def __unicode__(self):
        """This is how we get to the actual underlying querystring"""
        return unicode(self.build_query())
def _clean(self, value):
"""Remove any punctuation that might break the search API's lexer
(e.g. '^') from the given value.
"""
return FORBIDDEN_VALUE_REGEX.sub('', value)
def _clone(self):
new_q = type(self)(
self.document_class
)
new_q._gathered_q = self._gathered_q
new_q._keywords = self._keywords
return new_q
def add_q(self, q, conn=None):
"""Add a `Q` object to the internal reduction of gathered Qs,
effectively adding a filter clause to the querystring.
"""
if self._gathered_q is None:
self._gathered_q = q
return self
if not conn:
conn = self._gathered_q.DEFAULT
conn = conn.lower()
self._gathered_q = getattr(self._gathered_q, '__%s__' % conn)(q)
return self
def add_keywords(self, keywords):
"""Add keywords to the querystring"""
self._keywords.append(keywords)
return self
def get_filters(self):
if self._gathered_q:
return self._gathered_q.get_filters()
return []
def get_keywords(self):
return self._keywords
def unparse_filter(self, child):
"""Unparse a `Q` object or tuple of the form `(field_lookup, value)`
into the filters it represents. E.g.:
>>> q = Q(title__contains="die hard") & Q(rating__gte=7)
>>> query = Query(FilmDocument)
>>> query.unparse(q)
"((title:'die hard') AND (rating >= 7))"
"""
# If we have a `Q` object, recursively unparse its children
if isinstance(child, Q):
tmpl = u'(%s)'
if child.inverted:
tmpl = u'%s (%s)' % (child.NOT, '%s')
conn = u' %s ' % child.conn
return tmpl % (
conn.join([self.unparse_filter(c) for c in child.children])
)
if child is None:
return None
# `child` is a tuple of the form `(field__lookup, value)`
# TODO: Move this checking to SearchQuery.filter
filter_lookup, value = child
expr = FilterExpr(*child)
# Get the field name to lookup without any comparison operators that
# might be present in the field name string
doc_fields = self.document_class._meta.fields
# Can't filter on fields not in the document's fields
if expr.prop_name not in doc_fields:
raise FieldLookupError(u'Prop name %s not in the field list for %s'
% (expr.prop_name, self.document_class.__name__))
field = doc_fields[expr.prop_name]
try:
value = field.prep_value_for_filter(value, filter_expr=expr)
except (TypeError, ValueError):
raise BadValueError(
u'Value %s invalid for filtering on %s.%s (a %s)' % (
value,
self.document_class.__name__,
expr.prop_name,
type(field))
)
# Create a new filter expression with the old filter lookup but with
# the newly converted value
return unicode(FilterExpr(filter_lookup, value).get_value())
def build_filters(self):
"""Get the search API querystring representation for all gathered
filters so far, ready for passing to the search API.
"""
return self.unparse_filter(self._gathered_q)
def build_keywords(self):
"""Get the search API querystring representation for the currently
gathered keywords.
"""
if self._keywords:
return self._clean(u' '.join(self._keywords))
def build_query(self):
"""Build the full querystring"""
filters = self.build_filters()
keywords = self.build_keywords()
if filters and keywords:
return u'%s %s %s' % (keywords, self.AND, filters)
if filters:
return filters
if keywords:
return keywords
return u''
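# A minimal sketch, assuming `document_class` declares `title` and `rating`
# fields (any concrete document class is hypothetical here).
def _example_query(document_class):
    q = Q(title__contains=u'die hard') & Q(rating__gte=7)
    query = Query(document_class)
    query.add_keywords(u'bruce willis')
    query.add_q(q)
    return query.build_query()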
|
1650631
|
def plot():
import matplotlib.cm as cm
import matplotlib.pyplot as plt
import numpy as np
x, y = np.meshgrid(np.linspace(0, 1), np.linspace(0, 1))
z = x**2 - y**2
fig = plt.figure()
plt.pcolormesh(x, y, z, cmap=cm.viridis, shading="gouraud")
# plt.colorbar()
return fig
def test():
from .helpers import assert_equality
# test relative data path
assert_equality(
plot,
__file__[:-3] + "_reference.tex",
# tex_relative_path_to_data="data/files"
)
|
1650640
|
from __future__ import annotations
from datetime import date, datetime
from pyproject_fmt import __version__
company, name = "tox-dev", "pyproject-fmt"
release, version = __version__, ".".join(__version__.split(".")[:2])
copyright = f"2022-{date.today().year}, {company}"
master_doc, source_suffix = "index", ".rst"
html_theme = "furo"
html_title, html_last_updated_fmt = name, datetime.now().isoformat()
pygments_style, pygments_dark_style = "sphinx", "monokai"
extensions = [
"sphinx.ext.autodoc",
"sphinx.ext.extlinks",
"sphinx.ext.intersphinx",
"sphinx_argparse_cli",
"sphinx_autodoc_typehints",
"sphinx_copybutton",
]
exclude_patterns = ["_build", "changelog/*", "_draft.rst"]
autoclass_content, autodoc_member_order, autodoc_typehints = "class", "bysource", "none"
autodoc_default_options = {"member-order": "bysource", "undoc-members": True, "show-inheritance": True}
extlinks = {
"issue": (f"https://github.com/{company}/{name}/issues/%s", "#"),
"user": ("https://github.com/%s", "@"),
"gh": ("https://github.com/%s", ""),
}
intersphinx_mapping = {"python": ("https://docs.python.org/3", None)}
nitpicky = True
nitpick_ignore = []
|
1650740
|
from __future__ import unicode_literals
from rest_framework import serializers
from rest_framework.response import Response
from rest_framework.decorators import detail_route
from django.db.models import Q
from onadata.apps.fieldsight.models import Site
# from onadata.apps.main.models.meta_data import MetaData
from onadata.apps.api.viewsets.xform_list_api import XFormListApi
from onadata.apps.fsforms.models import FieldSightXF
from onadata.apps.fsforms.serializers.FieldSightXFormSerializer import FSXFormListSerializer
from onadata.apps.fsforms.serializers.FieldSightXformManifestSerializer import FSXFormManifestSerializer
class AssignedXFormListApi(XFormListApi):
serializer_class = FSXFormListSerializer
queryset = FieldSightXF.objects.all()
template_name = 'fsforms/assignedFormList.xml'
def filter_queryset(self, queryset):
if self.request.user.is_anonymous():
self.permission_denied(self.request)
site_id = self.kwargs.get('site_id', None)
queryset = queryset.filter(site__id=site_id, site__isnull=False, is_deployed=True)
return queryset
@detail_route(methods=['GET'])
def manifest(self, request, *args, **kwargs):
if kwargs.get('site_id') == '0':
self.object = FieldSightXF.objects.get(pk=kwargs.get('pk'))
else:
self.object = self.get_object()
object_list = []
context = self.get_serializer_context()
serializer = FSXFormManifestSerializer(object_list, many=True,
context=context)
return Response(serializer.data, headers=self.get_openrosa_headers())
def list(self, request, *args, **kwargs):
self.object_list = self.filter_queryset(self.get_queryset())
serializer = self.get_serializer(self.object_list, many=True)
return Response(serializer.data, headers=self.get_openrosa_headers())
def project_forms(self, request, *args, **kwargs):
self.object_list = self.queryset.filter(Q(project__id=kwargs.get('project_id'), site__isnull=True,
is_deleted=False, is_deployed=True) |
Q(project__id=kwargs.get('project_id'), site__isnull=True,
is_survey=True, is_deleted=False, is_deployed=True))
serializer = self.get_serializer(self.object_list, many=True)
return Response(serializer.data, headers=self.get_openrosa_headers())
def site_overide_forms(self, request, *args, **kwargs):
self.object_list = self.queryset.filter(Q(site__project_id=kwargs.get('project_id'),
fsform__isnull=True, project__isnull=True,
is_deployed=True, is_deleted=False) |
Q(site__project_id=kwargs.get('project_id'),
from_project=False, is_deployed=True, is_deleted=False))
serializer = self.get_serializer(self.object_list, many=True)
return Response(serializer.data, headers=self.get_openrosa_headers())
|
1650741
|
from models.problem import Problem
from models.submission import Submission
import typing
from main import background_task_queue, db, config, permission_manager
from main import web_app as app
from common.permission import require_permission
from common.utils import unpack_argument
from flask_sqlalchemy import BaseQuery
from flask import session
from utils import make_response
from models.user import User
import sqlalchemy.sql.expression as expr
from sqlalchemy.sql import func
import flask
@app.route("/api/card/problem", methods=["POST"])
@unpack_argument
def api_card_problem(problemID: int):
"""
    Get problem card data.
    {
        "id": "problem ID",
        "title": "problem title",
        "acceptedCount": "number of accepted submissions",
        "submitCount": "total number of submissions",
        "myStatus": { // empty if the user has no submission
            "score": "my score",
            "fullScore": "full score of the problem",
            "status": "submission status",
            "submissionID": "submission ID"
}
}
"""
problem: Problem = db.session.query(
Problem.id,
Problem.title,
Problem.cached_accepted_count,
Problem.cached_submit_count,
Problem.subtasks).filter(Problem.id == problemID).one_or_none()
if not problem:
flask.abort(404)
result = {
"id": problem.id,
"title": problem.title,
"acceptedCount": problem.cached_accepted_count,
"submitCount": problem.cached_submit_count,
"myStatus": {}
}
my_submission: Submission = db.session.query(
Submission.id,
Submission.score,
Submission.status
).filter(expr.and_(
Submission.uid == session.get("uid", -1),
Submission.problem_id == problemID
)).order_by(Submission.score.desc()).limit(1).one_or_none()
if my_submission:
result["myStatus"] = {
"score": my_submission.score,
"fullScore": sum(item["score"] for item in problem.subtasks),
"status": my_submission.status,
"submissionID": my_submission.id
}
return make_response(0, data=result)
|
1650754
|
import FreeCAD
import Part
import Import
PLOT_CONTROL_POINTS = False
def generate_bspline_patch(vertices, n_nodes, degree, knots):
"""
    Generates a B-spline patch from the given vertices.
    :param vertices: lexicographically numbered control points in a 2D array of 3-component points
    :param n_nodes: number of control points per parametric direction
    :param degree: degree of the patch in both parametric directions
    :param knots: knot vector used in both parametric directions
"""
n_nodes_u = n_nodes
n_nodes_v = n_nodes
degree_u = degree
degree_v = degree
knot_u = knots
knot_v = knots
patch = Part.BSplineSurface()
patch.increaseDegree(degree_u, degree_v)
for i in range(4, len(knot_u)-4):
        patch.insertUKnot(knot_u[i], 1, 0)  # the second argument is the knot multiplicity, the third the insertion tolerance
for i in range(4, len(knot_v)-4):
        patch.insertVKnot(knot_v[i], 1, 0)  # the second argument is the knot multiplicity, the third the insertion tolerance
for ii in range(0, n_nodes_u):
for jj in range(0, n_nodes_v):
k = ii + jj * n_nodes_u
v = vertices[k]
control_point = FreeCAD.Vector(v[0], v[1], v[2])
patch.setPole(ii + 1, jj + 1, control_point, 1)
            if PLOT_CONTROL_POINTS:
                Part.show(Part.Vertex(control_point))  # plot the control point; toggled by PLOT_CONTROL_POINTS above
return patch.toShape()
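# A short construction sketch (run inside FreeCAD's Python console): a flat
# 4x4 cubic patch over the unit square with a clamped knot vector, matching
# the multiplicity-4 end knots the insertion loops above assume.
if __name__ == '__main__':
    nodes, deg = 4, 3
    clamped_knots = [0.0] * (deg + 1) + [1.0] * (deg + 1)
    grid = [[i / 3.0, j / 3.0, 0.0] for j in range(nodes) for i in range(nodes)]
    shape = generate_bspline_patch(grid, nodes, deg, clamped_knots)
    Part.show(shape)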
|
1650769
|
import torch.nn as nn
from .registry import POSITION_ENCODERS
from .utils import generate_encoder
@POSITION_ENCODERS.register_module
class Adaptive2DPositionEncoder(nn.Module):
def __init__(self, in_channels, max_h=200, max_w=200, dropout=0.1):
super(Adaptive2DPositionEncoder, self).__init__()
h_position_encoder = generate_encoder(in_channels, max_h)
h_position_encoder = h_position_encoder.transpose(0, 1).view(
1, in_channels, max_h, 1)
w_position_encoder = generate_encoder(in_channels, max_w)
w_position_encoder = w_position_encoder.transpose(0, 1).view(
1, in_channels, 1, max_w)
self.register_buffer('h_position_encoder', h_position_encoder)
self.register_buffer('w_position_encoder', w_position_encoder)
self.h_scale = self.scale_factor_generate(in_channels)
self.w_scale = self.scale_factor_generate(in_channels)
self.pool = nn.AdaptiveAvgPool2d(1)
self.dropout = nn.Dropout(p=dropout)
def scale_factor_generate(self, in_channels):
scale_factor = nn.Sequential(
nn.Conv2d(in_channels, in_channels, kernel_size=1),
nn.ReLU(inplace=True),
nn.Conv2d(in_channels, in_channels, kernel_size=1), nn.Sigmoid())
return scale_factor
def forward(self, x):
b, c, h, w = x.size()
avg_pool = self.pool(x)
h_pos_encoding = self.h_scale(
avg_pool) * self.h_position_encoder[:, :, :h, :]
w_pos_encoding = self.w_scale(
avg_pool) * self.w_position_encoder[:, :, :, :w]
out = x + h_pos_encoding + w_pos_encoding
out = self.dropout(out)
return out
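# A minimal smoke-test sketch; channel and spatial sizes are arbitrary.
if __name__ == '__main__':
    import torch
    encoder = Adaptive2DPositionEncoder(in_channels=64, max_h=64, max_w=64)
    feats = torch.randn(2, 64, 32, 48)
    print(encoder(feats).shape)  # torch.Size([2, 64, 32, 48])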
|
1650817
|
from os.path import isfile
from .formats.zip_file import ZIPFile
from .formats.compressed_file import TempDirectory
class Decompressor(object):
def __init__(self, f):
self.f = f
def get_fmt(self):
magic = self.f.read(8)
self.f.seek(0)
for fmt in [ ZIPFile ]:
if fmt.is_magic(magic):
return fmt
return None
def extract(self, fmt, tmp_dir):
compressed = fmt(self.f)
return [ f for f in compressed.extract(tmp_dir) if isfile(f) ]
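# A short usage sketch; 'archive.zip' is an illustrative path.
if __name__ == '__main__':
    import tempfile
    with open('archive.zip', 'rb') as f:
        decompressor = Decompressor(f)
        fmt = decompressor.get_fmt()
        if fmt is not None:
            print(decompressor.extract(fmt, tempfile.mkdtemp()))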
|
1650820
|
import torch
from .util import enable_running_stats, disable_running_stats
import contextlib
from torch.distributed import ReduceOp
class GSAM(torch.optim.Optimizer):
def __init__(self, params, base_optimizer, model, gsam_alpha, rho_scheduler, adaptive=False, perturb_eps=1e-12, grad_reduce='mean', **kwargs):
defaults = dict(adaptive=adaptive, **kwargs)
super(GSAM, self).__init__(params, defaults)
self.model = model
self.base_optimizer = base_optimizer
self.param_groups = self.base_optimizer.param_groups
self.adaptive = adaptive
self.rho_scheduler = rho_scheduler
self.perturb_eps = perturb_eps
self.alpha = gsam_alpha
# initialize self.rho_t
self.update_rho_t()
# set up reduction for gradient across workers
if grad_reduce.lower() == 'mean':
if hasattr(ReduceOp, 'AVG'):
self.grad_reduce = ReduceOp.AVG
self.manual_average = False
else: # PyTorch <= 1.11.0 does not have AVG, need to manually average across processes
self.grad_reduce = ReduceOp.SUM
self.manual_average = True
elif grad_reduce.lower() == 'sum':
self.grad_reduce = ReduceOp.SUM
self.manual_average = False
else:
raise ValueError('"grad_reduce" should be one of ["mean", "sum"].')
@torch.no_grad()
def update_rho_t(self):
self.rho_t = self.rho_scheduler.step()
return self.rho_t
@torch.no_grad()
def perturb_weights(self, rho=0.0):
grad_norm = self._grad_norm( weight_adaptive = self.adaptive )
for group in self.param_groups:
scale = rho / (grad_norm + self.perturb_eps)
for p in group["params"]:
if p.grad is None: continue
self.state[p]["old_g"] = p.grad.data.clone()
e_w = p.grad * scale.to(p)
if self.adaptive:
e_w *= torch.pow(p, 2)
p.add_(e_w) # climb to the local maximum "w + e(w)"
self.state[p]['e_w'] = e_w
@torch.no_grad()
def unperturb(self):
for group in self.param_groups:
for p in group['params']:
if 'e_w' in self.state[p].keys():
p.data.sub_(self.state[p]['e_w'])
@torch.no_grad()
def gradient_decompose(self, alpha=0.0):
# calculate inner product
inner_prod = 0.0
for group in self.param_groups:
for p in group['params']:
if p.grad is None: continue
inner_prod += torch.sum(
self.state[p]['old_g'] * p.grad.data
)
# get norm
new_grad_norm = self._grad_norm()
old_grad_norm = self._grad_norm(by='old_g')
# get cosine
cosine = inner_prod / (new_grad_norm * old_grad_norm + self.perturb_eps)
# gradient decomposition
for group in self.param_groups:
for p in group['params']:
if p.grad is None: continue
vertical = self.state[p]['old_g'] - cosine * old_grad_norm * p.grad.data / (new_grad_norm + self.perturb_eps)
p.grad.data.add_( vertical, alpha=-alpha)
@torch.no_grad()
def _sync_grad(self):
        if torch.distributed.is_initialized(): # synchronize final gradients
for group in self.param_groups:
for p in group['params']:
if p.grad is None: continue
if self.manual_average:
torch.distributed.all_reduce(p.grad, op=self.grad_reduce)
world_size = torch.distributed.get_world_size()
p.grad.div_(float(world_size))
else:
torch.distributed.all_reduce(p.grad, op=self.grad_reduce)
return
@torch.no_grad()
def _grad_norm(self, by=None, weight_adaptive=False):
#shared_device = self.param_groups[0]["params"][0].device # put everything on the same device, in case of model parallelism
if not by:
norm = torch.norm(
torch.stack([
( (torch.abs(p.data) if weight_adaptive else 1.0) * p.grad).norm(p=2)
for group in self.param_groups for p in group["params"]
if p.grad is not None
]),
p=2
)
else:
norm = torch.norm(
torch.stack([
( (torch.abs(p.data) if weight_adaptive else 1.0) * self.state[p][by]).norm(p=2)
for group in self.param_groups for p in group["params"]
if p.grad is not None
]),
p=2
)
return norm
def load_state_dict(self, state_dict):
super().load_state_dict(state_dict)
self.base_optimizer.param_groups = self.param_groups
def maybe_no_sync(self):
if torch.distributed.is_initialized():
return self.model.no_sync()
else:
return contextlib.ExitStack()
@torch.no_grad()
def set_closure(self, loss_fn, inputs, targets, **kwargs):
# create self.forward_backward_func, which is a function such that
# self.forward_backward_func() automatically performs forward and backward passes.
# This function does not take any arguments, and the inputs and targets data
# should be pre-set in the definition of partial-function
def get_grad():
self.base_optimizer.zero_grad()
with torch.enable_grad():
outputs = self.model(inputs)
loss = loss_fn(outputs, targets, **kwargs)
loss_value = loss.data.clone().detach()
loss.backward()
return outputs, loss_value
self.forward_backward_func = get_grad
@torch.no_grad()
def step(self, closure=None):
if closure:
get_grad = closure
else:
get_grad = self.forward_backward_func
with self.maybe_no_sync():
# get gradient
outputs, loss_value = get_grad()
# perturb weights
self.perturb_weights(rho=self.rho_t)
# disable running stats for second pass
disable_running_stats(self.model)
# get gradient at perturbed weights
get_grad()
# decompose and get new update direction
self.gradient_decompose(self.alpha)
# unperturb
self.unperturb()
# synchronize gradients across workers
self._sync_grad()
# update with new directions
self.base_optimizer.step()
# enable running stats
enable_running_stats(self.model)
return outputs, loss_value
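# A minimal training-step sketch following the set_closure/step protocol
# above; the loss function, scheduler and data are placeholders.
def _gsam_train_step(gsam, loss_fn, inputs, targets, lr_scheduler=None):
    gsam.set_closure(loss_fn, inputs, targets)
    outputs, loss = gsam.step()
    if lr_scheduler is not None:
        lr_scheduler.step()
    gsam.update_rho_t()  # advance the rho schedule once per step
    return outputs, loss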
|
1650858
|
import cryptocompare
from firebot import CMD_HELP, fire  # `fire` (the bot client) is assumed to be exported by the firebot package
from firebot.utils import admin_cmd
@fire.on(admin_cmd(pattern="crypto (.*)"))
async def _(event):
if event.fwd_from:
return
input_str = event.pattern_match.group(1)
stark = input_str.split(" ", 1)
curreo = stark[0]
currency1 = stark[1]
curre = curreo.upper()
currency = currency1.upper()
take = ""
take = cryptocompare.get_price(currency, curr=curre)
t = take.get(currency)
k = curre
q = str(t.get(curre))
await event.edit(
f"<b><u>Conversion complete</b></u> \n<b>cryptocurrency</b>:- <code>{currency}</code> \n<b>cryptocurrency value in </b> <code>{k}</code> <b> is :- </b> <code> {q}</code>",
parse_mode="HTML",
)
CMD_HELP.update(
{
"crypto": "**Cryptocurrency**\
\n\n**Syntax : **`.crypto <currency to give value in> <Cryptocurrency shortname>`\
\n**Usage :** Shows cryptocurrency value in given currency.\
\n\n**Example : **`.crypto inr btc`\
\nThis above syntax gives bitcoin's current value in INR."
}
)
|
1650871
|
import math
import asyncio
import logging
from .client import Client
from .server import Server
logger = logging.getLogger(__name__)
class Arbiter(Server):
"""the arbiter manages all of the workers"""
    def __init__(self):
        self.agents = {}
        self.workers = {}
        self.ncores = {}  # core counts reported by registered workers
        super().__init__()
self.handlers = {
'register': self.register,
'populate': self.populate,
'call_agent': self.call_agent,
'call_agents': self.call_agents,
}
@asyncio.coroutine
def call_agents(self, data):
"""call a method on all agents"""
tasks = []
for id, worker in self.workers.items():
# just fwd call to workers
tasks.append(asyncio.Task(worker.send_recv(data)))
return (yield from asyncio.gather(*tasks))
@asyncio.coroutine
def populate(self, data):
agents = data['agents']
agents_per_worker = math.ceil(len(agents)/len(self.workers))
tasks = []
i = 0
for id, worker in self.workers.items():
to_send = agents[i:i+agents_per_worker]
tasks.append(asyncio.Task(worker.send_recv({
'cmd': 'populate', 'agents': to_send})))
# keep track of where agents are
for agent in to_send:
self.agents[agent.id] = id
i += agents_per_worker
yield from asyncio.gather(*tasks)
return {'success': 'ok'}
@asyncio.coroutine
def register(self, data):
id, type = data['id'], data['type']
try:
if type == 'worker':
host, port = data['host'], data['port']
self.workers[id] = Client(host, port)
self.ncores[id] = data['ncores']
logger.info('registered {} at {}:{}'.format(type, host, port))
return {'success': 'ok'}
except ConnectionRefusedError:
logger.exception('could not connect to {}:{}'.format(host, port))
raise
@asyncio.coroutine
def call_agent(self, data):
"""call a method on an agent and get the result"""
id = data['id']
# find worker that agent is at
worker_id = self.agents[id]
worker = self.workers[worker_id]
# pass along the request to that worker, return the result
return (yield from worker.send_recv(data))
|
1650875
|
import argparse
import sys
import os
import ggutils.s3_access as s3_access
import smalltrain as st
try:
# For the case smalltrain is installed as Python library
print('try to load smalltrain modules from Python library')
from smalltrain.model.nn_model import NNModel
print('smalltrain modules are ready to be loaded from Python library')
except ModuleNotFoundError:
if os.environ.get('SMALLTRAIN_HOME'):
# For the case the environmental value SMALLTRAIN_HOME is exported
_smalltrain_home_path = os.environ.get('SMALLTRAIN_HOME')
_smalltrain_home_path = os.path.join(_smalltrain_home_path, 'src')
else:
# Try to load smalltrain modules from current directory
_smalltrain_home_path = './'
print('try to load smalltrain modules from the path: {}'.format(_smalltrain_home_path))
sys.path.append(_smalltrain_home_path)
from smalltrain.model.nn_model import NNModel
print('smalltrain modules are ready to be loaded from the path: {}'.format(_smalltrain_home_path))
def get_model_list():
from smalltrain.model.one_dim_cnn_model import OneDimCNNModel
from smalltrain.model.two_dim_cnn_model import TwoDimCNNModel
from smalltrain.model.two_dim_cnn_model_v2 import TwoDimCNNModelV2
model_list = [OneDimCNNModel(), TwoDimCNNModel(), TwoDimCNNModelV2()]
model_id_list = [model.MODEL_ID for model in model_list]
return model_list, model_id_list
MODEL_LIST, MODEL_ID_LIST = get_model_list()
def construct_model(log_dir_path, model_id, hparams, train_data=None, debug_mode=True):
if model_id in MODEL_ID_LIST:
for _m in MODEL_LIST:
if _m.MODEL_ID == model_id:
model = _m.construct_and_prepare_model(log_dir_path=log_dir_path, model_id=model_id, hparams=hparams, train_data=train_data, debug_mode=debug_mode)
return model
raise TypeError('Invalid model_id:{}'.format(model_id))
# MODEL_ID_4NN = '4NN_20180808' # 4 nn model 2019/09/10
# MODEL_ID_DNN = 'DNN' # 4 nn model 2019/09/10
# MODEL_ID_1D_CNN = '1D_CNN'
# MODEL_ID_CC = 'CC' # Carbon Copy
# MODEL_ID = MODEL_ID_4NN
class Operation:
"""Operation class as hyper parameter of train or prediction operation
Arguments:
params: A dictionary that maps hyper parameter keys and values
debug_mode: Boolean, if `True` then running with debug mode.
"""
    def __init__(self, hparams=None, setting_file_path=None):
        self._hparam_ins = st.Hyperparameters(hparams, setting_file_path)
        self.hparams_dict = self._hparam_ins.__dict__
        self.model = None  # constructed lazily by construct_and_prepare_model()
        print('init hparams_dict: {}'.format(self.hparams_dict))
def get_hparams_ins(self):
return self._hparam_ins
def update_params_from_file(self, setting_file_path):
self._hparam_ins.update_hyper_param_from_file(setting_file_path)
self.hparams_dict = self._hparam_ins.__dict__
def update_hyper_param_from_json(self, json_obj):
self._hparam_ins.update_hyper_param_from_json(json_obj)
self.hparams_dict = self._hparam_ins.__dict__
def read_hyper_param_from_file(self, setting_file_path):
'''
        This method is kept for backward compatibility with existing call sites:
:param setting_file_path:
:return:
'''
self.update_params_from_file(setting_file_path=setting_file_path)
self.hparams_dict = self._hparam_ins.__dict__
return self.hparams_dict
def prepare_dirs(self):
'''
Prepare directories used in operation
:return:
'''
log_dir_path = self.hparams_dict['save_root_dir'] + '/logs/' + self.hparams_dict['train_id']
log_dir_path = log_dir_path.replace('//', '/')
os.makedirs(log_dir_path, exist_ok=True)
self.log_dir_path = log_dir_path
# Set value to hyperparameter
self._hparam_ins.set('log_dir_path', log_dir_path)
save_dir_path = self.hparams_dict['save_root_dir'] + '/model/' + self.hparams_dict['train_id'] + '/'
save_dir_path = save_dir_path.replace('//', '/')
os.makedirs(save_dir_path, exist_ok=True)
self.save_dir_path = save_dir_path
self._hparam_ins.set('save_dir_path', save_dir_path)
save_file_name = 'model-{}_lr-{}_bs-{}.ckpt'.format(self.hparams_dict['model_prefix'], self.hparams_dict['learning_rate'],
self.hparams_dict['batch_size'])
save_file_path = save_dir_path + '/' + save_file_name
save_file_path = save_file_path.replace('//', '/')
self.save_file_path = save_file_path
self._hparam_ins.set('save_file_path', save_file_path)
report_dir_path = self.hparams_dict['save_root_dir'] + '/report/' + self.hparams_dict['train_id'] + '/'
report_dir_path = report_dir_path.replace('//', '/')
os.makedirs(report_dir_path, exist_ok=True)
self.report_dir_path = report_dir_path
self._hparam_ins.set('report_dir_path', report_dir_path)
operation_dir_path = os.path.join(self.hparams_dict['save_root_dir'], 'operation')
operation_dir_path = os.path.join(operation_dir_path, self.hparams_dict['train_id'])
operation_file_path = os.path.join(operation_dir_path, self.hparams_dict['train_id'] + '.json')
        os.makedirs(operation_dir_path, exist_ok=True)
        # Write the hyper parameters out so that the operation file exists
        # before any upload below is attempted (default=str guards against
        # values that are not JSON serializable).
        with open(operation_file_path, 'w') as f:
            json.dump(self.hparams_dict, f, default=str)
if self.hparams_dict['cloud_root'] is not None:
print('Upload the hparams to cloud: {}'.format(self.hparams_dict['cloud_root']))
upload_to_cloud(operation_file_path, self.hparams_dict['cloud_root'], self.hparams_dict['save_root_dir'])
print('[Operation]DONE prepare_dirs')
def construct_and_prepare_model(self, hparams=None, train_data=None):
hparams = hparams or self.hparams_dict
model_id = hparams['model_id']
print('construct_and_prepare_model with model_id: {}'.format(model_id))
if model_id in MODEL_ID_LIST:
for _m in MODEL_LIST:
if _m.MODEL_ID == model_id:
model = _m.construct_and_prepare_model(log_dir_path=hparams['log_dir_path'], model_id=model_id,
hparams=hparams, train_data=train_data,
debug_mode=hparams['debug_mode'])
self.model = model
return model
raise TypeError('Invalid model_id:{}'.format(model_id))
def train(self, hparams=None):
hparams = hparams or self.hparams_dict
if self.model is None:
self.construct_and_prepare_model(hparams=hparams)
self.model.train(iter_to=hparams['iter_to'], learning_rate=hparams['learning_rate'],
batch_size=hparams['batch_size'], dropout_ratio=hparams['dropout_ratio'],
l1_norm_reg_ratio=hparams['l1_norm_reg_ratio'], save_file_path=hparams['save_file_path'],
report_dir_path=hparams['report_dir_path'])
print('DONE train data ')
print('====================')
def auto(self, hparams=None, setting_file_path=None):
print('====================')
print('TODO auto operation with hyper parameter: ')
print(self.hparams_dict)
print('====================')
self.prepare_dirs()
print('DONE prepare_dirs')
print('====================')
print('TODO construct_and_prepare_model')
self.construct_and_prepare_model()
print('DONE construct_and_prepare_model')
print('====================')
if (not self.hparams_dict.get('prediction_mode')):
print('TODO train( or test only)')
self.train()
print('DONE train( or test only)')
print('====================')
print('DONE auto operation')
print('====================')
def main(exec_param):
print(exec_param)
operation = Operation(setting_file_path=exec_param['setting_file_path'])
operation.auto()
def _main(exec_param):
print(exec_param)
operation = Operation()
if 'setting_file_path' in exec_param.keys() and exec_param['setting_file_path'] is not None:
operation.update_params_from_file(exec_param['setting_file_path'])
elif 'json_param' in exec_param.keys() and exec_param['json_param'] is not None:
operation.update_hyper_param_from_json(exec_param['json_param'])
exec_param = operation.hparams_dict
print('updated exec_param:{}'.format(exec_param))
# prepare directories
operation.prepare_dirs()
if 'scrpit_test' in exec_param.keys() and exec_param['scrpit_test'] == True:
test_static_methods()
model = operation.construct_and_prepare_model()
model.train(iter_to=1000, learning_rate=exec_param['learning_rate'], batch_size=exec_param['batch_size'], dropout_ratio=exec_param['dropout_ratio'], save_file_path=exec_param['save_file_path'])
exit()
model = None
print('====================')
print('TODO train data ')
if model is None:
model = operation.construct_and_prepare_model()
operation.train()
print('DONE train data ')
print('====================')
from pathlib import Path
def download_to_local(path, work_dir_path='/var/tmp/tsp/'):
ret_path = None
# check path is local
if os.path.exists(path): return path
os.makedirs(work_dir_path, exist_ok=True)
# check if s3 path
s3_bucket_name, s3_key = get_bucket_name(path)
if s3_bucket_name is not None:
ret_path = os.path.join(work_dir_path, s3_key)
os.makedirs(Path(ret_path).parent, exist_ok=True)
s3_access.download(s3_bucket_name=s3_bucket_name, s3_key=s3_key, local_dir=work_dir_path, file_path=s3_key)
return ret_path
import multiprocessing
def upload_to_cloud(local_path, cloud_root, local_root, with_multiprocessing=True):
if local_path is None:
print('No file to upload_to_cloud:local_path:{}'.format(local_path))
return
s3_bucket_name, s3_root_key = get_bucket_name(cloud_root)
if s3_bucket_name is None:
raise ValueError('Invalid cloud_root:{}'.format(cloud_root))
if len(local_path.split(local_root)[0]) > 0:
raise ValueError('Invalid local_path:{} or local_root:{}'.format(local_path, local_root))
local_path_from_local_root = local_path.split(local_root)[1]
# print('local_path_from_local_root:{}'.format(local_path_from_local_root))
s3_key = os.path.join(s3_root_key, local_path_from_local_root)
local_dir = Path(local_path).parent
file_path = Path(local_path).name
if with_multiprocessing:
# p = multiprocessing.Process(target=s3_access.upload, args=(s3_bucket_name, s3_key, local_dir, file_path,))
# p.start()
send_to_s3_uploader(s3_bucket_name=s3_bucket_name, s3_key=s3_key, local_dir=local_dir, file_path=file_path)
else:
s3_access.upload(s3_bucket_name=s3_bucket_name, s3_key=s3_key, local_dir=local_dir, file_path=file_path)
def send_to_s3_uploader(s3_bucket_name, s3_key, local_dir, file_path, queue_file_path='/var/tmp/tsp/queue.txt'):
mode = 'a' if os.path.isfile(queue_file_path) else 'w'
f = open(queue_file_path, mode)
f.write('{}, {}, {}, {}\n'.format(s3_bucket_name, s3_key, local_dir, file_path))
f.close()
def is_s3_path(s3_path):
s3_bucket_name, s3_key = get_bucket_name(s3_path)
return (s3_bucket_name is not None)
def get_bucket_name(s3_path):
if s3_path is None: return None, None
try:
_split = s3_path.split('s3://')
if len(_split[0]) > 0: return None, None
s3_bucket_name = _split[1].split('/')[0]
s3_key = _split[1][1 + len(s3_bucket_name):]
return s3_bucket_name, s3_key
except IndexError as e:
print('Can not read s3_bucket_name or s3_key from s3_path:{}'.format(s3_path))
return None, None
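# A small self-contained check of the s3 path parsing above; the paths are
# illustrative and no network access is needed.
def test_get_bucket_name():
    assert get_bucket_name('s3://your-bucket/tsp/sample.json') == ('your-bucket', 'tsp/sample.json')
    assert get_bucket_name('/local/path/sample.json') == (None, None)
    print('Done test_get_bucket_name')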
def test_download_to_local():
path = 's3://your-bucket/tsp/sample/sample.json'
download_path = download_to_local(path)
has_downloaded = os.path.isfile(download_path)
print('[test_download_to_local]from:{}, to:{} has_downloaded:{}'.format(path, download_path, has_downloaded))
assert has_downloaded
def test_upload_to_cloud():
# case 1
local_path = '/var/tsp/sample/test/sample_upload.txt'
cloud_root = 's3://your-bucket/tsp/sample/test/'
local_root = '/var/tsp/sample/test/'
upload_to_cloud(local_path, cloud_root, local_root)
def test_static_methods():
test_upload_to_cloud()
exit()
test_download_to_local()
print('Done test_static_methods')
def main_with_train_id(train_id):
print('TODO')
import json
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='tsp')
parser.add_argument('--model_prefix', '-mp', type=str, default='nn',
help='The prefix string representing the model')
parser.add_argument('--save_root_dir', '-rd', type=str, default='/var/tensorflow/tsp/',
help='Root dir for Tensorflow FileWriter')
parser.add_argument('--init_model_path', '-imp', type=str, default=None,
help='Model path to restore Tensorflow session')
parser.add_argument('--restore_var_name_list', '-rvnl', type=list, default=None,
help='restore_var_name_list')
parser.add_argument('--untrainable_var_name_list', '-utvnl', type=list, default=None,
help='untrainable_var_name_list')
parser.add_argument('--learning_rate', '-ll', type=float, default=1e-4,
                        help='learning_rate of optimizer')
# About batch size
parser.add_argument('--batch_size', '-bs', type=int, default=128,
help='batch_size')
# About minibatch operation
parser.add_argument('--evaluate_in_minibatch', '-enmb', type=bool, default=False,
help = 'Bool, Whether to evaluate in minibatch or not (Default: False)')
parser.add_argument('--iter_to', '-itr', type=int, default=10000,
help='iter_to')
parser.add_argument('--dropout_ratio', '-dr', type=float, default=0.5,
help='Dropout ratio')
parser.add_argument('--train_id', '-tid', type=str, default='TEST_YYYYMMDD-HHmmSS',
help='id attached to model and log dir to identify train operation ')
parser.add_argument('--model_id', '-mid', type=str, default=st.Hyperparameters.DEFAULT_DICT['model_id'],
help='id attached to model to identify model constructure ')
parser.add_argument('--model_type', '-mty', type=str, default='REGRESSION',
help='model_type ')
parser.add_argument('--prediction_mode', '-pmd', type=bool, default=None,
help='Whether prediction mode or not')
parser.add_argument('--debug_mode', '-dmd', type=bool, default=None,
help='Whether debug mode or not')
parser.add_argument('--monochrome_mode', '-mmd', type=bool, default=False,
help='Whether monochrome mode or not')
parser.add_argument('--optimizer', '-otm', type=str, default=None,
help='String, optimizer')
parser.add_argument('--input_ts_size', '-its', type=int, default=12,
help='input_ts_size')
parser.add_argument('--input_ts_width', '-itw', type=int, default=None,
                        help='input_ts_width')
parser.add_argument('--input_img_width', '-iiw', type=int, default=32,
help='input_img_width')
parser.add_argument('--input_output_ts_offset', '-iotso', type=int, default=1,
help='input_output_ts_offset')
parser.add_argument('--input_output_ts_offset_range', '-iotsor', type=list, default=None,
help='input_output_ts_offset_range')
parser.add_argument('--input_output_ts_offset_list', '-iotsol', type=list, default=None,
help='input_output_ts_offset_list')
parser.add_argument('--has_to_complement_before', '-htcb', type=bool, default=True,
help='Whether complement the value before ts starts or not(Default:True)')
parser.add_argument('--complement_ts', '-cpts', type=str, default=None,
help='String, Values to complement the missing time series data (Default:None)')
parser.add_argument('--n_layer', '-nly', type=int, default=5,
help='n_layer')
parser.add_argument('--num_add_fc_layers', '-nafl', type=int, default=0,
help='num_add_fc_layers')
parser.add_argument('--fc_node_size_list', '-fnsl', type=list, default=None,
help='fc_node_size_list')
parser.add_argument('--fc_weight_stddev_list', '-fwsl', type=list, default=None,
help='List of integer, the list of stddevs of weight variables in each fc layers. Default: all 0.1.')
parser.add_argument('--fc_bias_value_list', '-fbvl', type=list, default=None,
help='List of integer, the list of initial values of bias variables in each fc layers. Default: all 0.1')
# about sub model
parser.add_argument('--sub_model_url', '-smu', type=str, default=None,
help='String, The sub model\'s URL (Default: None, Do not use sub model)')
parser.add_argument('--sub_model_allocation', '-sma', type=float, default=0.0,
help='Float, the allocation of value which flows into the sub model (Default: 0.0, no allocation into the sub model)')
parser.add_argument('--sub_model_input_point', '-smip', type=str, default=None,
help='String, The sub model input point (Default: None, Do not use sub model)')
parser.add_argument('--sub_model_output_point', '-smop', type=str, default=None,
help='String, The sub model output point (Default: None, Do not use sub model)')
# about ResNet
parser.add_argument('--has_res_net', '-hrs', type=bool, default=False,
help='Whether the model has ResNet (the layers in the model has short cut) or not.')
parser.add_argument('--num_cnn_layers_in_res_block', '-nclrb', type=int, default=2,
help='Integer, the number of CNN layers in one Residual Block (Default: 2)')
parser.add_argument('--ts_start', '-tss', type=int, default=None,
help='ts_start')
parser.add_argument('--ts_end', '-tse', type=int, default=None,
help='ts_end')
parser.add_argument('--test_ts_index_from', '-tetsif', type=int, default=None,
help='test_ts_index_from')
parser.add_argument('--test_ts_index_to', '-tetsit', type=int, default=None,
help='test_ts_index_to')
parser.add_argument('--max_data_per_ts', '-mdpts', type=int, default=None,
help='max_data_per_ts')
parser.add_argument('--filter_width', '-flw', type=int, default=5,
help='filter_width')
parser.add_argument('--cnn_channel_size', '-ccs', type=int, default=4,
help='cnn_channel_size')
parser.add_argument('--cnn_channel_size_list', '-ccsl', type=list, default=None,
help='cnn_channel_size_list')
parser.add_argument('--pool_size_list', '-psl', type=list, default=None,
help='pool_size_list')
parser.add_argument('--act_func_list', '-actfl', type=list, default=None,
help='act_func_list')
parser.add_argument('--cnn_weight_stddev_list', '-cwsl', type=list, default=None,
help='List of integer, the list of stddevs of weight variables in each cnn layers. Default: all 0.1.')
parser.add_argument('--cnn_bias_value_list', '-cbvl', type=list, default=None,
help='List of integer, the list of initial values of bias variables in each cnn layers. Default: all 0.1.')
# about data augmentation
parser.add_argument('--flip_randomly_left_right', '-frlr', type=bool, default=True,
                        help='Boolean, if true, the processed images will be randomly flipped left to right (Default: true).')
parser.add_argument('--crop_randomly', '-crr', type=bool, default=True,
help='Boolean, if true, the processed images will be randomly cropped from resized images, the size to resize is set with size_random_crop_from (Default: true).')
parser.add_argument('--size_random_crop_from', '-srcf', type=int, default=None,
                        help='Integer, the size to which the images will be resized and from which the processed images will be randomly cropped (Default: None, set input_img_width * 1.25 if crop_randomly is true)')
parser.add_argument('--angle_rotate_randomly', '-rtrnd', type=float, default=None,
                        help='Float, the angle by which the image will be rotated, chosen randomly between -rt <= x <= +rt (Default: 0)')
parser.add_argument('--rounding_angle', '-rndang', type=int, default=90,
help='Integer, The Angle should be rounded to a multiple of rounding_angle (Default: 90)')
parser.add_argument('--resize_to_crop_with', '-retcw', type=str, default='scaling_or_padding',
                    help='String, how the image is resized before cropping: scaling_or_padding or just padding')
# about L1 term loss
parser.add_argument('--add_l1_norm_reg', '-al1nr', type=bool, default=False,
help='Whether add L1 term or not.')
parser.add_argument('--l1_norm_reg_ratio', '-l1nrr', type=float, default=0.01,
help='L1 term ratio (* L1 term)')
# about preactivation regularization
parser.add_argument('--add_preactivation_regularization', '-aprreg', type=bool, default=False,
help='Whether add_preactivation_regularization or not.')
parser.add_argument('--preactivation_regularization_value_ratio', '-prrgvr', type=float, default=0.0,
help='preactivation_regularization_value_ratio')
parser.add_argument('--preactivation_maxout_list', '-prmol', type=list, default=None,
help='preactivation_maxout_list')
# about min-max normalization
parser.add_argument('--has_minmax_norm', '-hmmn', type=bool, default=True,
help='has_minmax_norm')
parser.add_argument('--input_min', '-imin', type=float, default=None,
help='Float, min value of input data. Default: None(will be selected from input test/train data)')
parser.add_argument('--input_max', '-imax', type=float, default=None,
help='Float, max value of input data. Default: None(will be selected from input test/train data)')
# about batch normalization
parser.add_argument('--has_batch_norm', '-hbn', type=bool, default=True,
help='has_batch_norm')
parser.add_argument('--bn_decay', '-bnd', type=float, default=NNModel.DEFAULT_BN_DECAY,
help='batch normalization param decay')
parser.add_argument('--bn_eps', '-bne', type=float, default=NNModel.DEFAULT_BN_ESP,
help='batch normalization param eps')
parser.add_argument('--data_dir_path', '-ddp', type=str, default=None,
help='data_dir_path')
parser.add_argument('--data_set_def_path', '-dsdp', type=str, default=None,
help='data_set_def_path')
parser.add_argument('--input_data_names', '-idn', type=str, default=None,
help='input_data_names')
parser.add_argument('--input_data_names_to_be_extended', '-idnex', type=str, default=None,
help='input_data_names_to_be_extended')
parser.add_argument('--output_data_names', '-odn', type=str, default=None,
help='output_data_names')
parser.add_argument('--output_classes', '-ocs', type=int, default=None,
                    help='Integer, the number of output classes (output class size) used in classification operations. Default: None(will be set from data set or initial model)')
# col name that has time series data
parser.add_argument('--dt_col_name', '-tcn', type=str, default=None,
                    help='dt_col_name, name of the column that has datetime data')
parser.add_argument('--dt_col_format', '-tcf', type=str, default='YYYY-mm-DD',
                    help='dt_col_format, format of the datetime column')
parser.add_argument('--dt_unit', '-tsu', type=str, default='day',
                    help='dt_unit, unit of the datetime column')
# datetime col
parser.add_argument('--add_dt_col_name_list', '-adcnl', type=list, default=None,
help='add_dt_col_name_list')
parser.add_argument('--annotation_col_names', '-acn', type=list, default=None,
help='annotation_col_names')
# multi resolution channels
parser.add_argument('--multi_resolution_channels', '-mrc', type=int, default=0,
                    help='Integer, the number of multi resolution channels to add (default: 0, do not add)')
parser.add_argument('--decrease_resolution_ratio', '-rdr', type=int, default=NNModel.DEFAULT_DECREASE_RESOLUTION_RATIO,
help='ratio to decrease to multi resolution channels(default:decrease by {})'.format(NNModel.DEFAULT_DECREASE_RESOLUTION_RATIO))
parser.add_argument('--decrease_resolution_ratio_list', '-rdrl', type=list, default=None,
help='list of ratio to decrease to multi resolution channels. If this set, decrease_resolution_ratio setting will be ignored.')
parser.add_argument('--target_group', '-tgr', type=str, default=None,
help='target_group')
parser.add_argument('--test_only_mode', '-tomd', type=bool, default=None,
                    help='Whether to calculate output using test data only (without training) or not')
parser.add_argument('--mask_rate', '-mskr', type=float, default=None,
help='mask_rate')
parser.add_argument('--col_index_to_mask', '-citm', type=list, default=None,
                    help='Column index to mask. If this is None while mask_rate > 0, none of the columns will be masked.')
parser.add_argument('--skip_invalid_data', '-sivld', type=bool, default=None,
help='skip_invalid_data')
parser.add_argument('--valid_data_range', '-vldr', type=list, default=None,
help='valid_data_range')
parser.add_argument('--plot_x_label', '-pxl', type=str, default=None,
help='plot_x_label')
parser.add_argument('--plot_y_label', '-pyl', type=str, default=None,
help='plot_y_label')
parser.add_argument('--plot_x_data_name_in_annotation', '-plxdnia', type=str, default=None,
help='plot_x_data_name_in_annotation')
parser.add_argument('--plot_group_data_name_in_annotation', '-plgdnia', type=str, default=None,
help='plot_group_data_name_in_annotation')
parser.add_argument('--plot_x_range', '-plxr', type=list, default=None,
help='plot_x_range')
parser.add_argument('--plot_y_range', '-plyr', type=list, default=None,
help='plot_y_range')
parser.add_argument('--plot_title', '-pltt', type=str, default=None,
help='plot_title')
parser.add_argument('--plot_errors', '-pler', type=list, default=None,
help='plot_errors')
parser.add_argument('--plot_animation', '-pla', type=bool, default=None,
help='plot_animation')
parser.add_argument('--calc_cc_errors', '-cce', type=bool, default=None,
help='calc_cc_errors')
parser.add_argument('--op_errors', '-opers', type=list, default=None,
help='op_errors')
parser.add_argument('--rank_boundary_list', '-rbl', type=list, default=None,
help='rank_boundary_list')
parser.add_argument('--cloud_root', '-clr', type=str, default=None,
help='String, cloud_root')
parser.add_argument('--prioritize_cloud', '-prcl', type=bool, default=False,
help='Boolean, prioritize_cloud')
# frequencies for the tasks during iterations
parser.add_argument('--train_report_frequency', '-trrf', type=int, default=100,
help='train report frequency(default:100)')
parser.add_argument('--test_report_frequency', '-tsrf', type=int, default=100,
help='test report frequency(default:100)')
parser.add_argument('--save_model_frequency', '-smf', type=int, default=100,
help='save model frequency(default:100)')
parser.add_argument('--export_to_onnx', '-eto', type=bool, default=None,
                    help='Boolean, whether to export the trained model to ONNX format (Default: None).')
parser.add_argument('--summarize_layer_frequency', '-slf', type=int, default=1000,
                    help='Integer, summarize layer frequency(default:1000)')
parser.add_argument('--summarize_layer_name_list', '-slnl', type=list, default=None,
help='List of String, summarize_layer_name_list(Default: None)')
parser.add_argument('--use_cache', '-ucch', type=bool, default=False,
help='Boolean, use_cache')
parser.add_argument('--cache_db_host', '-cchdbh', type=str, default='localhost',
help='String, cache_db_host')
parser.add_argument('--cache_data_set_id', '-cdsid', type=str, default=None,
help='String, Data set id. If None, then set with train_id (Default:None)')
parser.add_argument('--refresh_cache_data_set', '-rfds', type=bool, default=False,
help='Boolean, default: false. Whether to refresh train data stored with the key name or not.')
parser.add_argument('--json_param', '-jpr', type=str, default=None,
help='JSON String to set parameters')
parser.add_argument('--setting_file_path', '-sfp', type=str, default=None,
help='String, The setting file path of JSON String to set parameters')
parser.add_argument('--scrpit_test', '-sct', type=bool, default=False,
                    help='Boolean, script test mode (Default: false)')
args = parser.parse_args()
print('args:{}'.format(args))
exec_param = vars(args)
print('init exec_param:{}'.format(exec_param))
main(exec_param)
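# Example invocation (illustrative only; the script name and data paths below
# are assumptions, not defined in this file):
#
#   python train.py \
#       --data_dir_path ./data \
#       --output_classes 10 \
#       --filter_width 3 --cnn_channel_size 8 \
#       --train_report_frequency 50 --save_model_frequency 500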
|
1650923
|
from calendar import timegm
from datetime import datetime
from typing import Any, Dict, Optional
from fastapi import HTTPException
from pydantic import BaseModel, Field
from starlette import status
from .base import UserInfoAuth
from .messages import NOT_VERIFIED
from .verification import JWKS, ExtraVerifier
class FirebaseClaims(BaseModel):
user_id: str = Field(alias="user_id")
    email: Optional[str] = Field(None, alias="email")
class FirebaseCurrentUser(UserInfoAuth):
"""
Verify ID token and get user info of Firebase
"""
user_info = FirebaseClaims
def __init__(self, project_id: str, *args: Any, **kwargs: Any):
url = "https://www.googleapis.com/robot/v1/metadata/x509/securetoken@system.gserviceaccount.com"
jwks = JWKS.firebase(url)
super().__init__(
jwks,
*args,
user_info=self.user_info,
audience=project_id,
issuer=f"https://securetoken.google.com/{project_id}",
extra=FirebaseExtraVerifier(project_id=project_id),
**kwargs,
)
class FirebaseExtraVerifier(ExtraVerifier):
def __init__(self, project_id: str):
self._pjt_id = project_id
def __call__(self, claims: Dict[str, str], auto_error: bool = True) -> bool:
# auth_time must be past time
if claims.get("auth_time"):
auth_time = int(claims["auth_time"])
now = timegm(datetime.utcnow().utctimetuple())
if now < auth_time:
if auto_error:
raise HTTPException(
status_code=status.HTTP_401_UNAUTHORIZED, detail=NOT_VERIFIED
)
return False
return True
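# Minimal usage sketch (assumes a FastAPI app; the project id and route are
# placeholders, not part of this module):
#
#   from fastapi import Depends, FastAPI
#
#   app = FastAPI()
#   get_current_user = FirebaseCurrentUser(project_id="my-firebase-project")
#
#   @app.get("/me")
#   async def me(user: FirebaseClaims = Depends(get_current_user)):
#       return {"user_id": user.user_id, "email": user.email}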
|
1650925
|
import numpy as np
from scipy.misc import imresize
import gym
from gym.core import ObservationWrapper, Wrapper
from gym.spaces.box import Box
from gym.wrappers import SkipWrapper, TimeLimit
from copy import copy
import collections
try:
import ppaquette_gym_doom
from ppaquette_gym_doom.wrappers.action_space import ToDiscrete
except ImportError:
print("no doom envs")
Transition = collections.namedtuple(
"Transition",
["state", "action", "reward", "next_state", "done"])
class PreprocessImage(ObservationWrapper):
def __init__(self, env, height=64, width=64, grayscale=True, crop=None):
"""
        A gym wrapper that crops, scales the image to the desired shape and optionally converts it to grayscale.
"""
super(PreprocessImage, self).__init__(env)
self.img_size = (height, width)
self.grayscale = grayscale
no_crop = lambda img: img
self.crop = crop or no_crop
n_colors = 1 if self.grayscale else 3
self.observation_space = Box(0.0, 1.0, [height, width, n_colors])
def _observation(self, img):
"""what happens to the observation"""
img = self.crop(img)
img = imresize(img, self.img_size)
if self.grayscale:
img = img.mean(-1, keepdims=True)
img = img.astype('float32') / 255.
return img
class FrameBuffer(Wrapper):
def __init__(self, env, n_frames=4, reshape_fn=None):
"""A gym wrapper that returns last n_frames observations as a single observation.
Useful for games like Atari and Doom with screen as input."""
super(FrameBuffer, self).__init__(env)
self.framebuffer = np.zeros([n_frames, ] + list(env.observation_space.shape))
# now, hacky auto-reshape fn
if reshape_fn is None:
shape_dims = list(range(len(self.framebuffer.shape)))
shape_dims = shape_dims[1:] + [shape_dims[0]]
result_shape = list(env.observation_space.shape)
if len(result_shape) == 1:
# so, its linear env
result_shape += [1]
result_shape[-1] = result_shape[-1] * n_frames
reshape_fn = lambda x: np.transpose(x, shape_dims).reshape(result_shape)
self.reshape_fn = reshape_fn
self.observation_space = Box(0.0, 1.0, self.reshape_fn(self.framebuffer).shape)
def reset(self):
"""resets breakout, returns initial frames"""
self.framebuffer = np.zeros_like(self.framebuffer)
self.update_buffer(self.env.reset())
return self.reshape_fn(self.framebuffer)
def step(self, action):
"""plays breakout for 1 step, returns 4-frame buffer"""
new_obs, r, done, info = self.env.step(action)
self.update_buffer(new_obs)
return self.reshape_fn(self.framebuffer), r, done, info
def update_buffer(self, obs):
"""push new observation to the buffer, remove the earliest one"""
self.framebuffer = np.vstack([obs[None], self.framebuffer[:-1]])
class EnvPool(Wrapper):
"""
Typical EnvPool, that does not care about done envs.
"""
def __init__(self, env, n_envs=16, autoreload_envs=False):
super(EnvPool, self).__init__(env)
self.initial_env = env
self.n_envs = n_envs
self.env_shape = env.observation_space.shape
self.envs = []
self.recreate_envs()
self.reset()
def recreate_envs(self):
self.close()
self.envs = np.array([copy(self.initial_env) for _ in range(self.n_envs)])
def reset(self):
self._states = np.zeros(shape=(self.n_envs,) + tuple(self.env_shape), dtype=np.float32)
self._rewards = np.zeros(shape=self.n_envs, dtype=np.float32)
        self._dones = np.zeros(shape=self.n_envs, dtype=bool)
for i, env in enumerate(self.envs):
self._states[i] = env.reset()
return self._states.copy()
def step(self, actions):
for i, (action, env) in enumerate(zip(actions, self.envs)):
new_s, r, done, _ = env.step(action)
self._rewards[i] = r
self._dones[i] = done
if not done:
self._states[i] = new_s
else:
self._states[i] = env.reset()
return self._states.copy(), self._rewards.copy(), self._dones.copy(), None
def close(self):
for env in self.envs:
env.close()
def pool_states(self):
return self._states.copy()
def make_env(env_name, n_games=1, episode_limit=None, n_frames=1, autoreload_envs=False):
env = gym.make(env_name) if episode_limit is None else gym.make(env_name).env
env = FrameBuffer(env, n_frames=n_frames) if n_frames > 1 else env
if episode_limit is not None:
env = TimeLimit(env, max_episode_steps=episode_limit)
return EnvPool(env, n_games, autoreload_envs) if n_games > 0 else env
def make_image_env(
env_name, n_games=1, episode_limit=None,
n_frames=1, autoreload_envs=False,
width=64, height=64,
grayscale=True, crop=None):
env = gym.make(env_name) if episode_limit is None else gym.make(env_name).env
if "ppaquette" in env_name:
env = SkipWrapper(4)(ToDiscrete("minimal")(env))
env = PreprocessImage(env, width=width, height=height, grayscale=grayscale, crop=crop)
env = FrameBuffer(env, n_frames=n_frames) if n_frames > 1 else env
if episode_limit is not None:
env = TimeLimit(env, max_episode_steps=episode_limit)
return EnvPool(env, n_games, autoreload_envs) if n_games > 0 else env
def make_env_wrapper(make_env_fn, params):
def wrapper(env, n_games, episode_limit=None):
return make_env_fn(env, n_games, episode_limit=episode_limit, **params)
return wrapper
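# Minimal usage sketch (the env name, pool size and random actions are
# illustrative):
#
#   pool = make_env("CartPole-v0", n_games=4, n_frames=2)
#   states = pool.reset()
#   for _ in range(10):
#       actions = [pool.action_space.sample() for _ in range(pool.n_envs)]
#       states, rewards, dones, _ = pool.step(actions)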
|
1650945
|
import os
import tempfile
import unittest
import logging
from pyidf import ValidationLevel
import pyidf
from pyidf.idf import IDF
from pyidf.hvac_templates import HvactemplatePlantHotWaterLoop
log = logging.getLogger(__name__)
class TestHvactemplatePlantHotWaterLoop(unittest.TestCase):
def setUp(self):
self.fd, self.path = tempfile.mkstemp()
def tearDown(self):
os.remove(self.path)
def test_create_hvactemplateplanthotwaterloop(self):
pyidf.validation_level = ValidationLevel.error
obj = HvactemplatePlantHotWaterLoop()
# alpha
var_name = "Name"
obj.name = var_name
# object-list
var_pump_schedule_name = "object-list|Pump Schedule Name"
obj.pump_schedule_name = var_pump_schedule_name
# alpha
var_pump_control_type = "Intermittent"
obj.pump_control_type = var_pump_control_type
# alpha
var_hot_water_plant_operation_scheme_type = "Default"
obj.hot_water_plant_operation_scheme_type = var_hot_water_plant_operation_scheme_type
# object-list
var_hot_water_plant_equipment_operation_schemes_name = "object-list|Hot Water Plant Equipment Operation Schemes Name"
obj.hot_water_plant_equipment_operation_schemes_name = var_hot_water_plant_equipment_operation_schemes_name
# object-list
var_hot_water_setpoint_schedule_name = "object-list|Hot Water Setpoint Schedule Name"
obj.hot_water_setpoint_schedule_name = var_hot_water_setpoint_schedule_name
# real
var_hot_water_design_setpoint = 7.7
obj.hot_water_design_setpoint = var_hot_water_design_setpoint
# alpha
var_hot_water_pump_configuration = "VariableFlow"
obj.hot_water_pump_configuration = var_hot_water_pump_configuration
# real
var_hot_water_pump_rated_head = 0.0
obj.hot_water_pump_rated_head = var_hot_water_pump_rated_head
# alpha
var_hot_water_setpoint_reset_type = "None"
obj.hot_water_setpoint_reset_type = var_hot_water_setpoint_reset_type
# real
var_hot_water_setpoint_at_outdoor_drybulb_low = 11.11
obj.hot_water_setpoint_at_outdoor_drybulb_low = var_hot_water_setpoint_at_outdoor_drybulb_low
# real
var_hot_water_reset_outdoor_drybulb_low = 12.12
obj.hot_water_reset_outdoor_drybulb_low = var_hot_water_reset_outdoor_drybulb_low
# real
var_hot_water_setpoint_at_outdoor_drybulb_high = 13.13
obj.hot_water_setpoint_at_outdoor_drybulb_high = var_hot_water_setpoint_at_outdoor_drybulb_high
# real
var_hot_water_reset_outdoor_drybulb_high = 14.14
obj.hot_water_reset_outdoor_drybulb_high = var_hot_water_reset_outdoor_drybulb_high
# alpha
var_hot_water_pump_type = "SinglePump"
obj.hot_water_pump_type = var_hot_water_pump_type
# alpha
var_supply_side_bypass_pipe = "Yes"
obj.supply_side_bypass_pipe = var_supply_side_bypass_pipe
# alpha
var_demand_side_bypass_pipe = "Yes"
obj.demand_side_bypass_pipe = var_demand_side_bypass_pipe
# alpha
var_fluid_type = "Water"
obj.fluid_type = var_fluid_type
# real
var_loop_design_delta_temperature = 19.19
obj.loop_design_delta_temperature = var_loop_design_delta_temperature
# real
var_maximum_outdoor_dry_bulb_temperature = 20.2
obj.maximum_outdoor_dry_bulb_temperature = var_maximum_outdoor_dry_bulb_temperature
# alpha
var_load_distribution_scheme = "Optimal"
obj.load_distribution_scheme = var_load_distribution_scheme
idf = IDF()
idf.add(obj)
idf.save(self.path, check=False)
with open(self.path, mode='r') as f:
for line in f:
log.debug(line.strip())
idf2 = IDF(self.path)
self.assertEqual(idf2.hvactemplateplanthotwaterloops[0].name, var_name)
self.assertEqual(idf2.hvactemplateplanthotwaterloops[0].pump_schedule_name, var_pump_schedule_name)
self.assertEqual(idf2.hvactemplateplanthotwaterloops[0].pump_control_type, var_pump_control_type)
self.assertEqual(idf2.hvactemplateplanthotwaterloops[0].hot_water_plant_operation_scheme_type, var_hot_water_plant_operation_scheme_type)
self.assertEqual(idf2.hvactemplateplanthotwaterloops[0].hot_water_plant_equipment_operation_schemes_name, var_hot_water_plant_equipment_operation_schemes_name)
self.assertEqual(idf2.hvactemplateplanthotwaterloops[0].hot_water_setpoint_schedule_name, var_hot_water_setpoint_schedule_name)
self.assertAlmostEqual(idf2.hvactemplateplanthotwaterloops[0].hot_water_design_setpoint, var_hot_water_design_setpoint)
self.assertEqual(idf2.hvactemplateplanthotwaterloops[0].hot_water_pump_configuration, var_hot_water_pump_configuration)
self.assertAlmostEqual(idf2.hvactemplateplanthotwaterloops[0].hot_water_pump_rated_head, var_hot_water_pump_rated_head)
self.assertEqual(idf2.hvactemplateplanthotwaterloops[0].hot_water_setpoint_reset_type, var_hot_water_setpoint_reset_type)
self.assertAlmostEqual(idf2.hvactemplateplanthotwaterloops[0].hot_water_setpoint_at_outdoor_drybulb_low, var_hot_water_setpoint_at_outdoor_drybulb_low)
self.assertAlmostEqual(idf2.hvactemplateplanthotwaterloops[0].hot_water_reset_outdoor_drybulb_low, var_hot_water_reset_outdoor_drybulb_low)
self.assertAlmostEqual(idf2.hvactemplateplanthotwaterloops[0].hot_water_setpoint_at_outdoor_drybulb_high, var_hot_water_setpoint_at_outdoor_drybulb_high)
self.assertAlmostEqual(idf2.hvactemplateplanthotwaterloops[0].hot_water_reset_outdoor_drybulb_high, var_hot_water_reset_outdoor_drybulb_high)
self.assertEqual(idf2.hvactemplateplanthotwaterloops[0].hot_water_pump_type, var_hot_water_pump_type)
self.assertEqual(idf2.hvactemplateplanthotwaterloops[0].supply_side_bypass_pipe, var_supply_side_bypass_pipe)
self.assertEqual(idf2.hvactemplateplanthotwaterloops[0].demand_side_bypass_pipe, var_demand_side_bypass_pipe)
self.assertEqual(idf2.hvactemplateplanthotwaterloops[0].fluid_type, var_fluid_type)
self.assertAlmostEqual(idf2.hvactemplateplanthotwaterloops[0].loop_design_delta_temperature, var_loop_design_delta_temperature)
self.assertAlmostEqual(idf2.hvactemplateplanthotwaterloops[0].maximum_outdoor_dry_bulb_temperature, var_maximum_outdoor_dry_bulb_temperature)
self.assertEqual(idf2.hvactemplateplanthotwaterloops[0].load_distribution_scheme, var_load_distribution_scheme)
|
1650964
|
import inspect
from os import PathLike
import pathlib
from types import CodeType, FrameType, ModuleType, MethodType
from typing import Any, Callable, Optional, Tuple, Union
class Scope:
def __init__(
self,
function: Optional[Union[Callable, str]] = None,
module: Optional[ModuleType] = None,
file: Optional[Union[str, PathLike]] = None,
unwrap: bool = True,
):
self.function = function
self.module = module
self._fn_name, self._fn_code, self._fn_self = None, None, None
if isinstance(function, str):
self._fn_name = function
elif function is not None:
self._fn_code, self._fn_self = _get_code_and_self(function, unwrap=unwrap)
self.file: Optional[Union[str, pathlib.Path]] = None
if isinstance(file, PathLike):
self.file = pathlib.Path(file).resolve()
else:
self.file = file
def match_frame(self, frame: FrameType) -> bool:
if self._fn_code is not None:
if frame.f_code is not self._fn_code:
return False
if self._fn_self is not None:
# The function is a method; check if `self` is the correct object
arg_info = inspect.getargvalues(frame)
if len(arg_info.args) == 0: # Just in case this somehow happens
return False
if arg_info.locals[arg_info.args[0]] is not self._fn_self:
return False
if self._fn_name is not None:
if frame.f_code.co_name != self._fn_name:
return False
if self.module is not None:
if frame.f_code.co_filename != self.module.__file__:
return False
if self.file is not None:
file = frame.f_code.co_filename
if file.endswith(">"):
# Special filename: exact match
if file != self.file:
return False
elif isinstance(self.file, pathlib.Path):
if pathlib.Path(file).resolve() != self.file:
return False
elif not pathlib.Path(file).resolve().match(self.file):
return False
return True
def _get_code_and_self(fn: Callable, unwrap: bool) -> Tuple[CodeType, Any]:
# First find the actual Python function that implements the callable, if possible.
# Then if unwrap is True, try to follow the wrapper chain, assuming that the
# wrappers are well-behaved (as opposed to, say, a Python function wrapping a
# built-in). If not, we raise an error.
def _unwrap(f):
if unwrap:
f = inspect.unwrap(f)
return f
exc: Optional[Exception] = None
try:
# Bound method
if inspect.ismethod(fn):
assert isinstance(fn, MethodType)
return _unwrap(fn.__func__).__code__, fn.__self__
# Regular function
if inspect.isfunction(fn):
return _unwrap(fn).__code__, None
# Instance of a class that defines a __call__ method
if hasattr(fn, "__class__") and hasattr(fn.__class__, "__call__"):
return _unwrap(fn.__class__.__call__).__code__, fn
except AttributeError as _exc:
exc = _exc
raise TypeError(
"Could not find the code for {!r}. "
"Please provide a pure Python callable".format(fn)
) from exc
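# Minimal usage sketch: matching frames from a trace function
# (the traced function below is illustrative):
#
#   import sys
#
#   def target():
#       return 42
#
#   scope = Scope(function=target)
#
#   def tracer(frame, event, arg):
#       if event == "call" and scope.match_frame(frame):
#           print("entered", frame.f_code.co_name)
#
#   sys.settrace(tracer)
#   target()
#   sys.settrace(None)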
|
1650969
|
from django import template
try:
from urllib.parse import quote
except ImportError:
from urllib import quote
register = template.Library()
@register.filter
def url_encode(value):
return quote(value)
@register.filter
def try_decode(value):
if isinstance(value, bytes):
try:
return value.decode('utf8')
except UnicodeDecodeError:
return value
else:
return value
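# Template usage sketch (the load name depends on this module's filename, which
# is not shown here; "urlfilters" below is a placeholder):
#
#   {% load urlfilters %}
#   <a href="/search?q={{ query|url_encode }}">{{ payload|try_decode }}</a>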
|
1651005
|
import helium
import unittest
import os
class TestBinaryInputFile(unittest.TestCase):
datadir = os.getenv('HEDATADIR', os.getenv('HOME', 'c:/') + '/Helium/data')
def test_data_dir(self):
self.assertTrue(os.path.isdir(self.datadir))
def test_molecule_file(self):
f = helium.MoleculeFile()
self.assertFalse(f.load('foo'))
f.load(self.datadir + '/1K.hel')
self.assertEqual(1000, f.numMolecules())
mol = helium.Molecule()
self.assertTrue(f.readMolecule(mol))
self.assertEqual(8, mol.numAtoms())
self.assertTrue(f.readMolecule(998, mol))
self.assertEqual(29, mol.numAtoms())
f.close()
f = helium.MoleculeFile(self.datadir + '/1K.hel')
self.assertTrue(f.readMolecule(0, mol))
self.assertEqual(8, mol.numAtoms())
def test_memory_mapped_molecule_file(self):
f = helium.MemoryMappedMoleculeFile()
self.assertFalse(f.load('foo'))
f.load(self.datadir + '/1K.hel')
self.assertEqual(1000, f.numMolecules())
mol = helium.Molecule()
self.assertTrue(f.readMolecule(0, mol))
self.assertEqual(8, mol.numAtoms())
f = helium.MemoryMappedMoleculeFile(self.datadir + '/1K.hel')
self.assertEqual(1000, f.numMolecules())
if __name__ == '__main__':
unittest.main()
|
1651055
|
from flask import request
from .. import db
from ..models import User
from datetime import datetime
import pytz
from sqlalchemy import CheckConstraint
from flask_login import current_user
from collections import defaultdict, OrderedDict
from ..email import send_email
class CartItem(db.Model):
"""Functions as association table between listings and merchants"""
__tablename__ = "cartItems"
__table_args__ = (
CheckConstraint('quantity > 0'),
)
merchant_id = db.Column(db.Integer, db.ForeignKey('users.id', ondelete='CASCADE'),
primary_key=True)
listing_id = db.Column(db.Integer, db.ForeignKey('listings.id', ondelete='CASCADE'),
primary_key=True)
quantity = db.Column(db.Integer)
listing = db.relationship("Listing")
@staticmethod
def get_vendor_cart_items_dict():
"""returns a dict where the keys are
vendors and the fields are a list
of items in the cart from that vendor"""
vendor_items_dict = defaultdict(list)
for item in current_user.cart_items:
vendor = item.listing.vendor
vendor_items_dict[vendor].append(item)
# uses sorted dict so vendor order in cart
# is in alphabetical order of company names
sorted_dict = OrderedDict(sorted(vendor_items_dict.items(),
key=lambda pair: pair[0].company_name))
return sorted_dict
@staticmethod
def get_vendor_ids():
"""returns all the ids of all the vendors represented in a cart"""
return [item.listing.vendor_id for item in current_user.cart_items]
@staticmethod
def get_total_price(cart_items=None):
"""returns the total price of the given cart_items. if None, returns
the total price of all of the current user's cart_items"""
if cart_items is None:
cart_items = current_user.cart_items
prices = [item.listing.price * item.quantity for item in cart_items]
return sum(prices)
@staticmethod
def delete_cart_items(cart_items):
for cart_item in cart_items:
db.session.delete(cart_item)
def __repr__(self):
return "<CartItem: merchant_id {}, " \
"listing_id {}, quantity {}>".format(self.merchant_id,
self.listing_id,
self.quantity)
class Status:
PENDING = 0
APPROVED = 1
DECLINED = 2
class Order(db.Model):
__tablename__ = 'orders'
id = db.Column(db.Integer, primary_key=True)
date = db.Column(db.DateTime)
status = db.Column(db.Integer)
merchant_id = db.Column(db.Integer, db.ForeignKey('merchant.id', ondelete='CASCADE'),
primary_key=True)
vendor_id = db.Column(db.Integer, db.ForeignKey('vendor.id', ondelete='CASCADE'),
primary_key=True)
company_name = db.Column(db.String(64))
referral_name = db.Column(db.String(64))
comment = db.Column(db.Text)
def __init__(self, date, vendor_id, referral_name):
self.status = Status.PENDING
self.date = date
self.vendor_id = vendor_id
self.merchant_id = current_user.id
vendor = User.query.get(vendor_id)
self.company_name = vendor.company_name
self.comment = None
self.referral_name=referral_name
def __repr__(self):
return "<Order: {}>".format(self.id)
@staticmethod
def order_cart_items_from_vendor(vendor_id, referral_name, date=None):
"""Orders all the items in the cart from a given vendor"""
if date is None:
date = datetime.now(pytz.timezone('US/Eastern'))
        cart_items = list(filter(lambda item: item.listing.vendor_id == vendor_id,
                                 current_user.cart_items))
        order = Order(date, vendor_id, referral_name)
        total = sum(item.listing.price * item.quantity for item in cart_items)
vendor = User.query.get(vendor_id)
merchant_id = current_user.id
merchant = User.query.get(merchant_id)
send_email(vendor.email,
'New merchant order request',
'merchant/email/order_item',
merchant=merchant,
cart_items=cart_items,
referral_name=referral_name,
total=total)
# send confirmation to the merchant
send_email(merchant.email,
'Confirmation of order request',
'merchant/email/confirm_order',
vendor=vendor,
cart_items=cart_items,
total=total)
for item in cart_items:
p = Purchase(
order=order,
listing_id=item.listing.id,
product_id=item.listing.product_id,
quantity=item.quantity,
item_name=item.listing.name,
item_price=item.listing.price,
unit=item.listing.unit,
item_quantity=item.listing.quantity
)
db.session.add(p)
db.session.add(order)
CartItem.delete_cart_items(cart_items)
db.session.commit()
def get_total(self):
total = 0
for purchase in self.purchases:
total += purchase.quantity * purchase.item_price
return "${0:.2f}".format(total)
def get_date(self):
"""Get date formatted as mm-dd-yyyy"""
date = self.date.date()
return '{}-{}-{}'.format(date.month, date.day, date.year)
def get_time(self):
"""Get time formatted as mm-dd-yyyy"""
return self.date.time().strftime("%I:%M%p")
def get_merchant_info(self):
merchant_id = self.merchant_id
merchant = User.query.get(merchant_id)
if merchant:
merchant_info = {
'company_name': merchant.company_name,
'full_name': merchant.full_name(),
'email': merchant.email
}
else:
merchant_info = {'company_name': 'USER DELETED FROM SYSTEM' }
return merchant_info
def get_vendor_info(self):
vendor_id = self.vendor_id
vendor = User.query.get(vendor_id)
if vendor:
vendor_info = {
'company_name': vendor.company_name,
'full_name': vendor.full_name(),
'email': vendor.email
}
else:
vendor_info = {'company_name': self.company_name}
return vendor_info
def get_purchase_info(self):
purchases = self.purchases
purchase_info = []
for purchase in purchases:
purchase_info.append({
'product_id': purchase.product_id,
'quantity': purchase.quantity,
'name': purchase.item_name,
'price': purchase.item_price,
'unit': purchase.unit,
'item_quantity': purchase.item_quantity
})
return purchase_info
@staticmethod
def order_cart_items():
"""Takes the cart items and makes an order
for each vendor represented in the cart"""
date = datetime.now(pytz.timezone('US/Eastern'))
vendor_ids = set([item.listing.vendor_id for item in current_user.cart_items])
referral_name=set([item.listing.referral_name for item in current_user.cart_items])
for vendor_id in vendor_ids:
            Order.order_cart_items_from_vendor(vendor_id, referral_name, date)
def get_all_purchases(self):
return Purchase.query.filter_by(order_id=self.id).all()
class Purchase(db.Model):
__tablename__ = 'purchases'
id = db.Column(db.Integer, primary_key=True)
# model relationships
order_id = db.Column(db.Integer, db.ForeignKey('orders.id'))
order = db.relationship("Order", backref="purchases")
listing_id = db.Column(db.String(64))
product_id = db.Column(db.String(64))
# purchase properties
quantity = db.Column(db.Integer)
item_name = db.Column(db.String(64))
item_price = db.Column(db.Float)
unit = db.Column(db.String(32))
item_quantity = db.Column(db.String(32))
def __init__(self, order, listing_id, product_id, quantity, item_name, item_price, unit, item_quantity):
self.order = order
self.listing_id = listing_id
self.product_id = product_id
self.quantity = quantity
self.item_name = item_name
self.item_price = item_price
self.unit = unit
self.item_quantity = item_quantity
def __repr__(self):
return "<Purchase: {} Listing: {}>".format(self.id, self.listing_id)
|
1651108
|
build_config = {
"projects": {
u'x86\\ascii\\mixedcase\\i32': {
"files": {
u'dwx_IMUL_30_XOR_dwy.bin': {
"sources": [u'dwx_IMUL_30_XOR_dwy.asm']
},
u'dwx_IMUL_by.bin': {
"sources": [u'dwx_IMUL_by.asm']
}
}
}
}
}
|
1651111
|
from django.contrib.auth.models import User
from django.db import models
from django.conf import settings
class Employment(models.Model):
name = models.CharField(max_length=50)
descr = models.CharField(max_length=100)
def __unicode__(self):
return self.descr
class Computertype(models.Model):
name = models.CharField(max_length=50)
descr = models.CharField(max_length=100)
def __unicode__(self):
return self.descr
class Computerowner(models.Model):
name = models.CharField(max_length=50)
descr = models.CharField(max_length=100)
def __unicode__(self):
return self.descr
class HelpChoices(models.Model):
name = models.CharField(max_length=50)
descr = models.CharField(max_length=100)
def __unicode__(self):
return self.descr
class Log(models.Model):
cn = models.CharField(max_length=50)
timestamp = models.DateTimeField()
message = models.CharField(max_length=250)
|
1651128
|
import polychrom.starting_conformations
import polychrom.forces, polychrom.forcekits, polychrom.polymerutils
from polychrom.hdf5_format import HDF5Reporter, list_URIs, load_URI
from polychrom.simulation import Simulation
import numpy as np
def test_basic_simulation_and_hdf5(tmp_path):
data = polychrom.starting_conformations.create_random_walk(1,100)
"""
    Here we create an HDF5Reporter attached to the folder tmp_path, and we are saving 5 blocks per file
    (you should probably use 50 or 100 here; 5 is just for a showcase)
"""
reporter = HDF5Reporter(folder=tmp_path, max_data_length=5)
"""
    Passing a reporter to the simulation object - many reporters are possible, and more will be added in the future
"""
sim = Simulation(
N=100,
error_tol=0.001,
collision_rate=0.1,
integrator="variableLangevin",
platform="reference",
max_Ek=40,
reporters=[reporter],
)
sim.set_data(data)
sim.add_force(polychrom.forcekits.polymer_chains(sim))
sim.add_force(polychrom.forces.spherical_confinement(sim, r=4, k=3))
sim._apply_forces()
datas = []
for i in range(19):
"""
Here we pass two extra records: a string and an array-like object.
First becomes an attr, and second becomes an HDF5 dataset
"""
sim.do_block(
20,
save_extras={
"eggs": "I don't eat green eggs and ham!!!",
"spam": [1, 2, 3],
},
)
datas.append(sim.get_data())
"""
Here we are not forgetting to dump the last set of blocks that the reporter has.
We have to do it at the end of every simulation.
I tried adding it to the destructor to make it automatic,
but some weird interactions with garbage collection made it not very useable.
"""
reporter.dump_data()
files = list_URIs(tmp_path)
d1 = load_URI(files[1])
d1_direct = datas[1]
assert np.abs(d1["pos"] - d1_direct).max() <= 0.0051
d1_fetch = polychrom.polymerutils.fetch_block(tmp_path, 1)
assert np.allclose(d1["pos"], d1_fetch)
assert np.allclose(d1["spam"], [1, 2, 3]) # spam got saved correctly
assert d1["eggs"] == "I don't eat green eggs and ham!!!"
del sim
del reporter
rep = HDF5Reporter(folder=tmp_path, overwrite=False, check_exists=False)
ind, data = rep.continue_trajectory()
# continuing from the last trajectory
assert np.abs(data["pos"] - datas[-1]).max() <= 0.0054
def run():
import tempfile
tmp_dir = tempfile.TemporaryDirectory()
test_basic_simulation_and_hdf5(tmp_dir.name)
if __name__ == "__main__":
run()
|
1651151
|
from mybitbank.libs.misc.stubconnector import rawData, ServiceProxyStubBTC
from django.contrib.auth.models import User
from django.test import TestCase
from mybitbank.libs.connections.connectors import Connector
class ConnectorsTests(TestCase):
connector = None
def setUp(self):
'''
Setup the test
'''
self.connector = Connector()
self.connector.services = {1: ServiceProxyStubBTC()}
self.connector.config = {1: {'id': int(1),
'rpcusername': "testuser",
'rpcpassword': "<PASSWORD>",
'rpchost': "localhost",
'rpcport': "7000",
'name': 'Bitcoin (BTC)',
'currency': 'btc',
'symbol': "B",
'enabled': True,
}, }
user = User.objects.create_user('testing', '<EMAIL>', '<PASSWORD>')
user.save()
def test_if_type_object(self):
'''
Test instance type
'''
self.assertTrue(isinstance(self.connector, Connector), "Instance of connector is not of correct type")
self.connector = None
def test_longNumber(self):
'''
Test longNumber method
'''
num = self.connector.longNumber(10)
self.assertEquals(num, "10.00000000", "Connector.longNumber method is problematic")
self.connector = None
def test_listAccounts(self):
'''
Test listAccounts() method
'''
accounts = self.connector.listAccounts(gethidden=True, getarchived=True)
provider_id = 1
# check number of in and out accounts
number_raw_accounts = len(self.connector.services[provider_id].listaccounts().keys())
number_processed_accounts = len(accounts[provider_id])
self.assertEquals(number_raw_accounts, number_processed_accounts, "Connector.listaccounts() method did not deliver correct number of accounts")
def test_getParamHash(self):
'''
Test hashing function
'''
test_hash = self.connector.getParamHash("test")
self.assertEquals(test_hash, "90a3ed9e32b2aaf4c61c410eb925426119e1a9dc53d4286ade99a809", "Connector.getParamHash() method is problematic")
def test_getaddressesbyaccount(self):
'''
Test getaddressesbyaccount() method
'''
account_name = "pipes"
provider_id = 1
addresses = self.connector.services[provider_id].getaddressesbyaccount(account_name)
# check number of address
self.assertEquals(len(addresses), 2, 'Connector.getaddressesbyaccount() method is not functioning properly, wrong count returned')
# check if addresses contain the "pipes" string
for address in addresses:
self.assertTrue(account_name in address, 'Connector.getaddressesbyaccount() method error, wrong address returned')
def test_getaddressesbyaccount_no_account_name(self):
'''
Test getaddressesbyaccount() method test with no account name
'''
account_name = False
provider_id = 1
addresses = self.connector.services[provider_id].getaddressesbyaccount(account_name)
self.assertTrue(addresses is False, 'Connector.getaddressesbyaccount() method error, wrong address returned')
def test_listTransactionsByAccount(self):
'''
Test listTransactionsByAccount() method
'''
accoount_name = "pipes"
provider_id = 1
transactions = self.connector.listTransactionsByAccount(accoount_name, provider_id)
correct_transactions = rawData['transactions']['pipes']
self.assertEquals(transactions, correct_transactions, 'Connector.listtransactions() method returned wrong number of transactions')
def test_getNewAddress(self):
'''
Test getnewaddress() method
'''
provider_id = 1
new_address = self.connector.getNewAddress(provider_id, rawData['new_account_address'])
self.assertEquals(new_address, rawData['new_account_address'])
def test_getnewaddress_invalid_currency(self):
'''
Test getnewaddress() method with invalid currency id
'''
new_address = self.connector.getNewAddress('INV', rawData['new_account_address'])
self.assertEquals(new_address, False)
def test_getnewaddress_unicode_account_name(self):
'''
Test getnewaddress() method with a unicode string account name
'''
account_name = u'thisisunicode'
provider_id = 1
new_address = self.connector.getNewAddress(provider_id, account_name)
self.assertEquals(new_address, 'new account address')
def test_getnewaddress_nonstring_account_name(self):
'''
Test getnewaddress() method with invalid currency id
'''
account_name = False
provider_id = 1
new_address = self.connector.getNewAddress(provider_id, account_name)
self.assertEquals(new_address, None)
def test_getBalance(self):
'''
Test getBalance() method
'''
provider_id = 1
balance = self.connector.getBalance(provider_id, "my test BTC account")
correct_result = {provider_id: rawData['accounts']['my test BTC account']}
self.assertEquals(balance, correct_result)
def test_getBalance_all_accounts(self):
'''
Test getBalance() method, return all accounts
'''
provider_id = 1
balance = self.connector.getBalance(provider_id)
correct_result = {provider_id: rawData['accounts']}
self.assertEquals(balance, correct_result)
def test_moveamount(self):
'''
Test moveamount() method
'''
from_account = "pipes"
to_account = "another account"
provider_id = 1
amount = "1"
minconf = 1
comment = "test comment from django test"
move_result = self.connector.moveAmount(from_account, to_account, provider_id, amount, minconf, comment)
self.assertEquals(move_result, True)
def test_moveamount_nonexisting_from_account(self):
'''
Test moveamount() method testing non-existing from account
'''
from_account = "Idontexistretsakalia" # non-existing account
to_account = "another account"
provider_id = 1
amount = "1"
minconf = 1
comment = "test comment from django test"
move_result = self.connector.moveAmount(from_account, to_account, provider_id, amount, minconf, comment)
self.assertNotEquals(move_result, True)
def test_moveamount_nonexistant_to_account(self):
'''
Test moveamount() method testing non-existing to account
'''
from_account = "pipes"
to_account = "Idontexistretsakalia"
provider_id = 0
amount = "1"
minconf = 1
comment = "test comment from django test"
move_result = self.connector.moveAmount(from_account, to_account, provider_id, amount, minconf, comment)
self.assertNotEquals(move_result, True)
def test_moveamount_invalid_currency(self):
'''
Test moveamount() method testing invalid currency
'''
from_account = "pipes"
to_account = "another account"
provider_id = 0
amount = "1"
minconf = 1
comment = "test comment from django test"
move_result = self.connector.moveAmount(from_account, to_account, provider_id, amount, minconf, comment)
self.assertNotEquals(move_result, True)
def test_moveamount_non_number_amount_1(self):
'''
Test moveamount() method testing non-number amount
'''
from_account = "pipes"
to_account = "another account"
provider_id = 1
amount = {}
minconf = 1
comment = "test comment from django test"
move_result = self.connector.moveAmount(from_account, to_account, provider_id, amount, minconf, comment)
self.assertNotEquals(move_result, True)
def test_moveamount_non_number_amount_2(self):
'''
Test moveamount() method testing non-number amount
'''
from_account = "pipes"
to_account = "another account"
provider_id = 1
amount = True
minconf = 1
comment = "test comment from django test"
move_result = self.connector.moveAmount(from_account, to_account, provider_id, amount, minconf, comment)
self.assertNotEquals(move_result, True)
def test_moveamount_non_number_amount_3(self):
'''
Test moveamount() method testing non-number amount
'''
from_account = "pipes"
to_account = "another account"
provider_id = 1
amount = ""
minconf = 1
comment = "test comment from django test"
move_result = self.connector.moveAmount(from_account, to_account, provider_id, amount, minconf, comment)
self.assertNotEquals(move_result, True)
def test_moveamount_non_number_amount_4(self):
'''
Test moveamount() method testing non-number amount
'''
from_account = "pipes"
to_account = "another account"
provider_id = 1
amount = u""
minconf = 1
comment = "test comment from django test"
move_result = self.connector.moveAmount(from_account, to_account, provider_id, amount, minconf, comment)
self.assertNotEquals(move_result, True)
def test_sendFrom_nonexistant_from_account(self):
'''
Test sendFrom() method testing non-existing from account
'''
from_account = "Idontexistretsakalia"
address = "address for sdfsdfs account"
currency = 'btc'
amount = "1"
minconf = 1
comment = "test comment from django test"
move_result = self.connector.sendFrom(from_account, address, currency, amount, minconf, comment)
self.assertNotEquals(move_result, True)
def test_sendFrom_nonexistant_address(self):
'''
Test sendFrom() method testing non-existing address
'''
from_account = "pipes"
address = "Idontexistretsakalia"
currency = 'btc'
amount = "1"
minconf = 1
comment = "test comment from django test"
move_result = self.connector.sendFrom(from_account, address, currency, amount, minconf, comment)
self.assertNotEquals(move_result, True)
def test_sendFrom_invalid_currency(self):
'''
Test sendFrom() method testing invalid currency
'''
from_account = "pipes"
address = "address for sdfsdfs account"
provider_id = 0
amount = "1"
minconf = 1
comment = "test comment from django test"
move_result = self.connector.sendFrom(from_account, address, provider_id, amount, minconf, comment)
self.assertNotEquals(move_result, True)
def test_sendFrom_non_number_amount_1(self):
'''
Test sendFrom() method testing non-number amount
'''
from_account = "pipes"
address = "address for sdfsdfs account"
provider_id = 1
amount = {}
minconf = 1
comment = "test comment from django test"
move_result = self.connector.sendFrom(from_account, address, provider_id, amount, minconf, comment)
self.assertNotEquals(move_result, True)
def test_sendFrom_non_number_amount_3(self):
'''
Test sendFrom() method testing non-number amount
'''
from_account = "pipes"
address = "address for sdfsdfs account"
provider_id = 1
amount = ""
minconf = 1
comment = "test comment from django test"
move_result = self.connector.sendFrom(from_account, address, provider_id, amount, minconf, comment)
self.assertNotEquals(move_result, True)
def test_sendFrom_non_number_amount_4(self):
'''
Test sendFrom() method testing non-number amount
'''
from_account = "pipes"
address = "address for sdfsdfs account"
provider_id = 1
amount = u""
minconf = 1
comment = "test comment from django test"
move_result = self.connector.sendFrom(from_account, address, provider_id, amount, minconf, comment)
self.assertNotEquals(move_result, True)
def test_walletpassphrase_invalid_passphrase_1(self):
'''
Test unload wallet
'''
passphrase = True
provider_id = 1
unlock_exit = self.connector.walletPassphrase(passphrase, provider_id)
self.assertNotEquals(unlock_exit, True)
def test_walletpassphrase_invalid_passphrase_2(self):
'''
Test unload wallet
'''
passphrase = {}
provider_id = 1
unlock_exit = self.connector.walletPassphrase(passphrase, provider_id)
self.assertNotEquals(unlock_exit, True)
def test_walletpassphrase_invalid_passphrase_3(self):
'''
Test unload wallet
'''
passphrase = None
provider_id = 1
unlock_exit = self.connector.walletPassphrase(passphrase, provider_id)
self.assertNotEquals(unlock_exit, True)
def test_walletpassphrase_invalid_passphrase_4(self):
'''
Test unload wallet
'''
passphrase = ""
provider_id = 1
unlock_exit = self.connector.walletPassphrase(passphrase, provider_id)
self.assertNotEquals(unlock_exit, True)
def test_walletpassphrase_invalid_currency_1(self):
'''
Test unload wallet
'''
passphrase = "<PASSWORD>"
provider_id = 'inv'
unlock_exit = self.connector.walletPassphrase(passphrase, provider_id)
self.assertNotEquals(unlock_exit, True)
def test_walletpassphrase_invalid_currency_2(self):
'''
Test unload wallet
'''
passphrase = "<PASSWORD>"
provider_id = ""
unlock_exit = self.connector.walletPassphrase(passphrase, provider_id)
self.assertNotEquals(unlock_exit, True)
def test_walletpassphrase_invalid_currency_3(self):
'''
Test unload wallet
'''
passphrase = "<PASSWORD>"
provider_id = False
unlock_exit = self.connector.walletPassphrase(passphrase, provider_id)
self.assertNotEquals(unlock_exit, True)
def test_walletlock_invalid_currency_1(self):
'''
Test walletLock()
'''
provider_id = "pip"
lock_exit = self.connector.walletLock(provider_id)
self.assertNotEquals(lock_exit, True)
def test_walletlock_invalid_currency_2(self):
'''
Test walletLock()
'''
provider_id = ""
lock_exit = self.connector.walletLock(provider_id)
self.assertNotEquals(lock_exit, True)
def test_walletlock_invalid_currency_3(self):
'''
Test walletLock()
'''
provider_id = False
lock_exit = self.connector.walletLock(provider_id)
self.assertNotEquals(lock_exit, True)
def test_getTransaction(self):
'''
Test getTransaction()
'''
correct_transaction = rawData['transactions']['pipes'][0]
txid = correct_transaction['txid']
provider_id = 1
transaction = self.connector.getTransaction(txid, provider_id)
self.assertEquals(transaction, correct_transaction)
def test_getTransaction_invalid_provider_id_1(self):
'''
Test getTransaction() with invalid provider_id
'''
transaction = rawData['transactions']['pipes'][0]
txid = transaction['txid']
provider_id = 0
transaction = self.connector.getTransaction(txid, provider_id)
self.assertNotEquals(transaction.get('code', None), None)
def test_getTransaction_invalid_provider_id_2(self):
'''
Test getTransaction() with invalid provider_id
'''
transaction = rawData['transactions']['pipes'][0]
txid = transaction['txid']
provider_id = None
transaction = self.connector.getTransaction(txid, provider_id)
self.assertNotEquals(transaction.get('code', None), None)
def test_getTransaction_invalid_txid_1(self):
'''
Test getTransaction() with invalid txid
'''
txid = "otinanai"
provider_id = 1
transaction = self.connector.getTransaction(txid, provider_id)
self.assertNotEquals(transaction, None)
def test_gettransactiondetails_invalid_txid_2(self):
'''
Test gettransactiondetails()
'''
txid = False
provider_id = 1
transaction = self.connector.getTransaction(txid, provider_id)
self.assertNotEquals(transaction, None)
|
1651153
|
import subprocess as sp
import torch as th
def run_bench(name, *args, device="cpu"):
args = list(args)
args += ["-d", device]
if device == "cuda" and not th.cuda.is_available():
return "Not available /!\\"
return sp.check_output(["python3", "-m", f"bench.{name}"] + args).decode('utf8')
def main():
template = f"""\
## Benchmarking and verification of Julius
In order to verify the correctness and speed of the implementations in Julius,
we compare ourselves to different reference implementations, comparing speed and
checking how far we are.
### ResampleFrac
We compare `julius.resample` to `resampy`, on an input of size (32, 8 * 44100),
i.e. a batch of 32 clips of 8 seconds of audio at 44.1kHz.
We use the same number of zero crossing as `resampy` for this benchmark.
The small delta is probably
due to the different window function used.
On CPU we have:
{run_bench('resample')}
On GPU we have:
{run_bench('resample', device='cuda')}
### FFTConv1d
We compare to `torch.nn.functional.conv1d`, on an input of size [32, 32, 10240],
for a convolution with 32 input channels, 64 output channels and various kernel sizes.
On CPU we have:
{run_bench('fftconv')}
On GPU we have:
{run_bench('fftconv', device='cuda')}
### LowPassFilter
We do not compare to anything, but measure the attenuation in dB of a pure tone
at `0.9 * cutoff`, at the `cutoff`, and at `1.1 * cutoff`.
Note that our implementation automatically chooses whether to use FFTConv1d when appropriate.
On CPU we have:
{run_bench('lowpass')}
On GPU we have:
{run_bench('lowpass', device='cuda')}
"""
print(template)
if __name__ == "__main__":
main()
|
1651163
|
from __future__ import print_function
import tweepy
import os
from pyspark.sql import *
from pyspark.sql.types import *
from pyspark.sql.functions import col
from pyspark.ml import Pipeline, PipelineModel
from pyspark.ml.classification import LogisticRegression
from pyspark.ml.feature import HashingTF, Tokenizer, StringIndexer, NGram, IDF
MAX_TWEETS = 50
ACCESS_TOKEN = os.environ['TWITTER_CONSUMER_ACCESS_TOKEN']
ACCESS_SECRET = os.environ['TWITTER_CONSUMER_ACCESS_TOKEN_SECRET']
CONSUMER_KEY = os.environ['TWITTER_CONSUMER_KEY']
CONSUMER_SECRET = os.environ['TWITTER_CONSUMER_SECRET']
def initialize():
spark = SparkSession \
.builder \
.appName("search-flight-spark-ml-model") \
.getOrCreate()
sc = spark.sparkContext
auth = tweepy.OAuthHandler(CONSUMER_KEY, CONSUMER_SECRET)
auth.set_access_token(ACCESS_TOKEN, ACCESS_SECRET)
api = tweepy.API(auth)
important_fields = ['id', 'text', 'user']
schema = StructType([
StructField('id', LongType(), False),
StructField('text', StringType(), False),
StructField('username', StringType(), False)
])
tweetsDf = spark.createDataFrame(sc.emptyRDD(), schema)
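    # Collect up to MAX_TWEETS tweets matching the query, stripping quotes and
    # newlines from each tweet's text before appending it as a one-row DataFrame.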
for tweet in tweepy.Cursor(api.search, q='barajas', rpp=100, lang='en').items(MAX_TWEETS):
json_tweet = {k: tweet._json[k] for k in important_fields}
json_tweet['text'] = json_tweet['text'].replace("'", "").replace("\"", "").replace("\n", "")
tweetDf = spark.createDataFrame([
(json_tweet['id'], json_tweet['text'], json_tweet['user']['name'])
], schema)
tweetsDf = tweetsDf.union(tweetDf)
tweets_df_splitted = tweetsDf.randomSplit([0.75, 0.25], MAX_TWEETS)
training_set = tweets_df_splitted[0]
test_set = tweets_df_splitted[1]
username_indexed = StringIndexer(inputCol="username", outputCol="username_indexed")
tokenizer = Tokenizer(inputCol="text", outputCol="token_raw")
ngram = NGram(inputCol="token_raw", outputCol="ngram", n=2)
hashing_tf = HashingTF(inputCol="ngram", outputCol="tf", numFeatures=20)
idf = IDF(inputCol="tf", outputCol="idf", minDocFreq=2)
lr = LogisticRegression(featuresCol="idf", labelCol="username_indexed")
pipeline = Pipeline(stages=[username_indexed, tokenizer, ngram, hashing_tf, idf, lr])
pipeline_model = pipeline.fit(training_set)
pipeline_model.write().overwrite().save("tweet_traveling_partners_model")
tweet_traveling_partners_prediction = pipeline_model.transform(test_set)
selected = tweet_traveling_partners_prediction.select("username", "text", "probability", "prediction")
for row in selected.collect():
print(row)
spark.stop()
if __name__ == "__main__":
initialize()
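# Illustrative launch (the script name is an assumption; the environment
# variable names match those read above):
#
#   export TWITTER_CONSUMER_KEY=... TWITTER_CONSUMER_SECRET=...
#   export TWITTER_CONSUMER_ACCESS_TOKEN=... TWITTER_CONSUMER_ACCESS_TOKEN_SECRET=...
#   spark-submit search_flight_spark_ml_model.py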
|
1651189
|
from django.forms import ModelForm
from common.models import CassandraFamilyMember
class CassandraFamilyMemberForm(ModelForm):
class Meta:
model = CassandraFamilyMember
exclude = ('created_on',)
|
1651210
|
def extractLionMasksReallyProfessionalTranslations(item):
"""
Lion Mask's Really Professional Translations
"""
vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
if not (chp or vol or frag) or 'preview' in item['title'].lower():
return None
if 'slime-translation' in item['tags']:
return buildReleaseMessageWithType(item, 'Tensei Shitara Slime Datta Ken', vol, chp, frag=frag, postfix=postfix)
if item['title'].lower().startswith('shaman chapter '):
return buildReleaseMessageWithType(item, 'The Shaman can\'t become a Hero', vol, chp, frag=frag, postfix=postfix)
return False
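# Illustrative call (the item dict is invented for the example):
#
#   extractLionMasksReallyProfessionalTranslations({
#       'title': 'Shaman chapter 12',
#       'tags': [],
#   })
#   # -> a release message for "The Shaman can't become a Hero", chapter 12,
#   #    assuming extractVolChapterFragmentPostfix parses chp=12 from the title.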
|
1651256
|
import os
import time
import pickle
from tqdm import tqdm
import librosa
import soundfile as sf
import numpy as np
import oneflow as flow
import utils.data_utils as preprocess
from utils.dataset import trainingDataset
from model.model import Generator, Discriminator
class CycleGANTrainr(object):
def __init__(
self,
logf0s_normalization,
mcep_normalization,
coded_sps_A_norm,
coded_sps_B_norm,
model_checkpoint,
validation_A_dir,
output_A_dir,
validation_B_dir,
output_B_dir,
restart_training_at=None,
):
self.start_epoch = 0
self.num_epochs = 200000
self.mini_batch_size = 10
self.dataset_A = self.loadPickleFile(coded_sps_A_norm)
self.dataset_B = self.loadPickleFile(coded_sps_B_norm)
self.device = flow.device("cuda" if flow.cuda.is_available() else "cpu")
# Speech Parameters
logf0s_normalization = np.load(logf0s_normalization)
self.log_f0s_mean_A = logf0s_normalization["mean_A"]
self.log_f0s_std_A = logf0s_normalization["std_A"]
self.log_f0s_mean_B = logf0s_normalization["mean_B"]
self.log_f0s_std_B = logf0s_normalization["std_B"]
mcep_normalization = np.load(mcep_normalization)
self.coded_sps_A_mean = mcep_normalization["mean_A"]
self.coded_sps_A_std = mcep_normalization["std_A"]
self.coded_sps_B_mean = mcep_normalization["mean_B"]
self.coded_sps_B_std = mcep_normalization["std_B"]
# Generator and Discriminator
self.generator_A2B = Generator().to(self.device)
self.generator_B2A = Generator().to(self.device)
self.discriminator_A = Discriminator().to(self.device)
self.discriminator_B = Discriminator().to(self.device)
# Loss Functions
criterion_mse = flow.nn.MSELoss()
# Optimizer
g_params = list(self.generator_A2B.parameters()) + list(
self.generator_B2A.parameters()
)
d_params = list(self.discriminator_A.parameters()) + list(
self.discriminator_B.parameters()
)
# Initial learning rates
self.generator_lr = 2e-4
self.discriminator_lr = 1e-4
# Learning rate decay
self.generator_lr_decay = self.generator_lr / 200000
self.discriminator_lr_decay = self.discriminator_lr / 200000
# Starts learning rate decay from after this many iterations have passed
self.start_decay = 10000
self.generator_optimizer = flow.optim.Adam(
g_params, lr=self.generator_lr, betas=(0.5, 0.999)
)
self.discriminator_optimizer = flow.optim.Adam(
d_params, lr=self.discriminator_lr, betas=(0.5, 0.999)
)
        # To load previously saved models
self.modelCheckpoint = model_checkpoint
os.makedirs(self.modelCheckpoint, exist_ok=True)
# Validation set Parameters
self.validation_A_dir = validation_A_dir
self.output_A_dir = output_A_dir
os.makedirs(self.output_A_dir, exist_ok=True)
self.validation_B_dir = validation_B_dir
self.output_B_dir = output_B_dir
os.makedirs(self.output_B_dir, exist_ok=True)
        # Storing Discriminator and Generator Loss
self.generator_loss_store = []
self.discriminator_loss_store = []
self.file_name = "log_store_non_sigmoid.txt"
def adjust_lr_rate(self, optimizer, name="generator"):
if name == "generator":
self.generator_lr = max(0.0, self.generator_lr - self.generator_lr_decay)
for param_groups in optimizer.param_groups:
param_groups["lr"] = self.generator_lr
else:
self.discriminator_lr = max(
0.0, self.discriminator_lr - self.discriminator_lr_decay
)
for param_groups in optimizer.param_groups:
param_groups["lr"] = self.discriminator_lr
def reset_grad(self):
self.generator_optimizer.zero_grad()
self.discriminator_optimizer.zero_grad()
def train(self):
# Training Begins
for epoch in range(self.start_epoch, self.num_epochs):
start_time_epoch = time.time()
# Constants
cycle_loss_lambda = 10
identity_loss_lambda = 5
# Preparing Dataset
n_samples = len(self.dataset_A)
dataset = trainingDataset(
datasetA=self.dataset_A, datasetB=self.dataset_B, n_frames=128
)
train_loader = flow.utils.data.DataLoader(
dataset=dataset,
batch_size=self.mini_batch_size,
shuffle=True,
drop_last=False,
)
            pbar = tqdm(enumerate(train_loader))
            for i, (real_A, real_B) in pbar:
num_iterations = (n_samples // self.mini_batch_size) * epoch + i
if num_iterations > 10000:
identity_loss_lambda = 0
if num_iterations > self.start_decay:
self.adjust_lr_rate(self.generator_optimizer, name="generator")
self.adjust_lr_rate(self.generator_optimizer, name="discriminator")
real_A = real_A.to(self.device).float()
real_B = real_B.to(self.device).float()
# Generator Loss function
fake_B = self.generator_A2B(real_A)
cycle_A = self.generator_B2A(fake_B)
fake_A = self.generator_B2A(real_B)
cycle_B = self.generator_A2B(fake_A)
identity_A = self.generator_B2A(real_A)
identity_B = self.generator_A2B(real_B)
d_fake_A = self.discriminator_A(fake_A)
d_fake_B = self.discriminator_B(fake_B)
                # for the second step adversarial loss
d_fake_cycle_A = self.discriminator_A(cycle_A)
d_fake_cycle_B = self.discriminator_B(cycle_B)
# Generator Cycle loss
cycleLoss = flow.mean(flow.abs(real_A - cycle_A)) + flow.mean(
flow.abs(real_B - cycle_B)
)
# Generator Identity Loss
                identityLoss = flow.mean(flow.abs(real_A - identity_A)) + flow.mean(
                    flow.abs(real_B - identity_B)
                )
# Generator Loss
generator_loss_A2B = flow.mean((1 - d_fake_B) ** 2)
generator_loss_B2A = flow.mean((1 - d_fake_A) ** 2)
# Total Generator Loss
generator_loss = (
generator_loss_A2B
+ generator_loss_B2A
+ cycle_loss_lambda * cycleLoss
                    + identity_loss_lambda * identityLoss
)
self.generator_loss_store.append(generator_loss.item())
# Backprop for Generator
self.reset_grad()
generator_loss.backward()
self.generator_optimizer.step()
# Discriminator Feed Forward
d_real_A = self.discriminator_A(real_A)
d_real_B = self.discriminator_B(real_B)
generated_A = self.generator_B2A(real_B)
d_fake_A = self.discriminator_A(generated_A)
                # for the second step adversarial loss
cycled_B = self.generator_A2B(generated_A)
d_cycled_B = self.discriminator_B(cycled_B)
generated_B = self.generator_A2B(real_A)
d_fake_B = self.discriminator_B(generated_B)
                # for the second step adversarial loss
cycled_A = self.generator_B2A(generated_B)
d_cycled_A = self.discriminator_A(cycled_A)
# Loss Functions
d_loss_A_real = flow.mean((1 - d_real_A) ** 2)
d_loss_A_fake = flow.mean((0 - d_fake_A) ** 2)
d_loss_A = (d_loss_A_real + d_loss_A_fake) / 2.0
d_loss_B_real = flow.mean((1 - d_real_B) ** 2)
d_loss_B_fake = flow.mean((0 - d_fake_B) ** 2)
d_loss_B = (d_loss_B_real + d_loss_B_fake) / 2.0
                # the second step adversarial loss
d_loss_A_cycled = flow.mean((0 - d_cycled_A) ** 2)
d_loss_B_cycled = flow.mean((0 - d_cycled_B) ** 2)
d_loss_A_2nd = (d_loss_A_real + d_loss_A_cycled) / 2.0
d_loss_B_2nd = (d_loss_B_real + d_loss_B_cycled) / 2.0
                # Final Loss for discriminator with the second step adversarial loss
d_loss = (d_loss_A + d_loss_B) / 2.0 + (
d_loss_A_2nd + d_loss_B_2nd
) / 2.0
self.discriminator_loss_store.append(d_loss.item())
# Backprop for Discriminator
self.reset_grad()
d_loss.backward()
self.discriminator_optimizer.step()
if (i + 1) % 2 == 0:
pbar.set_description(
"Iter:{} Generator Loss:{:.4f} Discrimator Loss:{:.4f} GA2B:{:.4f} GB2A:{:.4f} G_id:{:.4f} G_cyc:{:.4f} D_A:{:.4f} D_B:{:.4f}".format(
num_iterations,
generator_loss.item(),
d_loss.item(),
generator_loss_A2B,
generator_loss_B2A,
identiyLoss,
cycleLoss,
d_loss_A,
d_loss_B,
)
)
if epoch % 2000 == 0 and epoch != 0:
end_time = time.time()
store_to_file = "Epoch: {} Generator Loss: {:.4f} Discriminator Loss: {}, Time: {:.2f}\n\n".format(
epoch,
generator_loss.item(),
d_loss.item(),
end_time - start_time_epoch,
)
self.store_to_file(store_to_file)
print(
"Epoch: {} Generator Loss: {:.4f} Discriminator Loss: {}, Time: {:.2f}\n\n".format(
epoch,
generator_loss.item(),
d_loss.item(),
end_time - start_time_epoch,
)
)
# Save the Entire model
print("Saving model Checkpoint ......")
store_to_file = "Saving model Checkpoint ......"
self.store_to_file(store_to_file)
self.saveModelCheckPoint(epoch, self.modelCheckpoint)
print("Model Saved!")
if epoch % 2000 == 0 and epoch != 0:
# Validation Set
validation_start_time = time.time()
self.validation_for_A_dir()
self.validation_for_B_dir()
validation_end_time = time.time()
store_to_file = "Time taken for validation Set: {}".format(
validation_end_time - validation_start_time
)
self.store_to_file(store_to_file)
print(
"Time taken for validation Set: {}".format(
validation_end_time - validation_start_time
)
)
def infer(self, PATH="sample"):
num_mcep = 36
sampling_rate = 16000
frame_period = 5.0
n_frames = 128
infer_A_dir = PATH
output_A_dir = PATH
for file in os.listdir(infer_A_dir):
filePath = os.path.join(infer_A_dir, file)
wav, _ = librosa.load(filePath, sr=sampling_rate, mono=True)
wav = preprocess.wav_padding(
wav=wav, sr=sampling_rate, frame_period=frame_period, multiple=4
)
f0, timeaxis, sp, ap = preprocess.world_decompose(
wav=wav, fs=sampling_rate, frame_period=frame_period
)
f0_converted = preprocess.pitch_conversion(
f0=f0,
mean_log_src=self.log_f0s_mean_A,
std_log_src=self.log_f0s_std_A,
mean_log_target=self.log_f0s_mean_B,
std_log_target=self.log_f0s_std_B,
)
coded_sp = preprocess.world_encode_spectral_envelop(
sp=sp, fs=sampling_rate, dim=num_mcep
)
coded_sp_transposed = coded_sp.T
coded_sp_norm = (
coded_sp_transposed - self.coded_sps_A_mean
) / self.coded_sps_A_std
coded_sp_norm = np.array([coded_sp_norm])
if flow.cuda.is_available():
coded_sp_norm = flow.tensor(coded_sp_norm).cuda().float()
else:
coded_sp_norm = flow.tensor(coded_sp_norm).float()
coded_sp_converted_norm = self.generator_A2B(coded_sp_norm)
coded_sp_converted_norm = coded_sp_converted_norm.cpu().detach().numpy()
coded_sp_converted_norm = np.squeeze(coded_sp_converted_norm)
coded_sp_converted = (
coded_sp_converted_norm * self.coded_sps_B_std + self.coded_sps_B_mean
)
coded_sp_converted = coded_sp_converted.T
coded_sp_converted = np.ascontiguousarray(coded_sp_converted)
decoded_sp_converted = preprocess.world_decode_spectral_envelop(
coded_sp=coded_sp_converted, fs=sampling_rate
)
wav_transformed = preprocess.world_speech_synthesis(
f0=f0_converted,
decoded_sp=decoded_sp_converted,
ap=ap,
fs=sampling_rate,
frame_period=frame_period,
)
sf.write(
os.path.join(output_A_dir, "convert_" + os.path.basename(file)),
wav_transformed,
sampling_rate,
)
def validation_for_A_dir(self):
num_mcep = 36
sampling_rate = 16000
frame_period = 5.0
n_frames = 128
validation_A_dir = self.validation_A_dir
output_A_dir = self.output_A_dir
print("Generating Validation Data B from A...")
for file in os.listdir(validation_A_dir):
filePath = os.path.join(validation_A_dir, file)
wav, _ = librosa.load(filePath, sr=sampling_rate, mono=True)
wav = preprocess.wav_padding(
wav=wav, sr=sampling_rate, frame_period=frame_period, multiple=4
)
f0, timeaxis, sp, ap = preprocess.world_decompose(
wav=wav, fs=sampling_rate, frame_period=frame_period
)
f0_converted = preprocess.pitch_conversion(
f0=f0,
mean_log_src=self.log_f0s_mean_A,
std_log_src=self.log_f0s_std_A,
mean_log_target=self.log_f0s_mean_B,
std_log_target=self.log_f0s_std_B,
)
coded_sp = preprocess.world_encode_spectral_envelop(
sp=sp, fs=sampling_rate, dim=num_mcep
)
coded_sp_transposed = coded_sp.T
coded_sp_norm = (
coded_sp_transposed - self.coded_sps_A_mean
) / self.coded_sps_A_std
coded_sp_norm = np.array([coded_sp_norm])
if flow.cuda.is_available():
coded_sp_norm = flow.tensor(coded_sp_norm).cuda().float()
else:
coded_sp_norm = flow.tensor(coded_sp_norm).float()
coded_sp_converted_norm = self.generator_A2B(coded_sp_norm)
coded_sp_converted_norm = coded_sp_converted_norm.cpu().detach().numpy()
coded_sp_converted_norm = np.squeeze(coded_sp_converted_norm)
coded_sp_converted = (
coded_sp_converted_norm * self.coded_sps_B_std + self.coded_sps_B_mean
)
coded_sp_converted = coded_sp_converted.T
coded_sp_converted = np.ascontiguousarray(coded_sp_converted)
decoded_sp_converted = preprocess.world_decode_spectral_envelop(
coded_sp=coded_sp_converted, fs=sampling_rate
)
wav_transformed = preprocess.world_speech_synthesis(
f0=f0_converted,
decoded_sp=decoded_sp_converted,
ap=ap,
fs=sampling_rate,
frame_period=frame_period,
)
sf.write(
os.path.join(output_A_dir, os.path.basename(file)),
wav_transformed,
sampling_rate,
)
def validation_for_B_dir(self):
num_mcep = 36
sampling_rate = 16000
frame_period = 5.0
n_frames = 128
validation_B_dir = self.validation_B_dir
output_B_dir = self.output_B_dir
print("Generating Validation Data A from B...")
for file in os.listdir(validation_B_dir):
filePath = os.path.join(validation_B_dir, file)
wav, _ = librosa.load(filePath, sr=sampling_rate, mono=True)
wav = preprocess.wav_padding(
wav=wav, sr=sampling_rate, frame_period=frame_period, multiple=4
)
f0, timeaxis, sp, ap = preprocess.world_decompose(
wav=wav, fs=sampling_rate, frame_period=frame_period
)
f0_converted = preprocess.pitch_conversion(
f0=f0,
mean_log_src=self.log_f0s_mean_B,
std_log_src=self.log_f0s_std_B,
mean_log_target=self.log_f0s_mean_A,
std_log_target=self.log_f0s_std_A,
)
coded_sp = preprocess.world_encode_spectral_envelop(
sp=sp, fs=sampling_rate, dim=num_mcep
)
coded_sp_transposed = coded_sp.T
coded_sp_norm = (
coded_sp_transposed - self.coded_sps_B_mean
) / self.coded_sps_B_std
coded_sp_norm = np.array([coded_sp_norm])
if flow.cuda.is_available():
coded_sp_norm = flow.tensor(coded_sp_norm).cuda().float()
else:
coded_sp_norm = flow.tensor(coded_sp_norm).float()
coded_sp_converted_norm = self.generator_B2A(coded_sp_norm)
coded_sp_converted_norm = coded_sp_converted_norm.cpu().detach().numpy()
coded_sp_converted_norm = np.squeeze(coded_sp_converted_norm)
coded_sp_converted = (
coded_sp_converted_norm * self.coded_sps_A_std + self.coded_sps_A_mean
)
coded_sp_converted = coded_sp_converted.T
coded_sp_converted = np.ascontiguousarray(coded_sp_converted)
decoded_sp_converted = preprocess.world_decode_spectral_envelop(
coded_sp=coded_sp_converted, fs=sampling_rate
)
wav_transformed = preprocess.world_speech_synthesis(
f0=f0_converted,
decoded_sp=decoded_sp_converted,
ap=ap,
fs=sampling_rate,
frame_period=frame_period,
)
sf.write(
os.path.join(output_B_dir, os.path.basename(file)),
wav_transformed,
sampling_rate,
)
def savePickle(self, variable, fileName):
with open(fileName, "wb") as f:
pickle.dump(variable, f)
def loadPickleFile(self, fileName):
with open(fileName, "rb") as f:
return pickle.load(f)
def store_to_file(self, doc):
doc = doc + "\n"
with open(self.file_name, "a") as myfile:
myfile.write(doc)
def saveModelCheckPoint(self, epoch, PATH):
flow.save(
self.generator_A2B.state_dict(),
os.path.join(PATH, "generator_A2B_%d" % epoch),
)
flow.save(
self.generator_B2A.state_dict(),
os.path.join(PATH, "generator_B2A_%d" % epoch),
)
flow.save(
self.discriminator_A.state_dict(),
os.path.join(PATH, "discriminator_A_%d" % epoch),
)
flow.save(
self.discriminator_B.state_dict(),
os.path.join(PATH, "discriminator_B_%d" % epoch),
)
def loadModel(self, PATH):
self.generator_A2B.load_state_dict(
flow.load(os.path.join(PATH, "generator_A2B"))
)
self.generator_B2A.load_state_dict(
flow.load(os.path.join(PATH, "generator_B2A"))
)
self.discriminator_A.load_state_dict(
flow.load(os.path.join(PATH, "discriminator_A"))
)
self.discriminator_B.load_state_dict(
flow.load(os.path.join(PATH, "discriminator_B"))
)
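# Minimal usage sketch -- every path below is a placeholder, not part of the
# original project:
#
#     trainer = CycleGANTrainr(
#         logf0s_normalization="cache/logf0s_normalization.npz",
#         mcep_normalization="cache/mcep_normalization.npz",
#         coded_sps_A_norm="cache/coded_sps_A_norm.pickle",
#         coded_sps_B_norm="cache/coded_sps_B_norm.pickle",
#         model_checkpoint="model_checkpoint/",
#         validation_A_dir="data/evaluation_all/SF1/",
#         output_A_dir="converted_sound/SF1/",
#         validation_B_dir="data/evaluation_all/TF2/",
#         output_B_dir="converted_sound/TF2/",
#     )
#     trainer.train()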
|
1651271
|
import unittest
from nose.config import Config
from nose.plugins.skip import Skip, SkipTest
from nose.result import TextTestResult
try:
    from StringIO import StringIO  # Python 2
except ImportError:
    from io import StringIO  # Python 3
from nose.result import _TextTestResult
from optparse import OptionParser
try:
# 2.7+
from unittest.runner import _WritelnDecorator
except ImportError:
from unittest import _WritelnDecorator
class TestSkipPlugin(unittest.TestCase):
def test_api_present(self):
sk = Skip()
sk.addOptions
sk.configure
sk.prepareTestResult
def test_prepare_patches_result(self):
stream = _WritelnDecorator(StringIO())
res = _TextTestResult(stream, 0, 1)
sk = Skip()
sk.prepareTestResult(res)
res._orig_addError
res._orig_printErrors
res._orig_wasSuccessful
res.skipped
self.assertEqual(res.errorClasses,
{SkipTest: (res.skipped, 'SKIP', False)})
# result w/out print works too
res = unittest.TestResult()
sk = Skip()
sk.prepareTestResult(res)
res._orig_addError
res.skipped
self.assertEqual(res.errorClasses,
{SkipTest: (res.skipped, 'SKIP', False)})
def test_patched_result_handles_skip(self):
res = unittest.TestResult()
sk = Skip()
sk.prepareTestResult(res)
class TC(unittest.TestCase):
def test(self):
raise SkipTest('skip me')
test = TC('test')
test(res)
assert not res.errors, "Skip was not caught: %s" % res.errors
assert res.skipped
assert res.skipped[0][0] is test
def test_patches_only_when_needed(self):
class NoPatch(unittest.TestResult):
def __init__(self):
self.errorClasses = {}
res = NoPatch()
sk = Skip()
sk.prepareTestResult(res)
assert not hasattr(res, '_orig_addError'), \
"Skip patched a result class it didn't need to patch"
def test_skip_output(self):
class TC(unittest.TestCase):
def test(self):
raise SkipTest('skip me')
stream = _WritelnDecorator(StringIO())
res = _TextTestResult(stream, 0, 1)
sk = Skip()
sk.prepareTestResult(res)
test = TC('test')
test(res)
assert not res.errors, "Skip was not caught: %s" % res.errors
assert res.skipped
res.printErrors()
out = stream.getvalue()
        print(out)
assert out
assert out.strip() == "S"
assert res.wasSuccessful()
def test_skip_output_verbose(self):
class TC(unittest.TestCase):
def test(self):
raise SkipTest('skip me too')
stream = _WritelnDecorator(StringIO())
res = _TextTestResult(stream, 0, verbosity=2)
sk = Skip()
sk.prepareTestResult(res)
test = TC('test')
test(res)
assert not res.errors, "Skip was not caught: %s" % res.errors
assert res.skipped
res.printErrors()
out = stream.getvalue()
        print(out)
assert out
assert ' ... SKIP' in out
assert 'skip me too' in out
def test_enabled_by_default(self):
sk = Skip()
assert sk.enabled, "Skip was not enabled by default"
def test_can_be_disabled(self):
parser = OptionParser()
sk = Skip()
sk.addOptions(parser)
options, args = parser.parse_args(['--no-skip'])
sk.configure(options, Config())
assert not sk.enabled, "Skip was not disabled by noSkip option"
if __name__ == '__main__':
unittest.main()
|
1651274
|
import argparse
import json
import os
import time
import torch
import numpy as np
from sklearn.metrics import accuracy_score, f1_score
from lab import lab
from utils import datamanager as dm
from utils.exp_log import Logger
from sklearn.model_selection import StratifiedKFold
from torch.utils.data import TensorDataset
parser = argparse.ArgumentParser(description='Run experiment.')
parser.add_argument('-e', '--experiment', default='cnn/cnn1_exp', help="experiment definition (json file)")
parser.add_argument('-d', '--dataset', default='hapt', help="from ['activemiles', 'hhar', 'fusion']")
parser.add_argument('-f', '--nfolds', default=5, help="number of folds", type=int)
parser.add_argument('-s', '--save', dest='save', action='store_true')
class Experiment:
def __init__(self, exp_def_file, dataset, n_folds, save_log):
self.exp_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', 'exp', exp_def_file + '.json')
self.exp_name = exp_def_file.split('/')[-1]
self.dataset = dataset
self.n_folds = n_folds
self.k_fold = 1
self.save_log = save_log
self.logger = None
@staticmethod
def __load_data__(dataset, gyro, preprocess):
return dm.load_dataset(dataset, seq_length=100, gyro=gyro, preprocess=preprocess)
def update(self, **kwargs):
return self.logger.update(self.k_fold, **kwargs)
def run(self):
with open(self.exp_path, 'r') as exp_file:
experiment_definition = json.load(exp_file)
gyro = experiment_definition["gyroscope"]
arch_type = experiment_definition["type"]
name = experiment_definition["name"]
preprocess = experiment_definition["preprocess"]
        log_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', 'log')
self.logger = Logger(exp_name=name, dataset=self.dataset, n_folds=self.n_folds,
save_log=self.save_log, log_path=log_path)
ds = self.__load_data__(self.dataset, gyro=gyro, preprocess=preprocess)
gyro = gyro and ds.x_gyr_train is not None
x, y = np.concatenate((ds.x_acc_train, ds.x_gyr_train), axis=2) if gyro else ds.x_acc_train, ds.y_train
x_ts_np, y_ts_np = np.concatenate((ds.x_acc_test, ds.x_gyr_test), axis=2) if gyro else ds.x_acc_test, ds.y_test
print("Test: features shape, labels shape, mean, standard deviation")
print(x_ts_np.shape, y_ts_np.shape, np.mean(x_ts_np), np.std(x_ts_np))
if arch_type == 'cnn':
x_ts_np = np.reshape(x_ts_np, newshape=(x_ts_np.shape[0], 1, x_ts_np.shape[1], x_ts_np.shape[2]))
elif arch_type == 'dbn':
x = np.reshape(x, newshape=(x.shape[0], x.shape[1] * x.shape[2]))
x_ts_np = np.reshape(x_ts_np, newshape=(x_ts_np.shape[0], x_ts_np.shape[1] * x_ts_np.shape[2]))
use_cuda = torch.cuda.is_available()
device = torch.device("cuda" if use_cuda else "cpu")
print(f'Using device: {device}')
print('Test: features shape, labels shape, mean, standard deviation')
print(x_ts_np.shape, y_ts_np.shape, np.mean(x_ts_np), np.std(x_ts_np))
x_ts = torch.from_numpy(x_ts_np).float().to(device)
y_ts = torch.from_numpy(y_ts_np).long().to(device)
n_out = np.unique(y).size
skf = StratifiedKFold(n_splits=self.n_folds, shuffle=True, random_state=0)
self.k_fold = 1
row = 'fold, time, best_epoch, best_accuracy, best_validation_f1, best_test_f1\n'
for tr_i, va_i in skf.split(X=x, y=y):
x_tr, x_va = x[tr_i], x[va_i]
y_tr, y_va = y[tr_i], y[va_i]
print("Training: features shape, labels shape, mean, standard deviation")
print(x_tr.shape, y_tr.shape, np.mean(x_tr), np.std(x_tr))
print("Validation: features shape, labels shape, mean, standard deviation")
print(x_va.shape, y_va.shape, np.mean(x_va), np.std(x_va))
if arch_type == 'dbn':
lab_experiment = lab.build_experiment(self.exp_path, n_out, seed=0)
start = time.time()
lab_experiment.fit(x_tr, y_tr)
end = time.time()
elapsed = end - start
y_pred_va = lab_experiment.predict(x_va)
best_validation_f1 = f1_score(y_va, y_pred_va, average='weighted')
epochs = lab_experiment.n_epochs
# Test
y_pred = lab_experiment.predict(x_ts_np)
best_accuracy = accuracy_score(y_ts_np, y_pred)
best_test_f1 = f1_score(y_ts_np, y_pred, average='weighted')
row += f'{self.k_fold},{elapsed},{epochs},{best_accuracy},{best_validation_f1},{best_test_f1}\n'
if self.save_log:
log_file_name = f'{self.dataset}_{self.exp_name}.csv'
log_file = os.path.join(log_path, log_file_name)
with open(log_file, "w") as text_file:
text_file.write(row)
else:
if arch_type == 'cnn':
x_tr = np.reshape(x_tr, newshape=(x_tr.shape[0], 1, x_tr.shape[1], x_tr.shape[2]))
x_va = np.reshape(x_va, newshape=(x_va.shape[0], 1, x_va.shape[1], x_va.shape[2]))
x_tr = torch.from_numpy(x_tr).float().to(device)
y_tr = torch.from_numpy(y_tr).long().to(device)
x_va = torch.from_numpy(x_va).float().to(device)
y_va = torch.from_numpy(y_va).long().to(device)
print(np.unique(y_tr.cpu().numpy(), return_counts=True))
print(np.unique(y_va.cpu().numpy(), return_counts=True))
print(np.unique(y_ts.cpu().numpy(), return_counts=True))
lab_experiment = lab.build_experiment(self.exp_path, n_out, seed=0)
print(lab_experiment.model)
lab_experiment.train(train_data=TensorDataset(x_tr, y_tr),
validation_data=TensorDataset(x_va, y_va),
test_data=TensorDataset(x_ts, y_ts),
update_callback=self.update)
self.k_fold += 1
if __name__ == "__main__":
args = parser.parse_args()
experiment = Experiment(args.experiment, args.dataset, args.nfolds, args.save)
experiment.run()
|
1651288
|
from uio import FileIO
from component import Component, components
class FileSystem:
def __init__(self, address):
self.fs = Component(address, components()[address])
self.address = address
self.readonly = self.fs.isReadOnly()
self.cwd = "/"
# noinspection PyUnusedLocal
def mount(self, readonly, mkfs):
self.readonly = self.fs.isReadOnly() or readonly
def umount(self):
pass
def ilistdir(self, dir):
for name in self.fs.list(dir):
if self.fs.isDirectory(dir + "/" + name):
yield (name, 0x4000, 0, -1)
else:
                size = self.fs.size(dir + "/" + name)
yield (name, 0x8000, 0, size)
def chdir(self, dir):
if not self.fs.isDirectory(dir):
raise OSError(1)
self.cwd = dir
def getcwd(self):
return self.cwd
def mkdir(self, path):
if self.readonly:
raise OSError(1)
result = self.fs.makeDirectory(path)
if not result:
exists = self.fs.exists(path)
if self.fs.isDirectory(path):
raise OSError(1)
elif exists: # file
raise OSError(1)
raise OSError(1)
def remove(self, path):
if self.readonly:
raise OSError(1)
self.fs.remove(path)
def rename(self, old_path, new_path):
if self.readonly:
raise OSError(1)
result = self.fs.rename(old_path, new_path)
if not result:
raise OSError(1)
def rmdir(self, path):
if self.readonly:
raise OSError(1)
if not self.fs.isDirectory(path):
if self.fs.exists(path):
# is file
raise OSError(1)
raise OSError(1)
result = self.fs.remove(path)
if not result:
raise OSError(1)
def stat(self, path):
if not self.fs.exists(path):
raise OSError(1)
is_dir = self.fs.isDirectory(path)
size = self.fs.size(path) if not is_dir else 0
mtime = self.fs.lastModified(path)
return (
0x4000 if is_dir else 0x8000, # st_mode
0, # st_ino
0, # dev
0, # nlink
0, # uid: root
0, # gid: root
size, # size
mtime, # atime
mtime, # mtime
mtime, # ctime
)
# noinspection PyUnusedLocal
def statvfs(self, path):
return (
0, # f_bsize
0, # f_frsize
0, # f_blocks
0, # f_bfree
0, # f_bavail
0, # f_files
0, # f_ffree
0, # f_favail
0, # f_flag
256, # f_namemax
)
def open(self, file, mode):
        # TODO: normalize mode
return FileIO(self.fs.address, file, mode)
def __repr__(self):
return "<FileSystem: {!r}>".format(self.address)
|
1651322
|
from django.core.exceptions import ValidationError
from django.core.validators import EmailValidator, RegexValidator
from lego.utils.validators import ReservedNameValidator
def validate_email_address(email_address):
if email_address.email != email_address.email.lower():
raise ValidationError("Email is not lowercased")
if email_address.is_assigned():
raise ValidationError("The address is already assigned")
def validate_email_address_content(email_address):
"""Make sure we only create valid emails."""
regex_validator = RegexValidator(regex=EmailValidator.user_regex)
    reserved_validator = ReservedNameValidator()
    regex_validator(email_address.email)
    reserved_validator(email_address.email)
|
1651349
|
from .constants import is_valid_uint256
from .memory import Memory
from .stack import Stack
class InvalidCalldataAccess(Exception):
...
# see yellow paper section 9.4.3
def valid_jump_destinations(code: bytes) -> set[int]:
from .opcodes import JUMPDEST, PUSH1, PUSH32
jumpdests = set()
i = 0
while i < len(code):
current_op = code[i]
if current_op == JUMPDEST.opcode:
jumpdests.add(i)
elif PUSH1.opcode <= current_op <= PUSH32.opcode:
i += current_op - PUSH1.opcode + 1
i += 1
return jumpdests
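# Worked example: code = bytes([0x60, 0x5b, 0x5b]) decodes as PUSH1 0x5b
# followed by a JUMPDEST, so only offset 2 is a valid jump target -- the
# 0x5b at offset 1 is PUSH immediate data and is skipped by the scanner.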
class Calldata:
def __init__(self, data=bytes()) -> None:
self.data = data
def read_byte(self, offset: int) -> int:
if offset < 0:
raise InvalidCalldataAccess({"offset": offset})
return self.data[offset] if offset < len(self.data) else 0
def read_word(self, offset: int) -> int:
return int.from_bytes(
[self.read_byte(x) for x in range(offset, offset + 32)], "big"
)
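    # Reads past the end of calldata yield zero bytes, matching EVM
    # semantics, e.g. Calldata(b"\x01").read_word(0) == 1 << 248.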
def __len__(self) -> int:
return len(self.data)
class ExecutionContext:
def __init__(
self, code=bytes(), pc=0, stack=None, memory=None, calldata=None
) -> None:
self.code = code
self.stack = stack if stack else Stack()
self.memory = memory if memory else Memory()
self.pc = pc
self.stopped = False
self.returndata = bytes()
self.jumpdests = valid_jump_destinations(code)
self.calldata = calldata if calldata else Calldata()
def set_return_data(self, offset: int, length: int) -> None:
self.stopped = True
self.returndata = self.memory.load_range(offset, length)
def stop(self) -> None:
self.stopped = True
def read_code(self, num_bytes) -> int:
"""
Returns the next num_bytes from the code buffer (at index pc) as an integer and advances pc by num_bytes.
"""
value = int.from_bytes(
self.code[self.pc : self.pc + num_bytes], byteorder="big"
)
self.pc += num_bytes
return value
def set_program_counter(self, pc: int) -> None:
self.pc = pc
def __str__(self) -> str:
return "stack: " + str(self.stack) + "\nmemory: " + str(self.memory)
def __repr__(self) -> str:
return str(self)
|
1651354
|
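# Relational-algebra plan fragment; `alg`, `scal`, `Reduction` and `Type` are
# assumed to be provided by the surrounding code generator. The tree encodes
# TPC-H Query 3 (sketch, ordering/limit clauses omitted):
#
#   SELECT l_orderkey, SUM(l_extendedprice * (1 - l_discount)) AS revenue,
#          o_orderdate, o_shippriority
#   FROM customer, orders, lineitem
#   WHERE c_mktsegment = 'BUILDING'
#     AND c_custkey = o_custkey AND l_orderkey = o_orderkey
#     AND o_orderdate < DATE '1995-03-15' AND l_shipdate > DATE '1995-03-15'
#   GROUP BY l_orderkey, o_orderdate, o_shippriority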
alg.aggregation (
[ "l_orderkey", "o_orderdate", "o_shippriority" ],
[ ( Reduction.SUM, "revenue", "sum_rev" ) ],
alg.map (
"revenue",
scal.MulExpr (
scal.AttrExpr ( "l_extendedprice" ),
scal.SubExpr (
scal.ConstExpr ( "1.0f", Type.FLOAT ),
scal.AttrExpr ( "l_discount" )
)
),
alg.join (
( "o_orderkey", "l_orderkey" ),
alg.join (
( "c_custkey", "o_custkey" ),
alg.selection (
scal.EqualsExpr (
scal.AttrExpr ( "c_mktsegment" ),
scal.ConstExpr ( "BUILDING", Type.STRING )
),
alg.scan ( "customer" )
),
alg.selection (
scal.SmallerExpr (
scal.AttrExpr ( "o_orderdate" ),
scal.ConstExpr ( "19950315", Type.DATE )
),
alg.scan ( "orders" )
)
),
alg.selection (
scal.LargerExpr (
scal.AttrExpr ( "l_shipdate" ),
scal.ConstExpr ( "19950315", Type.DATE )
),
alg.scan ( "lineitem" )
)
)
)
)
|
1651401
|
import numpy as np
import pandas as pd
import tempfile
import re
import pkg_resources
weblogo_version = pkg_resources.get_distribution('weblogo').version
try:
    # Parse before comparing: a plain string comparison misorders e.g. "3.10" < "3.7".
    if pkg_resources.parse_version(weblogo_version) < pkg_resources.parse_version("3.7"):
import weblogolib as wl
else:
import weblogo as wl
except ModuleNotFoundError:
import weblogo as wl
from seqlogo import utils
_sizes = {
'small': 3.54,
'medium': 5,
'large': 7.25,
'xlarge': 10.25
}
def seqlogo(pm, ic_scale = True, color_scheme = None, size = 'medium',
format = 'svg', filename = None, **kwargs):
"""The plotting method of the `seqlogo` distribution. Depends on using
any of the 3 classes exposed by `seqlogo`:
* `seqlogo.Ppm`
* `seqlogo.Pwm`
* `seqlogo.CompletePm`
Given an `M x N` PM matrix, where `M` is the number of positions and `N`
is the number of letters, calculate and render a WebLogo-like motif plot.
    When `ic_scale` is `True`, the height of each column is proportional to
    its information content and the y-axis is labeled in bits. Otherwise, all
    columns have the same height and the y-axis reflects probability.
Args:
pm (`seqlogo.Pm` subclass): a pre-formatted Pm instance
ic_scale (bool): whether or not to scale the column heights (default: True)
        size (str): small (3.54 in), medium (5 in), large (7.25 in), xlarge (10.25 in) (default: 'medium')
format (str): desired matplotlib supported output format Options are 'eps', 'pdf', 'png', 'jpeg', and 'svg' (default: "svg")
filename (None | str): Name of the file to save the figure. If `None`:
the figure will not be saved. (default: None)
color_scheme (str): the color scheme to use for weblogo:
'auto': None
'monochrome': all black
'base pairing': (NA Only) TAU are orange, GC are blue
'classic': (NA Only) classic WebLogo color scheme for nucleic acids
'hydrophobicity': (AA only) Color based on hydrophobicity
'chemistry': (AA only) Color based on chemical properties
'charge': (AA Only) Color based on charge
**kwargs: all additional keyword arguments found at http://weblogo.threeplusone.com/manual.html
"""
# Ensure color scheme matches the alphabet
if pm._alphabet_type in utils._NA_ALPHABETS:
if color_scheme is None:
color_scheme = 'classic'
if color_scheme not in utils.NA_COLORSCHEMES:
raise ValueError('{} color_scheme selected is not an allowed nucleic acid color scheme'.format(color_scheme))
elif pm._alphabet_type in utils._AA_ALPHABETS:
if color_scheme is None:
color_scheme = 'hydrophobicity'
if color_scheme not in utils.AA_COLORSCHEMES:
raise ValueError('{} color_scheme selected is not an allowed amino acid color scheme'.format(color_scheme))
color_scheme = wl.std_color_schemes[color_scheme]
# Setup the format writer
out_format = wl.formatters[format]
# Prepare the logo size
stack_width = (_sizes[size]/pm.length) * 72
# Initialize the options
if ic_scale:
unit_name = 'bits'
else:
unit_name = 'probability'
options = wl.LogoOptions(unit_name = unit_name, color_scheme = color_scheme,
show_fineprint = False, stack_width = stack_width, **kwargs)
    # Initialize the output format
logo_format = wl.LogoFormat(pm, options)
out = out_format(pm, logo_format)
# Create the file if the user supplied an filename
if filename:
with open('{}'.format(filename), 'wb') as out_file:
out_file.write(out)
if format == 'svg':
svg_hash = hash(out)
out = re.sub(rb'("#?glyph.*?)(")', rb'\1 %s\2' % str(svg_hash).encode(), out)
try:
if get_ipython():
import IPython.display as ipd
if format == 'svg':
return ipd.SVG(out)
elif format in ('png', 'jpeg', 'svg'):
return ipd.Image(out)
else:
raise ValueError('{} format not supported for plotting in console'.format(format))
except NameError:
if filename is None:
raise ValueError('If not in an IPython/Jupyter console and no filename is given, nothing will be rendered')
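# Usage sketch (assumes a 4-letter DNA probability matrix and the Ppm class
# exposed by this package):
#
#     import seqlogo
#     ppm = seqlogo.Ppm([[0.8, 0.1, 0.05, 0.05],
#                        [0.25, 0.25, 0.25, 0.25]])
#     seqlogo.seqlogo(ppm, ic_scale=True, format='png', size='medium',
#                     filename='logo.png')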
|
1651486
|
import re
from django import forms
from django.contrib.auth.models import User
from django.contrib.auth.forms import UserCreationForm
class UniqueUserEmailField(forms.EmailField):
"""
An EmailField which only is valid if no User has that email.
"""
def validate(self, value):
        super(UniqueUserEmailField, self).validate(value)
try:
User.objects.get(email = value)
raise forms.ValidationError("Email already exists")
except User.MultipleObjectsReturned:
raise forms.ValidationError("Email already exists")
except User.DoesNotExist:
pass
class ExtendedUserCreationForm(UserCreationForm):
"""
Extends the built in UserCreationForm in several ways:
* Adds an email field, which uses the custom UniqueUserEmailField,
that is, the form does not validate if the email address already exists
in the User table.
* The username field is generated based on the email, and isn't visible.
* first_name and last_name fields are added.
* Data not saved by the default behavior of UserCreationForm is saved.
"""
username = forms.CharField(required = False, max_length = 30)
email = UniqueUserEmailField(required = True, label = 'Email address')
first_name = forms.CharField(required = True, max_length = 30)
last_name = forms.CharField(required = True, max_length = 30)
def __init__(self, *args, **kwargs):
"""
Changes the order of fields, and removes the username field.
"""
super(UserCreationForm, self).__init__(*args, **kwargs)
        self.fields.keyOrder = ['email', 'first_name', 'last_name',
                                'password1', 'password2']  # standard UserCreationForm field names
def __generate_username(self, email):
"""
A simple way of deriving a username from an email address.
Hat tip: http://bit.ly/eIUR5R
>>> User.objects.all().order_by('-id')[0].id
1
>>> self.__generate_username("<EMAIL>")
abcabc2
>>> self.__generate_username("<EMAIL>")
heysup3
"""
# TODO: Something more efficient?
highest_user_id = User.objects.all().order_by('-id')[0].id
leading_part_of_email = email.split('@',1)[0]
leading_part_of_email = re.sub(r'[^a-zA-Z0-9+]', '',
leading_part_of_email)
truncated_part_of_email = leading_part_of_email[:3] \
+ leading_part_of_email[-3:]
derived_username = truncated_part_of_email + str(highest_user_id+1)
return derived_username
def clean(self, *args, **kwargs):
"""
Normal cleanup + username generation.
"""
cleaned_data = super(UserCreationForm, self).clean(*args, **kwargs)
        if 'email' in cleaned_data:
cleaned_data['username'] = self.__generate_username(
cleaned_data['email'])
return cleaned_data
def save(self, commit=True):
"""
Saves the email, first_name and last_name properties, after the normal
save behavior is complete.
"""
user = super(UserCreationForm, self).save(commit)
if user:
user.email = self.cleaned_data['email']
user.first_name = self.cleaned_data['first_name']
user.last_name = self.cleaned_data['last_name']
            user.set_password(self.cleaned_data['password1'])
if commit:
user.save()
return user
|
1651507
|
import os
import logging
import datetime
import unittest
import uuid
from celery_connectors.utils import ev
from celery_connectors.publisher import Publisher
from celery_connectors.kombu_subscriber import KombuSubscriber
log = logging.getLogger("base_test")
class BaseTestCase(unittest.TestCase):
debug = False
exchange_name = ev("TEST_EXCHANGE", "test.events")
queue_name = ev("TEST_QUEUE", "test.events.conversions")
routing_key = ev("TEST_ROUTING_KEY", "test.events.conversions")
exchange = None
queue = None
rabbitmq_auth_url = ev("TEST_RABBITMQ_BROKER_URL", "pyamqp://rabbitmq:rabbitmq@localhost:5672//")
redis_auth_url = ev("TEST_REDIS_BROKER_URL", "redis://localhost:6379/0")
pub_auth_url = rabbitmq_auth_url
sub_auth_url = rabbitmq_auth_url
pub_ssl_options = {}
sub_ssl_options = {}
pub_attrs = {}
sub_attrs = {}
pub_serializer = "json"
sub_serializer = "application/json"
    test_id = str(uuid.uuid4()).replace("-", "")
test_body = {"account_id": 123,
"subscription_id": 456,
"stripe_id": 789,
"product_id": "ABC"}
pub_msgs = []
sub_msgs = []
last_pub_msg = None
last_sub_msg = None
last_sub_callback = None
def setUp(self):
if self.debug:
print("setUp")
# state trips in the custom classes
os.environ["TEST_STOP_DONE"] = "1"
self.last_pub_msg = None
self.last_sub_msg = None
self.pub = None
self.sub = None
self.pub_msgs = []
self.sub_msgs = []
self.exchange_name = ev("TEST_EXCHANGE", "test.events")
self.routing_key = ev("TEST_ROUTING_KEY", "test.events.conversions")
self.queue_name = ev("TEST_QUEUE", "test.events.conversions")
self.exchange = None
self.queue = None
self.last_sub_callback = None
# end of setUp
def tearDown(self):
if self.debug:
print("tearDown")
self.pub = None
self.sub = None
self.exchange = None
self.queue = None
self.last_sub_callback = None
# end of tearDown
def handle_message(self,
body,
msg):
log.info(("test={} BASETEST handle_message got "
"body={} msg={}")
.format(self.test_id,
body,
msg))
if msg:
msg.ack()
# end of handle_message
def connect_pub(self,
auth_url=None,
ssl_options={},
attrs={}):
use_auth_url = self.pub_auth_url
use_ssl_options = self.pub_ssl_options
use_pub_attrs = self.pub_attrs
if auth_url:
use_auth_url = auth_url
        if len(ssl_options) > 0:
            use_ssl_options = ssl_options
        if len(attrs) > 0:
            use_pub_attrs = attrs
self.pub = Publisher("test-pub",
use_auth_url,
use_ssl_options)
# end of connect_pub
def connect_sub(self,
auth_url=None,
ssl_options={},
attrs={}):
use_auth_url = self.sub_auth_url
use_ssl_options = self.sub_ssl_options
use_sub_attrs = self.sub_attrs
if auth_url:
use_auth_url = auth_url
        if len(ssl_options) > 0:
            use_ssl_options = ssl_options
        if len(attrs) > 0:
            use_sub_attrs = attrs
self.sub = KombuSubscriber("test-sub",
use_auth_url,
use_ssl_options)
# end of connect_sub
def build_msg(self,
test_values={}):
body = {"test_id": self.test_id,
"date": datetime.datetime.now().strftime("%Y-%m-%d %H-%M-%S"),
"msg_id": str(uuid.uuid4()).replace("-", ""),
"test_values": test_values}
return body
# end of build_msg
def consume(self,
callback=None,
queue=queue,
exchange=exchange,
routing_key=routing_key,
serializer="application/json",
heartbeat=60,
time_to_wait=5.0,
forever=False,
silent=True):
        if not callback:
            log.error("Subscriber - Requires a callback handler for message "
                      "processing with signature definition: "
                      "def handle_message(self, body, message):")
            assert(callback)
# if not connected, just connect with defaults
if not self.sub:
self.connect_sub()
if not self.sub:
log.error(("Subscriber - Failed to connect "
"to broker={} ssl={}")
.format(self.sub_auth_url,
self.sub_ssl_options))
assert(self.sub)
if self.sub:
self.sub.consume(callback=callback,
queue=queue,
exchange=exchange,
routing_key=routing_key,
serializer=serializer,
heartbeat=heartbeat,
forever=forever,
time_to_wait=time_to_wait,
silent=silent)
else:
log.info("Sub is None already - client should not call consume")
# end of consume
def publish(self,
body=None,
exchange=exchange,
routing_key=routing_key,
queue=queue,
priority=0,
ttl=None,
serializer="json",
retry=True,
silent=True):
# if no body for the message
if not body:
log.error(("Publisher - requires argument: "
"body=some_dictionary to test"))
assert(body)
# if not connected, just connect with defaults
if not self.pub:
self.connect_pub()
if not self.pub:
log.error(("Publisher - Failed to connect "
"to broker={} ssl={}")
.format(self.pub_auth_url,
self.pub_ssl_options))
assert(self.pub)
if self.pub:
self.pub.publish(body=body,
exchange=exchange,
routing_key=routing_key,
queue=queue,
serializer=serializer,
priority=priority,
ttl=ttl,
retry=retry,
silent=silent)
else:
log.info("Pub is None already - client should not call publish")
# end of publish
# end of BaseTestCase
|
1651556
|
import os
import pathlib
import platform
import uqbar.io
import supriya
def find(sclang_path=None):
"""Find the ``sclang`` executable.
The following paths, if defined, will be searched (prioritised as ordered):
1. The absolute path ``sclang_path``
2. The environment variable ``SCLANG_PATH``
3. ``sclang_path`` if defined in Supriya's configuration file
4. The user's ``PATH``
5. Common installation directories of the SuperCollider application.
Returns a path to the ``sclang`` executable.
Raises ``RuntimeError`` if no path is found.
"""
sclang_path = pathlib.Path(
sclang_path
or os.environ.get("SCLANG_PATH")
or supriya.config.get("core", "sclang_path", fallback=None)
or "sclang"
)
if sclang_path.is_absolute() and uqbar.io.find_executable(sclang_path):
return sclang_path
sclang_path_candidates = uqbar.io.find_executable(sclang_path.name)
if sclang_path_candidates:
return pathlib.Path(sclang_path_candidates[0])
if platform.system() == "Darwin":
for path in [
pathlib.Path("/Applications/SuperCollider.app/Contents/MacOS/sclang"),
pathlib.Path(
"/Applications/SuperCollider/SuperCollider.app/Contents/MacOS/sclang"
),
]:
if path.exists():
return path
elif platform.system() == "Linux":
for path in [
pathlib.Path("/usr/bin/sclang"),
pathlib.Path("/usr/local/bin/sclang"),
]:
if path.exists():
return path
raise RuntimeError("Failed to locate sclang")
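# Usage sketch:
#
#     sclang = find()  # env var, config, PATH, then well-known locations
#     print(sclang)    # e.g. PosixPath('/usr/bin/sclang') on Linux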
|
1651578
|
from django.apps import AppConfig
class ServeConfig(AppConfig):
name = 'daiquiri.serve'
label = 'daiquiri_serve'
verbose_name = 'Daiquiri Serve'
|
1651613
|
import os
from pathlib import Path
import pytest
from unit.applications.proto import TestApplicationProto
class TestStaticVariables(TestApplicationProto):
prerequisites = {}
@pytest.fixture(autouse=True)
def setup_method_fixture(self, temp_dir):
os.makedirs(temp_dir + '/assets/dir')
os.makedirs(temp_dir + '/assets/d$r')
Path(temp_dir + '/assets/index.html').write_text('0123456789')
Path(temp_dir + '/assets/dir/file').write_text('file')
Path(temp_dir + '/assets/d$r/file').write_text('d$r')
self._load_conf(
{
"listeners": {"*:7080": {"pass": "routes"}},
"routes": [{"action": {"share": temp_dir + "/assets$uri"}}],
}
)
def update_share(self, share):
if isinstance(share, list):
return self.conf(share, 'routes/0/action/share')
return self.conf('"' + share + '"', 'routes/0/action/share')
def test_static_variables(self, temp_dir):
assert self.get(url='/index.html')['status'] == 200
assert self.get(url='/d$r/file')['status'] == 200
assert 'success' in self.update_share('$uri')
assert self.get(url=temp_dir + '/assets/index.html')['status'] == 200
assert 'success' in self.update_share(temp_dir + '/assets${uri}')
assert self.get(url='/index.html')['status'] == 200
def test_static_variables_array(self, temp_dir):
assert 'success' in self.update_share(
[temp_dir + '/assets$uri', '$uri']
)
assert self.get(url='/dir/file')['status'] == 200
assert self.get(url=temp_dir + '/assets/index.html')['status'] == 200
assert self.get(url='/blah')['status'] == 404
assert 'success' in self.conf(
{
"share": [temp_dir + '/assets$uri', '$uri'],
"fallback": {"return": 201},
},
'routes/0/action',
)
assert self.get(url='/dir/file')['status'] == 200
assert self.get(url=temp_dir + '/assets/index.html')['status'] == 200
assert self.get(url='/dir/blah')['status'] == 201
    def test_static_variables_builtin_start(self, temp_dir):
assert 'success' in self.update_share('$uri/assets/index.html')
assert self.get(url=temp_dir)['status'] == 200
    def test_static_variables_builtin_mid(self, temp_dir):
assert 'success' in self.update_share(temp_dir + '$uri/index.html')
assert self.get(url='/assets')['status'] == 200
    def test_static_variables_builtin_end(self):
assert self.get(url='/index.html')['status'] == 200
def test_static_variables_invalid(self, temp_dir):
assert 'error' in self.update_share(temp_dir + '/assets/d$r$uri')
assert 'error' in self.update_share(temp_dir + '/assets/$$uri')
assert 'error' in self.update_share(
[temp_dir + '/assets$uri', temp_dir + '/assets/dir', '$$uri']
)
|
1651626
|
import torch
import torch.nn as nn
import torch.optim as optim
import numpy as np
import six
import collections
import collections.abc
import copy
from torch.utils.data import Dataset
from collections import defaultdict
import h5py
from scipy.stats import norm
import os
SKIP_TYPES = six.string_types
class SimpleDataset(Dataset):
'''
Assuming X and y are numpy arrays and
with X.shape = (n_samples, n_features)
y.shape = (n_samples,)
'''
def __init__(self, X, y=None):
self.X = X
self.y = y
def __len__(self):
return (len(self.X))
def __getitem__(self, i):
data = self.X[i]
#data = np.array(data).astype(np.float32)
if self.y is not None:
return dict(input=data, label=self.y[i])
else:
return dict(input=data)
class FastTensorDataLoader:
"""
A DataLoader-like object for a set of tensors that can be much faster than
TensorDataset + DataLoader because dataloader grabs individual indices of
the dataset and calls cat (slow).
Source: https://discuss.pytorch.org/t/dataloader-much-slower-than-manual-batching/27014/6
"""
def __init__(self, *tensors, tensor_names, batch_size=32, shuffle=False):
"""
Initialize a FastTensorDataLoader.
:param *tensors: tensors to store. Must have the same length @ dim 0.
:param tensor_names: name of tensors (for feed_dict)
:param batch_size: batch size to load.
:param shuffle: if True, shuffle the data *in-place* whenever an
iterator is created out of this object.
:returns: A FastTensorDataLoader.
"""
assert all(t.shape[0] == tensors[0].shape[0] for t in tensors)
self.tensors = tensors
self.tensor_names = tensor_names
self.dataset_len = self.tensors[0].shape[0]
self.batch_size = batch_size
self.shuffle = shuffle
# Calculate # batches
n_batches, remainder = divmod(self.dataset_len, self.batch_size)
if remainder > 0:
n_batches += 1
self.n_batches = n_batches
def __iter__(self):
if self.shuffle:
r = torch.randperm(self.dataset_len)
self.tensors = [t[r] for t in self.tensors]
self.i = 0
return self
def __next__(self):
if self.i >= self.dataset_len:
raise StopIteration
batch = {}
for k in range(len(self.tensor_names)):
batch.update({self.tensor_names[k]: self.tensors[k][self.i:self.i+self.batch_size]})
self.i += self.batch_size
return batch
def __len__(self):
return self.n_batches
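# Usage sketch (hypothetical tensors): batches come out as feed dicts keyed
# by tensor_names, so the loop body can index them by name.
#
#     features = torch.randn(1000, 20)
#     labels = torch.randint(0, 2, (1000,))
#     loader = FastTensorDataLoader(features, labels,
#                                   tensor_names=['input', 'label'],
#                                   batch_size=64, shuffle=True)
#     for batch in loader:
#         inputs, targets = batch['input'], batch['label']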
'''standardize_dataset function is from utils_jared.py'''
def standardize_dataset(dataset, offset, scale):
norm_ds = copy.deepcopy(dataset)
norm_ds['x'] = (norm_ds['x'] - offset) / scale
return norm_ds
'''load_datasets function is from utils_jared.py'''
def load_datasets(dataset_file):
datasets = defaultdict(dict)
with h5py.File(dataset_file, 'r') as fp:
for ds in fp:
for array in fp[ds]:
datasets[ds][array] = fp[ds][array][:]
return datasets
def load_cox_gaussian_data():
dataset_file = os.path.join(os.path.dirname(__file__),
'datasets/gaussian_survival_data.h5')
datasets = defaultdict(dict)
with h5py.File(dataset_file, 'r') as fp:
for ds in fp:
for array in fp[ds]:
datasets[ds][array] = fp[ds][array][:]
return datasets
def prepare_data(x, label):
if isinstance(label, dict):
e, t = label['e'], label['t']
# Sort training data for accurate partial likelihood calculation.
sort_idx = np.argsort(t)[::-1]
x = x[sort_idx]
e = e[sort_idx]
t = t[sort_idx]
    # Alternative return shapes kept from an earlier revision:
    #   return x, {'e': e, 't': t}
    #   return {'x': x, 'e': e, 't': t}
    return x, e, t
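# Worked example: with t = [2, 5, 1], np.argsort(t)[::-1] gives [1, 0, 2],
# so rows are reordered to descending event time (5, 2, 1), the order the
# Cox partial-likelihood accumulation expects.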
def probe_infnan(v, name, extras={}):
    nps = torch.isnan(v) | torch.isinf(v)
s = nps.sum().item()
if s > 0:
print('>>> {} >>>'.format(name))
print(name, s)
print(v[nps])
for k, val in extras.items():
print(k, val, val.sum().item())
quit()
class Identity(nn.Module):
def forward(self, *args):
if len(args) == 1:
return args[0]
return args
def get_batcnnorm(bn, nr_features=None, nr_dims=1):
if isinstance(bn, nn.Module):
return bn
assert 1 <= nr_dims <= 3
if bn in (True, 'async'):
clz_name = 'BatchNorm{}d'.format(nr_dims)
return getattr(nn, clz_name)(nr_features)
else:
raise ValueError('Unknown type of batch normalization: {}.'.format(bn))
def get_dropout(dropout, nr_dims=1):
if isinstance(dropout, nn.Module):
return dropout
if dropout is True:
dropout = 0.5
if nr_dims == 1:
return nn.Dropout(dropout, True)
else:
clz_name = 'Dropout{}d'.format(nr_dims)
return getattr(nn, clz_name)(dropout)
def get_activation(act):
if isinstance(act, nn.Module):
return act
assert type(act) is str, 'Unknown type of activation: {}.'.format(act)
act_lower = act.lower()
if act_lower == 'identity':
return Identity()
elif act_lower == 'relu':
return nn.ReLU(True)
elif act_lower == 'selu':
return nn.SELU(True)
elif act_lower == 'sigmoid':
return nn.Sigmoid()
elif act_lower == 'tanh':
return nn.Tanh()
else:
try:
            return getattr(nn, act)()
except AttributeError:
raise ValueError('Unknown activation function: {}.'.format(act))
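# Examples: get_activation('relu') -> nn.ReLU(inplace=True);
# get_activation(nn.Tanh()) returns the module unchanged; unknown names fall
# back to a getattr lookup on torch.nn and raise ValueError if that fails.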
def get_optimizer(optimizer, model, *args, **kwargs):
if isinstance(optimizer, (optim.Optimizer)):
return optimizer
if type(optimizer) is str:
try:
optimizer = getattr(optim, optimizer)
except AttributeError:
raise ValueError('Unknown optimizer type: {}.'.format(optimizer))
return optimizer(filter(lambda p: p.requires_grad, model.parameters()), *args, **kwargs)
def stmap(func, iterable):
if isinstance(iterable, six.string_types):
return func(iterable)
    elif isinstance(iterable, (collections.abc.Sequence, collections.UserList)):
        return [stmap(func, v) for v in iterable]
    elif isinstance(iterable, collections.abc.Set):
        return {stmap(func, v) for v in iterable}
    elif isinstance(iterable, (collections.abc.Mapping, collections.UserDict)):
return {k: stmap(func, v) for k, v in iterable.items()}
else:
return func(iterable)
def _as_tensor(o):
from torch.autograd import Variable
if isinstance(o, SKIP_TYPES):
return o
if isinstance(o, Variable):
return o
if torch.is_tensor(o):
return o
return torch.from_numpy(np.array(o))
def as_tensor(obj):
return stmap(_as_tensor, obj)
def _as_numpy(o):
from torch.autograd import Variable
if isinstance(o, SKIP_TYPES):
return o
if isinstance(o, Variable):
        o = o.data
if torch.is_tensor(o):
return o.cpu().numpy()
return np.array(o)
def as_numpy(obj):
return stmap(_as_numpy, obj)
def _as_float(o):
if isinstance(o, SKIP_TYPES):
return o
if torch.is_tensor(o):
return o.item()
arr = as_numpy(o)
assert arr.size == 1
return float(arr)
def as_float(obj):
return stmap(_as_float, obj)
def _as_cpu(o):
from torch.autograd import Variable
if isinstance(o, Variable) or torch.is_tensor(o):
return o.cpu()
return o
def as_cpu(obj):
return stmap(_as_cpu, obj)
## For synthetic dataset creation
import math
from sklearn.datasets import make_moons
# Create a simple dataset
def create_twomoon_dataset(n, p):
relevant, y = make_moons(n_samples=n, shuffle=True, noise=0.1, random_state=None)
print(y.shape)
noise_vector = norm.rvs(loc=0, scale=1, size=[n,p-2])
data = np.concatenate([relevant, noise_vector], axis=1)
print(data.shape)
return data, y
def create_sin_dataset(n, p):
    x1 = 5 * np.random.uniform(0, 1, n).reshape(-1, 1)
    x2 = 5 * np.random.uniform(0, 1, n).reshape(-1, 1)
    y = np.sin(x1) * np.cos(x2) ** 3
    relevant = np.hstack((x1, x2))
noise_vector = norm.rvs(loc=0, scale=1, size=[n,p-2])
data = np.concatenate([relevant, noise_vector], axis=1)
return data, y.astype(np.float32)
def create_simple_sin_dataset(n, p):
'''This dataset was added to provide an example of L1 norm reg failure for presentation.
'''
assert p == 2
x1 = np.random.uniform(-math.pi, math.pi, n).reshape(n ,1)
x2 = np.random.uniform(-math.pi, math.pi, n).reshape(n, 1)
y = np.sin(x1)
data = np.concatenate([x1, x2], axis=1)
print("data.shape: {}".format(data.shape))
return data, y
|
1651629
|
import mrl
import numpy as np
import torch, torch.nn.functional as F
import os
class GoalEnvReward(mrl.Module):
def __init__(self):
"""Wraps environment's compute reward function"""
super().__init__(
'goal_reward', required_agent_modules=['env'], locals=locals())
def _setup(self):
assert self.env.goal_env, "Environment must be a goal environment!"
assert hasattr(self.env, 'compute_reward'), "Environment must have compute reward defined!"
def __call__(self, achieved_goals, goals, info):
return self.env.compute_reward(achieved_goals, goals, info)
class NeighborReward(mrl.Module):
def __init__(self, max_neighbor_distance = 1, optimize_every = 5, batch_size = 1000, temperature = 1.):
"""Wraps environment's compute reward function. Should probably only be used for first-visit achievment."""
super().__init__(
'goal_reward', required_agent_modules=['replay_buffer', 'neighbor_embedding_network'], locals=locals())
self.step = 0
self.optimize_every = optimize_every
self.batch_size = batch_size
self.temperature = temperature
if max_neighbor_distance != 1: # this is the number of steps from which to count two goals as neighbors.
raise NotImplementedError
def _setup(self):
assert self.env.goal_env, "Environment must be a goal environment!"
assert hasattr(self.env, 'compute_reward'), "Environment must have compute reward defined!"
self.optimizer = torch.optim.Adam(
self.neighbor_embedding_network.model.parameters(),
lr=self.config.critic_lr, # just using critic hparams for now
weight_decay=self.config.critic_weight_decay)
def _optimize(self):
pag_buffer = self.replay_buffer.buffer.BUFF.buffer_previous_ag
ag_buffer = self.replay_buffer.buffer.BUFF.buffer_ag
self.step +=1
if self.step % self.optimize_every == 0 and len(ag_buffer):
sample_idxs = np.random.randint(len(ag_buffer), size=self.batch_size)
ags = ag_buffer.get_batch(sample_idxs)
pos = pag_buffer.get_batch(sample_idxs)
# mix it up to keep it symmetric for now...
temp = ags[:len(ags) //2].copy()
ags[:len(ags) //2] = pos[:len(ags) //2]
pos[:len(ags) //2] = temp
# get random negative samples by a 1 index roll
neg = np.roll(pos, 1, axis=0)
# move to torch
ags = self.torch(ags)
pos = self.torch(pos)
neg = self.torch(neg)
# get embeddings
embs = self.neighbor_embedding_network(torch.cat((ags, pos, neg), dim=0))
ags, pos, neg = torch.chunk(embs, 3)
pos_logits = -self.temperature * torch.norm(ags - pos, dim = 1)
neg_logits = -self.temperature * torch.norm(ags - neg, dim = 1)
# use soft targets
loss = F.binary_cross_entropy_with_logits(torch.exp(pos_logits), torch.ones_like(pos_logits) * 0.99) +\
F.binary_cross_entropy_with_logits(torch.exp(neg_logits), torch.ones_like(pos_logits) * 0.01)
self.logger.add_tabular('intrinsic_reward_loss', self.numpy(loss))
# optimize
self.optimizer.zero_grad()
loss.backward()
self.optimizer.step()
def __call__(self, achieved_goals, goals, info):
"""Should return 0 for ags, gs that are predicted to be neighbors, -1 otherwise, as a numpy array"""
ags = achieved_goals.reshape(-1, achieved_goals.shape[-1])
dgs = goals.reshape(-1, achieved_goals.shape[-1])
ags = self.torch(ags)
dgs = self.torch(dgs)
# get embeddings
embs = self.neighbor_embedding_network(torch.cat((ags, dgs), dim=0))
ags, dgs = torch.chunk(embs, 2)
# predict whether ags and dgs are transition neighbors
preds = torch.exp(-self.temperature * torch.norm(ags - dgs, dim = 1))
return -self.numpy(preds < 0.5).astype(np.float32)
def save(self, save_folder : str):
path = os.path.join(save_folder, self.module_name + '.pt')
torch.save({
'opt_state_dict': self.optimizer.state_dict()
}, path)
def load(self, save_folder : str):
path = os.path.join(save_folder, self.module_name + '.pt')
checkpoint = torch.load(path)
self.optimizer.load_state_dict(checkpoint['opt_state_dict'])
|
1651650
|
from __future__ import absolute_import, unicode_literals
from celery import current_app
from celery.states import PENDING
from celery.app.task import Context, Task
from celery.signals import before_task_publish
from django_celery_fulldbresult.errors import SchedulingStopPublishing
from django.conf import settings
from django.utils.timezone import now
schedule_eta = getattr(
settings, "DJANGO_CELERY_FULLDBRESULT_SCHEDULE_ETA", False)
track_publish = getattr(
settings, "DJANGO_CELERY_FULLDBRESULT_TRACK_PUBLISH", False)
monkey_patch_async = getattr(
settings, "DJANGO_CELERY_FULLDBRESULT_MONKEY_PATCH_ASYNC", False)
old_apply_async = Task.apply_async
def new_apply_async(self, *args, **kwargs):
try:
return old_apply_async(self, *args, **kwargs)
except SchedulingStopPublishing as exc:
# There was an ETA and the task was not sent to the broker.
# A scheduled task was created instead.
return self.AsyncResult(exc.task_id)
def apply_async_monkey_patch():
Task.apply_async = new_apply_async
def unapply_async_monkey_patch():
Task.apply_async = old_apply_async
if monkey_patch_async:
apply_async_monkey_patch()
if track_publish or schedule_eta:
@before_task_publish.connect
def update_sent_state(sender=None, body=None, exchange=None,
routing_key=None, **kwargs):
# App may not be loaded on init
from django_celery_fulldbresult.models import SCHEDULED
task = current_app.tasks.get(sender)
save = False
status = None
schedule_eta = getattr(
settings, "DJANGO_CELERY_FULLDBRESULT_SCHEDULE_ETA", False)
track_publish = getattr(
settings, "DJANGO_CELERY_FULLDBRESULT_TRACK_PUBLISH", False)
ignore_result = getattr(task, "ignore_result", False) or\
getattr(settings, "CELERY_IGNORE_RESULT", False)
if schedule_eta and body.get("eta") and not body.get("chord")\
and not body.get("taskset"):
status = SCHEDULED
save = True
elif track_publish and not ignore_result:
status = PENDING
save = True
if save:
backend = task.backend if task else current_app.backend
request = Context()
request.update(**body)
request.date_submitted = now()
request.delivery_info = {
"exchange": exchange,
"routing_key": routing_key
}
backend.store_result(
body["id"], None, status, traceback=None, request=request)
if status == SCHEDULED:
raise SchedulingStopPublishing(task_id=body["id"])
|
1651675
|
from SCons.Script import BoolVariable, Dir, Environment, File, SCons, Variables
def can_build(env, platform):
return True
def configure(env):
envvars = Variables()
envvars.Add(BoolVariable('builtin_runtime', 'Use the built-in libraries', True))
envvars.Add(BoolVariable('use_graphite2', 'Enable Graphite2 complementary shaper', True))
envvars.Add(BoolVariable('use_font_wrapper', 'Enable Godot font wrapper', False))
envvars.Update(env)
def get_doc_classes():
return [
"TLICUDataLoader",
"TLFontFace",
"TLBitmapFontFace",
"TLDynamicFontFace",
"TLFontFamily",
"TLShapedString",
"TLShapedAttributedString",
"TLShapedParagraph",
"TLRichTextEdit",
"TLRichTextEditSelection",
"TLLabel",
"TLLineEdit",
"TLFontIterator",
"TLGDFontWrapper"
]
def get_doc_path():
return "doc_classes"
|
1651693
|
from PeptideBuilder import Geometry
import PeptideBuilder
import Bio.PDB
from Bio.PDB import calc_angle, rotaxis, Vector
from math import *
import numpy as np
def bytes2string(tbt_array):
    return tbt_array.numpy().astype(dtype=np.uint8).tobytes().split(b'\x00')[0].decode("utf-8")
def generateAA(aaName):
geo = Geometry.geometry(aaName)
geo.phi=0
geo.psi_im1=0
structure = PeptideBuilder.initialize_res(geo)
tx = -np.pi/2.0
Rx = np.array([[1,0,0], [0, cos(tx), -sin(tx)], [0, sin(tx), cos(tx)]])
for atom in structure.get_atoms():
atom.transform(Rx, np.array([0,0,0]))
nAtom = list(structure.get_atoms())[0]
nV = nAtom.get_coord()
I = np.identity(3)
for atom in structure.get_atoms():
atom.transform(I, -nV)
R = rotaxis(np.pi, list(structure.get_atoms())[1].get_vector())
for atom in structure.get_atoms():
atom.transform(R, np.array([0,0,0]))
# print(list(structure.get_atoms())[1].get_coord(), list(structure.get_atoms())[1])
out = Bio.PDB.PDBIO()
out.set_structure(structure)
out.save( "example.pdb" )
return structure[0]['A'][1]
def transform(structure):
tx = -np.pi/2.0
Rx = np.array([[1,0,0], [0, cos(tx), -sin(tx)], [0, sin(tx), cos(tx)]])
for atom in structure.get_atoms():
atom.transform(Rx, np.array([0,0,0]))
nAtom = list(structure.get_atoms())[0]
nV = nAtom.get_coord()
I = np.identity(3)
for atom in structure.get_atoms():
atom.transform(I, -nV)
R = rotaxis(np.pi, list(structure.get_atoms())[1].get_vector())
for atom in structure.get_atoms():
atom.transform(R, np.array([0,0,0]))
return structure
|
1651711
|
import torch.nn as nn
import torch
import numpy as np
class GLU(nn.Module):
def __init__(self):
super(GLU, self).__init__()
        # Custom implementation: torch.nn.GLU halves the channel dimension,
        # but the Voice Conversion CycleGAN paper assumes GLU preserves the
        # tensor shape, so forward computes input * sigmoid(input) directly.
def forward(self, input):
return input * torch.sigmoid(input)
class PixelShuffle(nn.Module):
def __init__(self, upscale_factor):
super(PixelShuffle, self).__init__()
# Custom Implementation because PyTorch PixelShuffle requires,
# 4D input. Whereas, in this case we have have 3D array
self.upscale_factor = upscale_factor
def forward(self, input):
        n = input.shape[0]
        c_out = input.shape[1] // self.upscale_factor
        w_new = input.shape[2] * self.upscale_factor
return input.view(n, c_out, w_new)
class ResidualLayer(nn.Module):
def __init__(self, in_channels, out_channels, kernel_size, stride, padding):
super(ResidualLayer, self).__init__()
# self.residualLayer = nn.Sequential(nn.Conv1d(in_channels=in_channels,
# out_channels=out_channels,
# kernel_size=kernel_size,
# stride=1,
# padding=padding),
# nn.InstanceNorm1d(
# num_features=out_channels,
# affine=True),
# GLU(),
# nn.Conv1d(in_channels=out_channels,
# out_channels=in_channels,
# kernel_size=kernel_size,
# stride=1,
# padding=padding),
# nn.InstanceNorm1d(
# num_features=in_channels,
# affine=True)
# )
self.conv1d_layer = nn.Sequential(nn.Conv1d(in_channels=in_channels,
out_channels=out_channels,
kernel_size=kernel_size,
stride=1,
padding=padding),
nn.InstanceNorm1d(num_features=out_channels,
affine=True))
self.conv_layer_gates = nn.Sequential(nn.Conv1d(in_channels=in_channels,
out_channels=out_channels,
kernel_size=kernel_size,
stride=1,
padding=padding),
nn.InstanceNorm1d(num_features=out_channels,
affine=True))
self.conv1d_out_layer = nn.Sequential(nn.Conv1d(in_channels=out_channels,
out_channels=in_channels,
kernel_size=kernel_size,
stride=1,
padding=padding),
nn.InstanceNorm1d(num_features=in_channels,
affine=True))
def forward(self, input):
h1_norm = self.conv1d_layer(input)
h1_gates_norm = self.conv_layer_gates(input)
# GLU
h1_glu = h1_norm * torch.sigmoid(h1_gates_norm)
h2_norm = self.conv1d_out_layer(h1_glu)
return input + h2_norm
class downSample_Generator(nn.Module):
def __init__(self, in_channels, out_channels, kernel_size, stride, padding):
super(downSample_Generator, self).__init__()
self.convLayer = nn.Sequential(nn.Conv1d(in_channels=in_channels,
out_channels=out_channels,
kernel_size=kernel_size,
stride=stride,
padding=padding),
nn.InstanceNorm1d(num_features=out_channels,
affine=True))
self.convLayer_gates = nn.Sequential(nn.Conv1d(in_channels=in_channels,
out_channels=out_channels,
kernel_size=kernel_size,
stride=stride,
padding=padding),
nn.InstanceNorm1d(num_features=out_channels,
affine=True))
def forward(self, input):
return self.convLayer(input) * torch.sigmoid(self.convLayer_gates(input))
class upSample_Generator(nn.Module):
def __init__(self, in_channels, out_channels, kernel_size, stride, padding):
super(upSample_Generator, self).__init__()
self.convLayer = nn.Sequential(nn.Conv1d(in_channels=in_channels,
out_channels=out_channels,
kernel_size=kernel_size,
stride=stride,
padding=padding),
PixelShuffle(upscale_factor=2),
nn.InstanceNorm1d(num_features=out_channels // 2,
affine=True))
self.convLayer_gates = nn.Sequential(nn.Conv1d(in_channels=in_channels,
out_channels=out_channels,
kernel_size=kernel_size,
stride=stride,
padding=padding),
PixelShuffle(upscale_factor=2),
nn.InstanceNorm1d(num_features=out_channels // 2,
affine=True))
def forward(self, input):
return self.convLayer(input) * torch.sigmoid(self.convLayer_gates(input))
class Generator(nn.Module):
def __init__(self):
super(Generator, self).__init__()
self.conv1 = nn.Conv1d(in_channels=24,
out_channels=128,
kernel_size=15,
stride=1,
padding=7)
self.conv1_gates = nn.Conv1d(in_channels=24,
out_channels=128,
kernel_size=15,
stride=1,
padding=7)
# Downsample Layer
self.downSample1 = downSample_Generator(in_channels=128,
out_channels=256,
kernel_size=5,
stride=2,
padding=1)
self.downSample2 = downSample_Generator(in_channels=256,
out_channels=512,
kernel_size=5,
stride=2,
padding=2)
# Residual Blocks
self.residualLayer1 = ResidualLayer(in_channels=512,
out_channels=1024,
kernel_size=3,
stride=1,
padding=1)
self.residualLayer2 = ResidualLayer(in_channels=512,
out_channels=1024,
kernel_size=3,
stride=1,
padding=1)
self.residualLayer3 = ResidualLayer(in_channels=512,
out_channels=1024,
kernel_size=3,
stride=1,
padding=1)
self.residualLayer4 = ResidualLayer(in_channels=512,
out_channels=1024,
kernel_size=3,
stride=1,
padding=1)
self.residualLayer5 = ResidualLayer(in_channels=512,
out_channels=1024,
kernel_size=3,
stride=1,
padding=1)
self.residualLayer6 = ResidualLayer(in_channels=512,
out_channels=1024,
kernel_size=3,
stride=1,
padding=1)
# UpSample Layer
self.upSample1 = upSample_Generator(in_channels=512,
out_channels=1024,
kernel_size=5,
stride=1,
padding=2)
self.upSample2 = upSample_Generator(in_channels=1024 // 2,
out_channels=512,
kernel_size=5,
stride=1,
padding=2)
self.lastConvLayer = nn.Conv1d(in_channels=512 // 2,
out_channels=24,
kernel_size=15,
stride=1,
padding=7)
def forward(self, input):
# GLU
conv1 = self.conv1(input) * torch.sigmoid(self.conv1_gates(input))
downsample1 = self.downSample1(conv1)
downsample2 = self.downSample2(downsample1)
residual_layer_1 = self.residualLayer1(downsample2)
residual_layer_2 = self.residualLayer2(residual_layer_1)
residual_layer_3 = self.residualLayer3(residual_layer_2)
residual_layer_4 = self.residualLayer4(residual_layer_3)
residual_layer_5 = self.residualLayer5(residual_layer_4)
residual_layer_6 = self.residualLayer6(residual_layer_5)
upSample_layer_1 = self.upSample1(residual_layer_6)
upSample_layer_2 = self.upSample2(upSample_layer_1)
output = self.lastConvLayer(upSample_layer_2)
return output
class DownSample_Discriminator(nn.Module):
def __init__(self, in_channels, out_channels, kernel_size, stride, padding):
super(DownSample_Discriminator, self).__init__()
self.convLayer = nn.Sequential(nn.Conv2d(in_channels=in_channels,
out_channels=out_channels,
kernel_size=kernel_size,
stride=stride,
padding=padding),
nn.InstanceNorm2d(num_features=out_channels,
affine=True))
self.convLayerGates = nn.Sequential(nn.Conv2d(in_channels=in_channels,
out_channels=out_channels,
kernel_size=kernel_size,
stride=stride,
padding=padding),
nn.InstanceNorm2d(num_features=out_channels,
affine=True))
def forward(self, input):
# GLU
return self.convLayer(input) * torch.sigmoid(self.convLayerGates(input))
class Discriminator(nn.Module):
def __init__(self):
super(Discriminator, self).__init__()
self.convLayer1 = nn.Conv2d(in_channels=1,
out_channels=128,
kernel_size=[3, 3],
stride=[1, 2])
self.convLayer1_gates = nn.Conv2d(in_channels=1,
out_channels=128,
kernel_size=[3, 3],
stride=[1, 2])
        # Note: kernel sizes have been modified in this PyTorch implementation
        # compared to the paper so as to retain dimensionality. Unlike
        # TensorFlow, PyTorch does not provide padding='same', hence kernel
        # sizes were altered to preserve the output size after each layer.
# DownSample Layer
self.downSample1 = DownSample_Discriminator(in_channels=128,
out_channels=256,
kernel_size=[3, 3],
stride=[2, 2],
padding=0)
self.downSample2 = DownSample_Discriminator(in_channels=256,
out_channels=512,
kernel_size=[3, 3],
stride=[2, 2],
padding=0)
self.downSample3 = DownSample_Discriminator(in_channels=512,
out_channels=1024,
kernel_size=[6, 3],
stride=[1, 2],
padding=0)
# Fully Connected Layer
self.fc = nn.Linear(in_features=1024,
out_features=1)
# def downSample(self, in_channels, out_channels, kernel_size, stride, padding):
# convLayer = nn.Sequential(nn.Conv2d(in_channels=in_channels,
# out_channels=out_channels,
# kernel_size=kernel_size,
# stride=stride,
# padding=padding),
# nn.InstanceNorm2d(num_features=out_channels,
# affine=True),
# GLU())
# return convLayer
def forward(self, input):
# input has shape [batch_size, num_features, time]
# discriminator requires shape [batchSize, 1, num_features, time]
input = input.unsqueeze(1)
# GLU
pad_input = nn.ZeroPad2d((1, 0, 1, 1))
layer1 = self.convLayer1(
pad_input(input)) * torch.sigmoid(self.convLayer1_gates(pad_input(input)))
pad_input = nn.ZeroPad2d((1, 0, 1, 0))
downSample1 = self.downSample1(pad_input(layer1))
pad_input = nn.ZeroPad2d((1, 0, 1, 0))
downSample2 = self.downSample2(pad_input(downSample1))
pad_input = nn.ZeroPad2d((1, 0, 3, 2))
downSample3 = self.downSample3(pad_input(downSample2))
downSample3 = downSample3.contiguous().permute(0, 2, 3, 1).contiguous()
# fc = torch.sigmoid(self.fc(downSample3))
# Taking off sigmoid layer to avoid vanishing gradient problem
fc = self.fc(downSample3)
return fc
if __name__ == '__main__':
# Generator Dimensionality Testing
    input = torch.randn(10, 24, 1100)  # (N, C_in, Width) for Conv1d; overwritten below
np.random.seed(0)
print(np.random.randn(10))
input = np.random.randn(158, 24, 128)
input = torch.from_numpy(input).float()
# print(input)
generator = Generator()
output = generator(input)
print("Output shape Generator", output.shape)
# Discriminator Dimensionality Testing
# input = torch.randn(32, 1, 24, 128) # (N, C_in, height, width) For Conv2d
discriminator = Discriminator()
output = discriminator(output)
print("Output shape Discriminator", output.shape)
|
1651716
|
import tensorflow as tf
import subprocess
def build_cluster(cfg):
# Cluster configuration
workers = list()
if cfg.remote is None:
# Build a cluster in local machine
for i in range(cfg.num_workers):
ipport = cfg.local_ip + ":" + str(cfg.worker_port + i)
workers.append(ipport)
else:
# Build a cluster in remote servers
for i in range(cfg.num_workers):
ipport = cfg.remote_ip[cfg.remote[i]] + ":" + str(cfg.worker_port)
workers.append(ipport)
cluster = tf.train.ClusterSpec({"wk": workers})
server = tf.train.Server(cluster, job_name=cfg.job_name, task_index=cfg.nID)
print("Starting server /job:{}/task:{}".format(cfg.job_name, cfg.nID))
# Start Redis-server
redis_start_cmd = "redis-server --port %s &" % str(cfg.redis_port + cfg.nID)
redis_process = subprocess.Popen(redis_start_cmd, shell=True)
term_cmd = "kill -9 %s" % str(redis_process.pid + 1)
return cluster, server, workers, term_cmd
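if __name__ == "__main__":
    # Minimal usage sketch (added; not part of the original module). The `cfg`
    # fields below are assumptions inferred from the attribute accesses in
    # build_cluster; note that running this actually starts a TF server and a
    # redis-server process.
    from collections import namedtuple
    Config = namedtuple("Config", ["remote", "num_workers", "local_ip",
                                   "worker_port", "remote_ip", "job_name",
                                   "nID", "redis_port"])
    cfg = Config(remote=None, num_workers=2, local_ip="127.0.0.1",
                 worker_port=2222, remote_ip={}, job_name="wk", nID=0,
                 redis_port=6379)
    cluster, server, workers, term_cmd = build_cluster(cfg)
    print(workers)  # e.g. ['127.0.0.1:2222', '127.0.0.1:2223']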
|
1651764
|
from __future__ import annotations
import logging
import typing
from typing import Any, Dict, List
import jsondiff
from silex_client.action.command_base import CommandBase
from silex_client.action.parameter_buffer import ParameterBuffer
from silex_client.utils.parameter_types import (
AnyParameter,
ListParameterMeta,
SelectParameterMeta,
)
# Forward references
if typing.TYPE_CHECKING:
from silex_client.action.action_query import ActionQuery
class SelectList(CommandBase):
"""
Prompt user for a custom list of parameters
"""
parameters = {
"parameters_list": {"type": ListParameterMeta(AnyParameter), "hide": True},
"param_name": {"type": str, "hide": True},
}
@CommandBase.conform_command()
async def __call__(
self,
parameters: Dict[str, Any],
action_query: ActionQuery,
logger: logging.Logger,
):
parameters_list: List[Any] = parameters.get("parameters_list")
param_name: str = parameters.get("param_name")
response: Dict[Any] = await self.prompt_user(
action_query,
{
"selected_param": ParameterBuffer(
name="selected_param",
type=SelectParameterMeta(*parameters_list),
label=param_name,
)
},
)
return response.get("selected_param")
|
1651767
|
from pkg_resources import safe_version
import stagpy
def test_version_format():
assert stagpy.__version__ == safe_version(stagpy.__version__)
def test_bogus_mplstyle(capsys):
stagpy.conf.plot.mplstyle = 'stagpy-bogus'
stagpy.load_mplstyle()
output = capsys.readouterr()
assert output.err == 'Cannot import style stagpy-bogus.\n'
del stagpy.conf.plot.mplstyle
|
1651779
|
from typing import Callable, Optional, Any
import flax.linen as nn
import jax.numpy as jnp
Dtype = Any
class SimCLR(nn.Module):
model_cls: Callable
frontend_cls: Optional[Callable] = None
embedding_dim: int = 512
dtype: Dtype = jnp.float32
@nn.compact
def __call__(self, inputs, train: bool = True):
"""
Inputs must have even numbered batch size
with top N/2 indices being the anchors
and bottom N/2 being corresponding positives
"""
outputs = inputs
if self.frontend_cls is not None:
outputs = self.frontend_cls(dtype=jnp.float32, name="frontend")(outputs)
outputs = outputs[Ellipsis, jnp.newaxis]
outputs = outputs.astype(self.dtype)
encoder = self.model_cls(num_classes=None, dtype=self.dtype, name='encoder')
fc = nn.Dense(self.embedding_dim, use_bias=False, name='embedding_fc')
encoded = encoder(outputs, train=train)
embedding = fc(encoded)
return embedding
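if __name__ == "__main__":
    # Minimal usage sketch (added; not part of the original module). TinyEncoder
    # is a hypothetical stand-in for the real `model_cls`: any flax Module
    # accepting (num_classes, dtype, name) and a `train` kwarg fits the
    # interface SimCLR assumes above.
    import jax

    class TinyEncoder(nn.Module):
        num_classes: Optional[int] = None
        dtype: Dtype = jnp.float32

        @nn.compact
        def __call__(self, x, train: bool = True):
            x = x.reshape((x.shape[0], -1))  # flatten everything but the batch
            return nn.Dense(128, dtype=self.dtype)(x)

    model = SimCLR(model_cls=TinyEncoder, embedding_dim=512)
    x = jnp.ones((8, 64, 64, 1))  # even batch: 4 anchors + 4 positives
    params = model.init(jax.random.PRNGKey(0), x)
    print(model.apply(params, x).shape)  # (8, 512)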
|
1651888
|
from generators.cpp_builder.BuilderGenerator import BuilderGenerator
from generators.java.JavaFileGenerator import JavaFileGenerator
from generators.typescript.TypescriptFileGenerator import TypescriptFileGenerator
from generators.python.PythonFileGenerator import PythonFileGenerator
AVAILABLE_GENERATORS = {
'cpp_builder': BuilderGenerator,
'java': JavaFileGenerator,
'typescript': TypescriptFileGenerator,
'python': PythonFileGenerator
}
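# Usage sketch (added): look up a generator class by key, e.g.
#   generator_cls = AVAILABLE_GENERATORS['python']
#   generator = generator_cls(...)  # constructor arguments depend on the generator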
|
1651921
|
from typing import Any
from wav2vec_toolkit.text_preprocessing.normalizers import NormalizerOperation
class Normalizer(NormalizerOperation):
_whitelist = r"[0-9\w]+"
_dictionary = {}
_do_lowercase = True
_text_key_name = "sentence"
def text_level_normalizer(self, sentence: str, *args: Any, **kwargs: Any) -> str:
text = super().text_level_normalizer(sentence, *args, **kwargs)
        # Add any further project-specific normalization steps here:
        # text = ...
return text
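# Usage sketch (added; assumes the NormalizerOperation base class applies
# _whitelist/_dictionary/_do_lowercase in its text_level_normalizer):
#   normalizer = Normalizer()
#   clean = normalizer.text_level_normalizer("Some RAW sentence 123")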
|
1651939
|
import os
import tensorflow as tf
from src.dataset.dataset_factory import DatasetFactory
from src.dataset.dcase_dataset import DCASEDataset
from src.dataset.music_dataset import MusicDataset
from src.utils.params import Params
class TestDatasetFactory(tf.test.TestCase):
def setUp(self):
# load the parameters from json file
json_path = os.path.join("/tf/test_environment/", "config", "params.json")
self.params = Params(json_path)
def test_factory_creation_dcase(self):
dataset = DatasetFactory.create_dataset("DCASEDataset", params=self.params)
        self.assertIsInstance(dataset, DCASEDataset)
def test_factory_creation_music(self):
dataset = DatasetFactory.create_dataset("MusicDataset", params=self.params)
        self.assertIsInstance(dataset, MusicDataset)
if __name__ == '__main__':
tf.test.main()
|
1652000
|
from flask_wtf import FlaskForm
from wtforms import SelectField, SubmitField
class ShotForm(FlaskForm):
asset_task = SelectField('Add Asset task:', id='asset-task')
asset_users = SelectField('Add Assignee:', id='asset-task-user')
submit = SubmitField('submit')
|
1652011
|
from flask import Flask, send_file, jsonify
import os
app = Flask(__name__)
app.config['SEND_FILE_MAX_AGE_DEFAULT'] = 1
avatar_dir = "avatars" # no slash
# create avatars directory if it does not exist
if not os.path.exists(avatar_dir):
os.makedirs(avatar_dir)
@app.route("/status")
def serverStatus():
return jsonify({
"response" : 200,
"status" : 1
})
@app.route("/<int:uid>")
def serveAvatar(uid):
# Check if avatar exists
if os.path.isfile("{}/{}.png".format(avatar_dir, uid)):
avatarid = uid
else:
avatarid = 0
# Serve actual avatar or default one
return send_file("{}/{}.png".format(avatar_dir, avatarid))
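# Usage sketch (added): with the server running,
#   GET /status  -> {"response": 200, "status": 1}
#   GET /<uid>   -> avatars/<uid>.png if present, otherwise avatars/0.png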
# Run the server (guarded so the module can be imported without side effects)
if __name__ == "__main__":
    app.run(host="0.0.0.0", port=5000)
|
1652015
|
from .earnapp import EarnApp, Device, Transaction, RedeemDetails, Referee
VERSION = '0.0.16.0'
print(f"API Version: {VERSION}")
|
1652097
|
from navigation import models as navigation_models
from rest_framework import viewsets
from manager import serializers, models
class LevelViewset(viewsets.ModelViewSet):
"""
A viewset used to retrieve or update levels.
"""
serializer_class = serializers.LevelSerializer
queryset = navigation_models.Level.objects.all()
class ProjectViewset(viewsets.ModelViewSet):
"""
Viewset to retrieve or update projects.
"""
serializer_class = serializers.ProjectSerializer
queryset = navigation_models.Project.objects.all()
class SoldierKitCollectionViewset(viewsets.ModelViewSet):
"""
Viewset that is used to retrieve or update soldier kit collections (assault, engineer, support, recon).
"""
serializer_class = serializers.SoldierKitCollectionSerializer
queryset = models.SoldierKitCollection.objects.all()
|
1652114
|
import ConfigParser
import logging
import functools
import json
import abc
import os
log = logging.getLogger(__name__)
EXE = "pbsmrtpipe"
__all__ = ['ButlerTask',
'ButlerWorkflow',
'config_parser_to_butler']
__author__ = "<NAME>"
class TestkitCfgParserError(ValueError):
pass
class Constants(object):
"""Allowed values in cfg file."""
CFG_TASK = 'pbsmrtpipe:task'
CFG_WORKFLOW = 'pbsmrtpipe:pipeline'
CFG_JOB_ID = "id"
CFG_ENTRY_POINTS = 'entry_points'
CFG_PRESET_XML = 'preset_xml'
CFG_PRESET_JSON = 'preset_json'
CFG_WORKFLOW_XML = 'pipeline_xml'
CFG_TASK_ID = 'task_id'
CFG_BASE_EXE = 'base_exe'
CFG_REQUIREMENTS = 'requirements'
CFG_XRAY_TESTS = "xray_tests"
CFG_OUTPUT_DIR = 'output_dir'
CFG_DEBUG = 'debug'
CFG_MOCK = 'mock'
class Butler(object):
__metaclass__ = abc.ABCMeta
def __init__(self, job_id, output_dir, entry_points, preset_json,
preset_xml, debug,
force_distribute=None, force_chunk=None, base_exe=EXE,
requirements=(), xray_tests=()):
self.output_dir = output_dir
self.entry_points = entry_points
self.preset_json = preset_json
self.preset_xml = preset_xml
self.debug_mode = debug
# None means no override, True|False means override
self.force_distribute = force_distribute
self.force_chunk = force_chunk
# this needs to be set in the Butler.cfg file.
self.job_id = job_id
self.base_exe = base_exe
self.requirements = requirements
self.xray_tests = xray_tests
def __repr__(self):
_d = dict(k=self.__class__.__name__, p=self.prefix)
return "<{k} {p} >".format(**_d)
@abc.abstractproperty
def prefix(self):
# Used in the repr
return ""
def to_cmd(self):
return _to_pbsmrtpipe_cmd(self.prefix, self.output_dir,
self.entry_points, self.preset_json,
self.preset_xml,
self.debug_mode, self.force_distribute,
self.force_chunk, self.base_exe)
class ButlerWorkflow(Butler):
def __init__(self, job_id, output_dir, pipeline_id, workflow_xml, entry_points, preset_json_path, preset_xml_path, debug, force_distribute=None, force_chunk=None, base_exe=EXE, requirements=(), xray_tests=()):
super(ButlerWorkflow, self).__init__(job_id, output_dir, entry_points, preset_json_path, preset_xml_path, debug, force_distribute=force_distribute, force_chunk=force_chunk, base_exe=base_exe, requirements=requirements, xray_tests=xray_tests)
assert [workflow_xml, pipeline_id].count(None) == 1
self.workflow_xml = workflow_xml
self.pipeline_id = pipeline_id
@property
def prefix(self):
if self.pipeline_id is not None:
return "pipeline-id {i}".format(i=self.pipeline_id)
else:
return "pipeline {i}".format(i=self.workflow_xml)
@staticmethod
def from_json(file_name, force_distribute=None, force_chunk=None):
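        # Example testkit JSON (added sketch; keys inferred from the accesses
        # below, values are hypothetical):
        #   {"testId": "my_job", "outputDir": "job_output",
        #    "pipelineId": "pbsmrtpipe.pipelines.dev_01",
        #    "entryPoints": [{"entryId": "eid_subread", "path": "/path/subreads.xml"}],
        #    "debug": false}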
with open(file_name) as json_f:
d = json.load(json_f)
assert d.get('jobType', "pbsmrtpipe") == "pbsmrtpipe"
return ButlerWorkflow(
job_id=d['testId'],
output_dir=d.get('outputDir', "job_output"),
pipeline_id=d.get("pipelineId", None),
workflow_xml=d.get("workflowXml", None),
entry_points={e['entryId']:e['path'] for e in d['entryPoints']},
preset_xml_path=d.get('presetXml', None),
preset_json_path=d.get("presetJson", None),
debug=d.get("debug", False),
force_distribute=force_distribute,
force_chunk=force_chunk,
requirements=tuple(d.get("requirements", [])),
xray_tests=tuple(d.get("xrayTests", [])))
class ButlerTask(Butler):
def __init__(self, job_id, output_dir, task_id, entry_points, preset_json, preset_xml, debug, force_distribute=None, force_chunk=None):
super(ButlerTask, self).__init__(job_id, output_dir, entry_points, preset_json, preset_xml, debug, force_distribute=force_distribute, force_chunk=force_chunk)
self.task_id = task_id
@property
def prefix(self):
return "task {i}".format(i=self.task_id)
def _to_pbsmrtpipe_cmd(prefix_mode, output_dir, entry_points_d, preset_json, preset_xml, debug, force_distribute, force_chunk, base_exe=EXE):
ep_str = " ".join([" -e '" + ":".join([k, v]) + "'" for k, v in entry_points_d.iteritems()])
d_str = '--debug' if debug else " "
p_str = " " if preset_xml is None else "--preset-xml={p}".format(p=preset_xml)
j_str = " " if preset_json is None else "--preset-json={j}".format(j=preset_json)
m_str = ' '
force_distribute_str = ''
if isinstance(force_distribute, bool):
m = {True: '--force-distributed', False: '--local-only'}
force_distribute_str = m[force_distribute]
force_chunk_str = ''
if isinstance(force_chunk, bool):
m = {True: '--force-chunk-mode', False: '--disable-chunk-mode'}
force_chunk_str = m[force_chunk]
_d = dict(x=base_exe, e=ep_str, d=d_str, p=p_str, j=j_str, m=prefix_mode, o=output_dir, k=m_str,
f=force_distribute_str, c=force_chunk_str)
cmd = "{x} {m} {c} {d} {e} {p} {j} {k} {f} --output-dir={o}"
return cmd.format(**_d)
to_task_cmd = functools.partial(_to_pbsmrtpipe_cmd, 'task')
to_workflow_cmd = functools.partial(_to_pbsmrtpipe_cmd, 'pipeline')
def _parse_or_default(section, key, p, default):
if p.has_option(section, key):
return p.get(section, key)
return default
def _parse_preset_xml(section_name, p, base_dir):
v = _parse_or_default(section_name, Constants.CFG_PRESET_XML, p, None)
if v is None:
return None
else:
p = v if os.path.isabs(v) else os.path.join(base_dir, v)
if os.path.exists(p):
return p
else:
raise IOError("Unable to find preset XML '{p}'".format(p=p))
def _parse_preset_json(section_name, p, base_dir):
v = _parse_or_default(section_name, Constants.CFG_PRESET_JSON, p, None)
if v is None:
return None
else:
p = v if os.path.isabs(v) else os.path.join(base_dir, v)
if os.path.exists(p):
return p
else:
raise IOError("Unable to find preset XML '{p}'".format(p=p))
def _parse_debug_mode(section_name, p):
    # Note: values read from the cfg come back as strings, and bool() of any
    # non-empty string is True, so this is effectively always True.
    return bool(_parse_or_default(section_name, Constants.CFG_DEBUG, p, True))
def _parse_entry_points(p, root_dir_name):
"""
Files may be defined relative to the butler.cfg file or absolute paths
"""
ep_d = {}
ep_keys = p.options(Constants.CFG_ENTRY_POINTS)
for ep_key in ep_keys:
v = p.get(Constants.CFG_ENTRY_POINTS, ep_key)
if not os.path.isabs(v):
v = os.path.join(root_dir_name, v)
ep_d[ep_key] = os.path.abspath(v)
return ep_d
def _parse_entry_points_and_presets(section_name, p, root_dir):
return _parse_entry_points(p, root_dir), _parse_preset_xml(section_name, p, root_dir), _parse_preset_json(section_name, p, root_dir)
def _to_parse_workflow_config(job_output_dir, base_dir):
"""
:param job_output_dir: Job output directory
:param base_dir: base directory of the butler.cfg file
:return:
"""
def _parse_workflow_config(p):
ep_d, preset_xml, preset_json = _parse_entry_points_and_presets(Constants.CFG_WORKFLOW, p, base_dir)
x = p.get(Constants.CFG_WORKFLOW, Constants.CFG_WORKFLOW_XML)
if not os.path.isabs(x):
x = os.path.join(base_dir, x)
if not os.path.exists(x):
raise IOError("Unable to find pipeline XML '{x}'".format(x=x))
d = _parse_debug_mode(Constants.CFG_WORKFLOW, p)
workflow_xml = os.path.abspath(x)
# FIXME. This should be defined in cfg file.
default_job_id = os.path.basename(base_dir)
job_id = _parse_or_default(Constants.CFG_WORKFLOW, Constants.CFG_JOB_ID, p, default_job_id)
base_exe = _parse_or_default(Constants.CFG_WORKFLOW, Constants.CFG_BASE_EXE, p, EXE)
requirements = _parse_or_default(Constants.CFG_WORKFLOW, Constants.CFG_REQUIREMENTS, p, "").split()
return ButlerWorkflow(job_id, job_output_dir, None, workflow_xml, ep_d, preset_json, preset_xml, d, base_exe=base_exe, requirements=requirements)
return _parse_workflow_config
def _to_parse_task_config(output_dir, base_dir):
def _parse_task_config(p):
# FIXME. This should be defined in cfg file.
default_job_id = os.path.basename(base_dir)
ep_d, preset_xml, preset_json = _parse_entry_points_and_presets(Constants.CFG_TASK, p, base_dir)
job_id = _parse_or_default(Constants.CFG_TASK, Constants.CFG_JOB_ID, p, default_job_id)
task_id = p.get(Constants.CFG_TASK, Constants.CFG_TASK_ID)
d = _parse_debug_mode(Constants.CFG_TASK, p)
b = ButlerTask(job_id, output_dir, task_id, ep_d, preset_json, preset_xml, d, force_distribute=False)
return b
return _parse_task_config
def config_parser_to_butler(file_path):
"""
:param file_path: path to butler config file
:return: Butler instance
:rtype: Butler
"""
if file_path.endswith(".json"):
return ButlerWorkflow.from_json(file_path)
# this is weak. Needs error handling.
p = ConfigParser.ConfigParser()
_ = p.read(file_path)
# paths within the config file can be relative the butler.cfg file
base_dir = os.path.dirname(file_path)
# pbsmrtpipe will make the directory if it doesn't exist
default_output_dir = os.path.join(base_dir, 'job_output')
output_dir = _parse_or_default(Constants.CFG_WORKFLOW, Constants.CFG_OUTPUT_DIR, p, default_output_dir)
# output_dir must be defined relative to testkit.cfg or an absolute path
if not os.path.isabs(output_dir):
output_dir = os.path.join(base_dir, output_dir)
if p.has_section(Constants.CFG_WORKFLOW):
func = _to_parse_workflow_config(output_dir, base_dir)
elif p.has_section(Constants.CFG_TASK):
func = _to_parse_task_config(output_dir, base_dir)
else:
_d = dict(x=Constants.CFG_WORKFLOW, y=Constants.CFG_TASK, f=file_path)
raise TestkitCfgParserError("Expected section {x} or {y} in {f}".format(**_d))
butler = func(p)
return butler
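# Example butler.cfg layout (added sketch; section and option names come from
# Constants above, values are hypothetical):
#   [pbsmrtpipe:pipeline]
#   pipeline_xml = pipeline.xml
#   preset_xml = preset.xml
#   id = my_job
#
#   [entry_points]
#   eid_subread = /path/to/subreads.xml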
|
1652120
|
import cantera as ct
import numpy as np
from src.ct.def_ct_tools import *
from src.ct.senkin import senkin
import time
def PFA_algo(soln, RR, crit, seeds, path_save=None, overwrite=False):
#if soln.n_species > 200:
# verbose = True
#else:
if path_save is None:
overwrite = True
rxn = soln.reaction
sp = soln.species
n_sp = soln.n_species
n_rxn = soln.n_reactions
if_compute = True
if overwrite is False:
try:
PFA_file = np.load(open(path_save, 'rb'))
rmat = PFA_file['rmat']
if_compute = False
except IOError:
#print 'no such file ---- '+ path_save
pass
verbose = if_compute
if if_compute:
cpu0 = time.time()
if verbose:
            print('calculating P, C flux ... ')
P = [0]*n_sp
C = [0]*n_sp
Pmat = np.zeros([n_sp,n_sp])
Cmat = np.zeros([n_sp,n_sp])
rPmat1 = np.zeros([n_sp,n_sp])
rCmat1 = np.zeros([n_sp,n_sp])
rPmat2 = np.zeros([n_sp,n_sp])
rCmat2 = np.zeros([n_sp,n_sp])
rmat = np.zeros([n_sp,n_sp])
for i_rxn in range(n_rxn):
#print 'rxn'+str(i_rxn)+' RR = '+str(RR[i_rxn])
s_mu = dict()
for s in rxn(i_rxn).reactants.keys():
if s not in s_mu.keys():
s_mu[s] = 0
s_mu[s] -= rxn(i_rxn).reactants[s]
for s in rxn(i_rxn).products.keys():
if s not in s_mu.keys():
s_mu[s] = 0
s_mu[s] += rxn(i_rxn).products[s]
#print s_mu
for s in s_mu.keys():
#print s
sp_id = soln.species_index(s)
#print 'P[sp_id] = '+str(P[sp_id])
P[sp_id] += max( 1.0 * s_mu[s] * RR[i_rxn], 0)
C[sp_id] += max(-1.0 * s_mu[s] * RR[i_rxn], 0)
for A in s_mu.keys():
id_A = soln.species_index(A)
for B in s_mu.keys():
id_B = soln.species_index(B)
if B != A:
#print str(id_A)+' not '+str(id_B)
Pmat[id_A,id_B] += float(max( 1.0 * s_mu[A] * RR[i_rxn], 0))
Cmat[id_A,id_B] += float(max(-1.0 * s_mu[A] * RR[i_rxn], 0))
#print 'Pmat('+str(id_A)+','+str(id_B)+') = '+str(s_mu[A] * RR[i_rxn])+','+str(Pmat[id_A,id_B])
if verbose:
            print('calculating 1st-gen coefficients ... ')
for id_A in range(n_sp):
for id_B in range(n_sp):
norm = max(P[id_A], C[id_A])
if norm>0 and id_B != id_A:
rPmat1[id_A, id_B] = Pmat[id_A, id_B] / norm
rCmat1[id_A, id_B] = Cmat[id_A, id_B] / norm
if verbose:
            print('calculating 2nd-gen coefficients ... (the correct way!!!)')
for id_A in range(n_sp):
for id_B in range(n_sp):
rPmat2[id_A, id_B] = float(np.dot(rPmat1[id_A, :], rPmat1[:, id_B]))
rCmat2[id_A, id_B] = float(np.dot(rCmat1[id_A, :], rCmat1[:, id_B]))
#raise ValueError(str(rPmat2[id_A, id_B]) + ' vs ' + str(rPmat2_slow))
if verbose:
            print('calculating total-gen coefficients ... ')
for id_A in range(n_sp):
for id_B in range(n_sp):
rmat[id_A, id_B] = rPmat1[id_A, id_B] + rCmat1[id_A, id_B] + \
rPmat2[id_A, id_B] +rCmat2[id_A, id_B]
if verbose:
            print('CPU time for coefficients = ' + str(time.time()-cpu0))
        if path_save is not None:
            np.savez(path_save,
                     P=P, C=C, Pmat=Pmat, Cmat=Cmat, rPmat1=rPmat1,
                     rCmat1=rCmat1, rPmat2=rPmat2, rCmat2=rCmat2, rmat=rmat)
if verbose:
        print('iterating ... ')
cpu0 = time.time()
    species_kept = list(seeds)  # copy so the caller's list is not mutated
n_kept = len(species_kept)
n_round = 0
if verbose:
        print(species_kept)
while True:
n_round += 1
if verbose:
            print('round ' + str(n_round))
for id_B in range(n_sp):
B = sp(id_B).name
for A in species_kept:
id_A = soln.species_index(A)
if (rmat[id_A, id_B] > crit) and (B not in species_kept):
species_kept.append(B)
if verbose:
                        print('added ' + B + ' from ' + A)
if len(species_kept) == n_kept:
break
else:
n_kept = len(species_kept)
if verbose:
        print('CPU time for picking up species = ' + str(time.time()-cpu0))
return species_kept
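# Usage note (added): `seeds` is the list of species names to start from (e.g.
# the fuel species). Species whose total interaction coefficient rmat[A, B]
# with any kept species A exceeds `crit` are added iteratively until the kept
# set stops growing.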
def test_PFA():
soln = 'gri30.xml'
soln = ct.Solution(soln)
fuel_dict = {'CH4':1.0}
crit = 0.1
phi = 1.0
X0 = Xstr(soln, fuel_dict, phi)
path_save = 'senkin.npz'
raw = load_raw(path_save)
#raw = senkin(soln, 5, 1000, X0, 'True')
#raw = save_raw(raw, path_save)
RR = []
for i in range(soln.n_reactions):
RR.append(float(raw['net_reaction_rate'][10,i]))
#print RR
    species_kept = PFA_algo(soln, RR, crit, list(fuel_dict.keys()))
    print(len(species_kept))
if __name__ == '__main__':
test_PFA()
|
1652130
|
import unittest
import numpy as np
from neural_compressor.adaptor.engine_utils.util import collate_preds
class TestUtil(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        pass
    @classmethod
    def tearDownClass(cls):
        pass
def test_collate_preds(self):
fake_preds = np.random.randn(300, 32)
res = collate_preds(fake_preds)
self.assertEqual(int(res.shape[0]), 300*32)
if __name__ == "__main__":
unittest.main()
|
1652155
|
import os
import tempfile
import zipfile
from shutil import copy
from unittest import mock
import pandas as pd
from ludwig.datasets.titanic import Titanic
class FakeTitanicDataset(Titanic):
def __init__(self, cache_dir):
super().__init__(cache_dir=cache_dir)
def test_download_titanic_dataset(tmpdir):
titanic_train_df = pd.DataFrame(
{
"passenger_id": [1216, 699, 234],
"pclass": [3, 3, 4],
"name": ["<NAME>", "bo staff", "<NAME>"],
"sex": ["female", "male", "male"],
"age": [38, 28, 18],
"sibsp": [0, 1, 0],
"parch": [1, 1, 2],
"ticket": [335432, 315089, 322472],
"fare": [7.7333, 8.6625, 9.8765],
"cabin": [1, 2, 4],
"embarked": ["C", "Q", "S"],
"boat": [0, 0, 0],
"body": [0, 1, 0],
"home.dest": ["Croatia", "Italy", "Sweden"],
"survived": [0, 1, 0],
}
)
titanic_test_df = pd.DataFrame(
{
"passenger_id": [1216, 699, 234],
"pclass": [3, 3, 4],
"name": ["<NAME>", "bo bo bo", "<NAME>"],
"sex": ["female", "male", "male"],
"age": [28, 18, 30],
"sibsp": [0, 1, 0],
"parch": [1, 1, 2],
"ticket": [335412, 215089, 922472],
"fare": [17.7333, 18.6625, 19.8765],
"cabin": [2, 2, 1],
"embarked": ["Q", "Q", "C"],
"boat": [0, 0, 0],
"body": [0, 1, 0],
"home.dest": ["Sweden", "Slovenia", "Italy"],
"survived": [0, 1, 0],
}
)
with tempfile.TemporaryDirectory() as source_dir:
train_fname = os.path.join(source_dir, "train.csv")
titanic_train_df.to_csv(train_fname, index=False)
test_fname = os.path.join(source_dir, "test.csv")
titanic_test_df.to_csv(test_fname, index=False)
archive_filename = os.path.join(source_dir, "titanic.zip")
with zipfile.ZipFile(archive_filename, "w") as z:
z.write(train_fname, "train.csv")
z.write(test_fname, "test.csv")
config = {
"version": 1.0,
"competition": "titanic",
"archive_filename": "titanic.zip",
"split_filenames": {
"train_file": "train.csv",
"test_file": "test.csv",
},
"csv_filename": "titanic.csv",
}
def download_files(competition_name, path):
assert competition_name == "titanic"
copy(archive_filename, path)
with mock.patch("ludwig.datasets.base_dataset.read_config", return_value=config):
with mock.patch("ludwig.datasets.mixins.kaggle.create_kaggle_client") as mock_kaggle_cls:
mock_kaggle_api = mock.MagicMock()
mock_kaggle_api.competition_download_files = download_files
mock_kaggle_cls.return_value = mock_kaggle_api
dataset = FakeTitanicDataset(tmpdir)
assert not dataset.is_downloaded()
dataset.download()
assert dataset.is_downloaded()
mock_kaggle_api.authenticate.assert_called_once()
assert not dataset.is_processed()
dataset.process()
assert dataset.is_processed()
output_train_df, output_test_df, output_val_df = dataset.load(split=True)
assert len(output_train_df) == len(titanic_train_df)
assert len(output_test_df) == len(titanic_test_df)
assert len(output_val_df) == 0
|
1652186
|
import os
import subprocess
import sys
import urllib.request
import hashlib
import socket
import shutil
import errno
import statistics
from datetime import date
INVALID_SYMBOLS = [";","&","(",")","|","*","?","[","]","~","{","}","<","!","^",'"',"'","\\","$","/","+","-","#"," "]
class insertSizeError(Exception):
    def __init__(self, message):
        super().__init__(message)
        self.message = message
def mkdir(indir, log=None):
    if not os.path.isdir(indir):
os.mkdir(indir)
# else:
# msg = "cannot make dir:"+indir+" dir exists...skipping....\n"
# sys.stderr.write(msg)
# writelog(log, msg)
def get_abs_path(in_file, log=None):
if os.path.isfile(in_file):
return os.path.abspath(in_file)
else:
msg = " ".join(["Cannot find file:",in_file,"exiting....\n"])
sys.stderr.write(msg)
writelog(log, msg)
sys.exit(1)
def get_base_name(path):
no_path = os.path.basename(path)
if no_path[-3:] == ".gz":
no_path = no_path[:-3]
no_ext = ".".join(no_path.split(".")[:-1])
return no_ext
def is_empty_file(infile):
    return os.stat(infile).st_size == 0
def run_command_stdout(cmd_list, out_file, log=None, fatal=False):
msg = ""
if log is None:
try:
# print(" ".join(cmd_list)+" > "+out_file)
out = open(out_file,"w")
subprocess.check_call(cmd_list, stdout=out)
out.close()
except subprocess.CalledProcessError as e:
if e.output is not None:
msg = str(e.output)+"\n"
if e.stderr is not None:
msg += str(e.stderr)+"\n"
cmd_string = " ".join(cmd_list)
            msg += cmd_string + "\n"
sys.stderr.write(msg)
if fatal:
sys.exit(1)
else:
return False
else:
try:
out_log = open(log,"a")
out_log.write(" ".join(cmd_list)+" > "+out_file+"\n")
out = open(out_file,"w")
subprocess.check_call(cmd_list, stdout=out, stderr=out_log)
out.close()
out_log.close()
except subprocess.CalledProcessError as e:
if e.output is not None:
msg = str(e.output)+"\n"
if e.stderr is not None:
msg += str(e.stderr)+"\n"
cmd_string = " ".join(cmd_list)
            msg += cmd_string + "\n"
writelog(log, msg)
sys.stderr.write(msg)
if fatal:
sys.exit(1)
else:
return False
return True
def run_command(cmd_list, log=None, fatal=False):
msg = ""
if log is None:
try:
# print(" ".join(cmd_list))
subprocess.check_call(cmd_list)
except subprocess.CalledProcessError as e:
if e.output is not None:
msg = str(e.output)+"\n"
if e.stderr is not None:
msg += str(e.stderr)+"\n"
cmd_string = " ".join(cmd_list)
            msg += cmd_string + "\n"
sys.stderr.write(msg)
if fatal:
sys.exit(1)
else:
return False
else:
try:
out = open(log,"a")
out.write(" ".join(cmd_list)+"\n")
subprocess.check_call(cmd_list, stdout=out, stderr=out)
out.close()
except subprocess.CalledProcessError as e:
if e.output is not None:
msg = str(e.output)+"\n"
if e.stderr is not None:
msg += str(e.stderr)+"\n"
cmd_string = " ".join(cmd_list)
            msg += cmd_string + "\n"
writelog(log, msg)
sys.stderr.write(msg)
if fatal:
sys.exit(1)
else:
return False
return True
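# Usage sketch (added):
#   run_command(["ls", "-l"], log="run.log")           # append output to run.log
#   run_command_stdout(["sort", "in.txt"], "out.txt")  # redirect stdout to out.txt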
def writelog(log, msg):
if log is not None:
with open(log, "a") as out:
out.write(msg)
def download(url, out_file, md5=None, timeout=60, _attempt=1, max_attempts=1):
if _attempt > max_attempts:
return False
socket.setdefaulttimeout(timeout)
print("downloading ", url, "to", out_file)
try:
urllib.request.urlretrieve(url, out_file)
except KeyboardInterrupt:
sys.exit(1)
    except Exception:
print(sys.exc_info())
print("download failed...")
return download(url, out_file, md5=md5, timeout=timeout, _attempt=_attempt+1, max_attempts=max_attempts)
print("download complete")
    if md5 is None:
return True
else:
print("checking md5 of ", out_file)
with open(out_file,"rb") as out:
data = out.read()
this_md5 = hashlib.md5(data).hexdigest()
if this_md5 != md5:
print("MD5 of", out_file, " : ", this_md5, "does not match currect MD5 hash: ", md5)
return download(url, out_file, md5=md5, timeout=timeout, _attempt=_attempt+1, max_attempts=max_attempts)
else:
print("MD5 hash of ", out_file, "matches expected")
return True
def remove(infile):
if os.path.exists(infile) or os.path.islink(infile):
try:
if os.path.isfile(infile) or os.path.islink(infile):
os.remove(infile)
elif os.path.isdir(infile):
shutil.rmtree(infile)
except OSError as e:
print("Error: %s : %s" % (infile, e.strerror))
def get_median_insert_size(infile):
median_insert_size = 0
with open(infile,"r") as inf:
for line in inf:
insert = line.split("=")[1]
insert = insert.replace("\n","")
median_insert_size = int(float(insert))
return median_insert_size
def calc_median_insert_size(insam):
insert_sizes = []
with open(insam,"r") as sam:
for line in sam:
split_line = line.split("\t")
            if len(split_line) >= 9:
                insert_size = int(split_line[8])  # SAM column 9: TLEN (template length)
if insert_size > 0:
insert_sizes.append(insert_size)
    if len(insert_sizes) < 1:
        raise insertSizeError("Can't calculate median insert size due to lack of valid insert size values")
insert_sizes.sort()
median = statistics.median(insert_sizes)
return median
def check_file_exists(infile):
if os.path.exists(infile):
return True
else:
raise FileNotFoundError(errno.ENOENT, os.strerror(errno.ENOENT), infile)
def check_status_file(infile):
succeeded = True
if os.path.exists(infile):
with open(infile,"r") as inf:
for line in inf:
if "FAILED" in line:
succeeded = False
return succeeded
def replace_special_chars(string, encode="_"):
for char in INVALID_SYMBOLS:
string = string.replace(char,encode)
return string
def estimate_read_length(fq, reads=10000):
lengths = []
with open(fq,"r") as f:
for x, line in enumerate(f):
if x%4 == 1:
lengths.append(len(line.replace('\n',"")))
if x >= reads:
break
length = sum(lengths)//len(lengths)
return length
def log(step, msg, log=None):
step = step.upper()
max_step = 15
if len(step) > max_step:
step = step[:max_step]
step_buffer = (max_step - len(step)) + 2
step += " "*step_buffer
    lines = step + msg
if log is not None:
lines += " &> " + log
print(lines)
|
1652197
|
import json
from collections import OrderedDict
import openpyxl
import pandas as pd
from .common import *
from .df_utility import *
from .time_utility import *
def methods_from_prob(prob: dict) -> []:
return methods_from_method_list(prob.get('methods', []))
def methods_from_method_list(method_list: list) -> []:
return [method for method, _, _, _ in method_list]
# def standard_dispatch_analysis(securities: [str], methods: [str], data_hub, database,
# method_list: list) -> []:
# result_list = []
# for query_method in methods:
# for hash_id, _, _, function_entry in method_list:
# if hash_id == query_method:
# if function_entry is None:
# print('Method ' + hash_id + ' not implemented yet.')
# else:
# try:
# result = function_entry(securities, data_hub, database)
# except Exception as e:
# print('Execute analyzer [' + hash_id + '] Error: ')
# print(e)
# print(traceback.format_exc())
# result = None
# finally:
# pass
# if result is not None and len(result) > 0:
# result_list.append((query_method, result))
# break
# return result_list
# ----------------------------------------------------------------------------------------------------------------------
class AnalysisResult:
SCORE_MIN = 0
SCORE_MAX = 100
SCORE_PASS = SCORE_MAX
SCORE_JUST = 60
SCORE_FAIL = SCORE_MIN
SCORE_NOT_APPLIED = None
WEIGHT_NORMAL = 1
WEIGHT_ONE_VOTE_VETO = 999999
def __init__(self, securities: str = '', period: datetime.datetime or None = None,
score: int or bool = False, reason: str or [str] = '', brief: str = '',
weight: int = WEIGHT_NORMAL):
self.method = ''
self.period = to_py_datetime(period) if period is not None else None
self.securities = securities
if isinstance(score, bool):
self.score = AnalysisResult.SCORE_PASS if score else AnalysisResult.SCORE_FAIL
elif isinstance(score, (int, float)):
self.score = score
self.score = min(self.score, AnalysisResult.SCORE_MAX)
self.score = max(self.score, AnalysisResult.SCORE_MIN)
else:
self.score = score
if brief is None:
self.brief = ''
elif not str_available(brief):
self.brief = str(brief)
else:
self.brief = brief
if reason is None:
self.reason = brief
elif isinstance(reason, (list, tuple)):
self.reason = '\n'.join(reason)
else:
self.reason = str(reason)
self.weight = weight
self.exception = None
self.traceback = None
def pack(self, to_str: bool = True) -> dict:
return {
# Make the dict keys 'happens to' the fields of Result.Analyzer
'period': str(self.period) if to_str else self.period,
'analyzer': str(self.method) if to_str else self.method,
'stock_identity': str(self.securities) if to_str else self.securities,
'score': str(self.score) if to_str else self.score,
'brief': str(self.brief) if to_str else self.brief,
'reason': str(self.reason) if to_str else self.reason,
'weight': str(self.weight) if to_str else self.weight,
}
def unpack(self, data: dict):
period = data.get('period', 'None')
self.period = None if period == 'None' else text_auto_time(period)
self.method = data.get('analyzer', '')
self.securities = data.get('stock_identity', '')
score = data.get('score', AnalysisResult.SCORE_NOT_APPLIED)
self.score = str2float_safe(score, AnalysisResult.SCORE_NOT_APPLIED)
self.brief = data.get('brief', '')
self.reason = data.get('reason', [])
self.weight = float(data.get('weight', AnalysisResult.WEIGHT_NORMAL))
def serialize(self) -> str:
return json.dumps(self.pack())
def deserialize(self, json_text: str):
self.unpack(json.loads(json_text))
def rough_size(self) -> int:
return sys.getsizeof(self.period) + \
sys.getsizeof(self.method) + \
sys.getsizeof(self.securities) + \
sys.getsizeof(self.score) + \
sys.getsizeof(self.brief) + \
sys.getsizeof(self.reason) + \
sys.getsizeof(self.weight)
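# Usage sketch (added; the identity string is hypothetical):
#   r = AnalysisResult('600000.SSE', datetime.datetime(2020, 12, 31), 80, 'ROE stable')
#   text = r.serialize()
#   r2 = AnalysisResult(); r2.deserialize(text)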
# --------------------------------------------- Analysis Result Conversion ---------------------------------------------
# ----------------------- Analysis Result List <--> Json -----------------------
def analysis_results_to_json(result_list: [AnalysisResult], fp=None) -> bool or str:
    def _analysis_result_json_hook(analysis_result: AnalysisResult) -> dict:
if isinstance(analysis_result, AnalysisResult):
return analysis_result.pack(True)
else:
print('Unknown class: ' + str(analysis_result))
return {}
return json.dump(result_list, fp, default=_analysis_result_json_hook, sort_keys=True, indent=4) \
if fp is not None else json.dumps(result_list, default=_analysis_result_json_hook, sort_keys=True, indent=4)
def analysis_results_from_json(fp) -> [AnalysisResult]:
def _json_analysis_result_hook(_dict: dict) -> AnalysisResult:
analysis_result = AnalysisResult()
analysis_result.unpack(_dict)
return analysis_result
if isinstance(fp, str):
return json.loads(fp, object_hook=_json_analysis_result_hook)
else:
return json.load(fp, object_hook=_json_analysis_result_hook)
# -------------------- Analysis Result List --> Group/Select --------------------
def analysis_result_list_to_analyzer_security_table(
result_list: [AnalysisResult], converter=None) -> {str: {str: [AnalysisResult or any]}}:
result_table = OrderedDict()
for analysis_result in result_list:
analyzer_uuid = analysis_result.method
stock_identity = analysis_result.securities
if analyzer_uuid not in result_table.keys():
result_table[analyzer_uuid] = OrderedDict()
if stock_identity not in result_table[analyzer_uuid].keys():
result_table[analyzer_uuid][stock_identity] = []
if converter is None:
result_table[analyzer_uuid][stock_identity].append(analysis_result)
else:
result_table[analyzer_uuid][stock_identity].append(converter(analysis_result))
return result_table
def analysis_result_list_to_security_analyzer_table(
result_list: [AnalysisResult], converter=None) -> {str: {str: [AnalysisResult or any]}}:
result_table = OrderedDict()
for analysis_result in result_list:
analyzer_uuid = analysis_result.method
stock_identity = analysis_result.securities
if stock_identity not in result_table.keys():
result_table[stock_identity] = OrderedDict()
if analyzer_uuid not in result_table[stock_identity].keys():
result_table[stock_identity][analyzer_uuid] = []
if converter is None:
result_table[stock_identity][analyzer_uuid].append(analysis_result)
else:
result_table[stock_identity][analyzer_uuid].append(converter(analysis_result))
return result_table
def group_analysis_report_by_analyzer(result_list: [AnalysisResult]) -> {str: [AnalysisResult]}:
analyzer_result_group = {}
for analysis_result in result_list:
analyzer_uuid = analysis_result.method
if analyzer_uuid not in analyzer_result_group.keys():
analyzer_result_group[analyzer_uuid] = [analysis_result]
else:
analyzer_result_group[analyzer_uuid].append(analysis_result)
return analyzer_result_group
def group_analysis_report_by_securities(result_list: [AnalysisResult]) -> {str: [AnalysisResult]}:
security_result_group = {}
for analysis_result in result_list:
identity = analysis_result.securities
if identity in security_result_group.keys():
security_result_group[identity].append(analysis_result)
else:
security_result_group[identity] = [analysis_result]
return security_result_group
def get_security_result_from_analysis_result_list(result_list: [AnalysisResult],
stock_identity: str) -> [AnalysisResult]:
security_result_list = []
for analysis_result in result_list:
identity = analysis_result.securities
if identity == stock_identity:
security_result_list.append(analysis_result)
return security_result_list
# --------------------- Analysis Result List <--> DataFrame ---------------------
def analyzer_table_to_dataframe(result_table: {str: [AnalysisResult]}) -> pd.DataFrame:
result_table_processed = {}
for analyzer_uuid, result_list in result_table.items():
# content = [r.reason + ' | ' + str(r.score) for r in result_list]
# indexes = [r.period for r in result_list]
content = []
indexes = []
for r in result_list:
text = r.reason if str_available(r.reason) else 'OK'
text += ' [' + str(r.score) + ']'
content.append(text)
indexes.append(r.period)
s = pd.Series(content, index=indexes)
s.name = analyzer_uuid
result_table_processed[analyzer_uuid] = s
# df = s.to_frame()
# df = df.groupby(df.index).first()
# result_report = df if result_report is None else pd.concat([result_report, df], axis=1)
result_report = pd.DataFrame(result_table_processed)
return result_report.sort_index(ascending=False)
def analysis_result_list_to_dataframe(result_list: [AnalysisResult]) -> pd.DataFrame:
result_table = group_analysis_report_by_analyzer(result_list)
return analyzer_table_to_dataframe(result_table)
def analysis_result_dataframe_to_list(df: pd.DataFrame) -> [AnalysisResult]:
if df is None or df.empty:
return []
result_list = []
# data_dict = df.T.apply(lambda x: x.dropna().to_dict()).tolist()
for period, analyzer, stock_identity, score, reason, weight in \
zip(df['period'], df['analyzer'], df['stock_identity'], df['score'], df['reason'], df['weight']):
analysis_result = AnalysisResult()
analysis_result.period = to_py_datetime(period) if period is not None else None
analysis_result.method = analyzer
analysis_result.securities = stock_identity
analysis_result.score = str2float_safe(score, None)
analysis_result.reason = reason
analysis_result.weight = str2float_safe(weight, None)
if str_available(analyzer):
result_list.append(analysis_result)
return result_list
# --------------------------------- Compatible ---------------------------------
def analysis_result_list_to_single_stock_report(result_list: [AnalysisResult], stock_identity: str) -> pd.DataFrame:
    security_result_list = get_security_result_from_analysis_result_list(result_list, stock_identity)
return analysis_result_list_to_dataframe(security_result_list)
# ----------------------------------------------------------------------------------------------------------------------
class AnalysisContext:
    def __init__(self):
        self.cache = {}
        self.extra = {}
        self.sas = None
        self.logger = None
        self.progress = None
# ----------------------------------------------------------------------------------------------------------------------
# def function_entry_example(securities: str, time_serial: tuple, data_hub: DataHubEntry,
# database: DatabaseEntry, context: AnalysisContext, **kwargs) -> AnalysisResult:
# """
# The example of analyzer function entry.
# :param securities: A single securities code, should be a str.
# :param time_serial: The analysis period
# :param data_hub: DataHubEntry type
# :param database: DatabaseEntry type
# :param context: AnalysisContext type, which can hold cache data for multiple analysis
# :return: AnalysisResult
# """
# pass
#
#
# method_list_example = [
# ('5c496d06-9961-4157-8d3e-a90683d6d32c', 'analyzer brief', 'analyzer details', function_entry_example),
# ]
# ----------------------------------------------------------------------------------------------------------------------
def standard_dispatch_analysis(methods: [str], securities: [str], time_serial: tuple,
data_hub, database, extra: dict, method_list: list) -> [(str, [])] or None:
context = AnalysisContext()
if isinstance(extra, dict):
context.extra = extra
context.sas = extra.get('sas', None)
context.logger = extra.get('logger', print)
context.progress = extra.get('progress', ProgressRate())
result_list = []
for query_method in methods:
sub_list = []
context.cache.clear()
for _uuid, _, _, function_entry in method_list:
if _uuid != query_method:
continue
if function_entry is None:
print('Method ' + _uuid + ' not implemented yet.')
break
context.progress.set_progress(_uuid, 0, len(securities))
for s in securities:
try:
result = function_entry(s, time_serial, data_hub, database, context, **extra)
except Exception as e:
error_info = 'Execute analyzer [' + _uuid + '] for [' + s + '] got exception.'
print(error_info)
print(e)
print(traceback.format_exc())
result = AnalysisResult(s, None, AnalysisResult.SCORE_NOT_APPLIED, error_info)
result.exception = e
result.traceback = traceback.format_exc()
finally:
context.progress.increase_progress(_uuid)
# print('Analyzer %s progress: %.2f%%' % (_uuid, context.progress.get_progress_rate(_uuid) * 100))
if result is None:
result = AnalysisResult(s, None, AnalysisResult.SCORE_NOT_APPLIED, 'NONE')
if not isinstance(result, (list, tuple)):
result = [result]
for r in result:
r.method = _uuid
sub_list.extend(result)
# # Fill result list for alignment
# while len(sub_list) < len(securities):
# sub_list.append([AnalysisResult(securities[len(sub_list)],
# None, AnalysisResult.SCORE_NOT_APPLIED, 'NONE')])
result_list.extend(sub_list)
context.progress.set_progress(_uuid, len(securities), len(securities))
return result_list if len(result_list) > 0 else None
# --------------------------------------------------- Analyzer Helper --------------------------------------------------
# def check_append_report_when_data_missing(df: pd.DataFrame, securities: str,
# uri: str, fields: str or [str], result: list):
# if df is None or len(df) == 0:
# error_info = uri + ': Cannot find data for securities : ' + securities
# log_error(error_info)
# result.append(AnalysisResult(securities, AnalysisResult.SCORE_NOT_APPLIED, error_info))
# return True
# if not isinstance(fields, (list, tuple)):
# fields = [fields]
# for field in fields:
# if field not in df.columns:
# error_info = uri + ': Field ' + field + ' missing for securities : ' + securities
# log_error(error_info)
# result.append(AnalysisResult(securities, AnalysisResult.SCORE_NOT_APPLIED, error_info))
# return True
# return False
def check_gen_report_when_data_missing(df: pd.DataFrame, securities: str,
uri: str, fields: str or [str]) -> AnalysisResult or None:
if df is None or len(df) == 0:
error_info = uri + ': Cannot find data for securities : ' + securities
log_error(error_info)
return AnalysisResult(securities, None, AnalysisResult.SCORE_NOT_APPLIED, error_info)
if not isinstance(fields, (list, tuple)):
fields = [fields]
for field in fields:
if field not in df.columns:
error_info = uri + ': Field ' + field + ' missing for securities : ' + securities
log_error(error_info)
return AnalysisResult(securities, None, AnalysisResult.SCORE_NOT_APPLIED, error_info)
return None
def gen_report_when_analyzing_error(securities: str, exception: Exception):
error_info = 'Error when analysing : ' + securities + '\n'
error_info += str(exception)
log_error(error_info)
print(traceback.format_exc())
return AnalysisResult(securities, None, AnalysisResult.SCORE_NOT_APPLIED, error_info)
def batch_query_readable_annual_report_pattern(
data_hub, securities: str, time_serial: tuple,
fields_balance_sheet: [str] = None,
fields_income_statement: [str] = None,
fields_cash_flow_statement: [str] = None) -> (pd.DataFrame, AnalysisResult):
df = None
if fields_balance_sheet is not None and len(fields_balance_sheet) > 0:
df_balance, result = query_readable_annual_report_pattern(
data_hub, 'Finance.BalanceSheet', securities, time_serial, fields_balance_sheet)
if result is not None:
return df, result
df = df_balance
if fields_income_statement is not None and len(fields_income_statement) > 0:
df_income, result = query_readable_annual_report_pattern(
data_hub, 'Finance.IncomeStatement', securities, time_serial, fields_income_statement)
if result is not None:
return df, result
df = df_income if df is None else (pd.merge(df, df_income, how='left', on=['stock_identity', 'period']))
if fields_cash_flow_statement is not None and len(fields_cash_flow_statement) > 0:
df_cash, result = query_readable_annual_report_pattern(
data_hub, 'Finance.CashFlowStatement', securities, time_serial, fields_cash_flow_statement)
if result is not None:
return df, result
df = df_cash if df is None else (pd.merge(df, df_cash, how='left', on=['stock_identity', 'period']))
df = df.sort_values('period')
return df, None
def query_readable_annual_report_pattern(data_hub, uri: str, securities: str, time_serial: tuple,
fields: [str]) -> (pd.DataFrame, AnalysisResult):
"""
The pattern of query readable annual report. It will do the following things:
1. Check readable names are all known
2. Query data from data center
3. Only keep annual report
4. Check empty
5. Fill na with 0.0 and sort by date
:param data_hub: The instance of DataHubEntry
:param uri: The uri user queries for
:param securities: The securities user queries
:param time_serial: The data range user queries
:param fields: The readable fields user queries
:return: (Query Result if successful, else None, Analysis Result if fail else None)
"""
if not data_hub.get_data_center().check_readable_name(fields):
return None, AnalysisResult(securities, None, AnalysisResult.SCORE_NOT_APPLIED, 'Unknown readable name detect.')
fields_stripped = list(set(fields + ['stock_identity', 'period']))
df = data_hub.get_data_center().query(uri, securities, time_serial, fields=fields_stripped, readable=True)
if df is None or len(df) == 0:
return None, AnalysisResult(securities, None, AnalysisResult.SCORE_NOT_APPLIED,
'No data, skipped' + str(time_serial))
# Only analysis annual report
df = df[df['period'].dt.month == 12]
if len(df) == 0:
return None, AnalysisResult(securities, None, AnalysisResult.SCORE_NOT_APPLIED,
'No data in this period' + str(time_serial))
df.fillna(0.0, inplace=True)
df = df.sort_values('period', ascending=False)
return df, None
def check_industry_in(securities: str, industries: [str], data_hub,
database, context: AnalysisContext) -> bool:
nop(database)
if context.cache.get('securities_info', None) is None:
context.cache['securities_info'] = data_hub.get_data_center().query('Market.SecuritiesInfo')
df_info = context.cache.get('securities_info', None)
df_slice = df_info[df_info['stock_identity'] == securities]
industry = get_dataframe_slice_item(df_slice, 'industry', 0, '')
return industry in industries
# ---------------------------------------------------- Parse Result ----------------------------------------------------
"""
The results should look like:
method1 method2 method3 ... methodM
m1_result1 m2_result1 m3_result1 mM_result1
m1_result2 m2_result2 m3_result2 mM_result2
m1_result3 m2_result3 m3_result3 mM_result3
. . . .
. . . .
. . . .
m1_resultN m2_resultN m3_resultN mM_resultN
"""
def get_securities_in_result(result: dict) -> [str]:
securities = []
for method, results in result.items():
for r in results:
if str_available(r.securities) and r.securities not in securities:
securities.append(r.securities)
return securities
def pick_up_pass_securities(result: dict, score_threshold: int, not_applied_as_fail: bool = False) -> [str]:
securities = get_securities_in_result(result)
for method, results in result.items():
for r in results:
if r.score == AnalysisResult.SCORE_NOT_APPLIED:
exclude = not_applied_as_fail
else:
exclude = (r.score < score_threshold)
if exclude and r.securities in securities:
securities.remove(r.securities)
return securities
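# Example (added): with score_threshold=60, a security is kept only if every
# result for it scored at least 60; SCORE_NOT_APPLIED results drop it only
# when not_applied_as_fail is True.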
# ---------------------------------------------------- Excel Report ----------------------------------------------------
fill_pass = openpyxl.styles.PatternFill(patternType="solid", start_color="10EF10")
fill_flaw = openpyxl.styles.PatternFill(patternType="solid", start_color="FFFF10")
fill_fail = openpyxl.styles.PatternFill(patternType="solid", start_color="EF1010")
fill_none = openpyxl.styles.PatternFill(patternType="solid", start_color="808080")
def __score_to_fill_style(score: int or None):
if score is None:
fill_style = fill_none
elif score < 50:
fill_style = fill_fail
elif score <= 75:
fill_style = fill_flaw
else:
fill_style = fill_pass
return fill_style
def __score_to_fill_text(score: int or None):
if score is None:
return '-'
elif score == 0:
return 'VETO'
elif score <= 50:
return 'FAIL'
elif score <= 75:
return 'FLAW'
elif score <= 90:
return 'WELL'
else:
return 'PASS'
def __aggregate_single_security_results(results: [AnalysisResult]) -> (int, int, str):
veto = False
score = []
reason = []
weight = AnalysisResult.WEIGHT_NORMAL
for r in results:
if r.score is not None:
if r.weight == AnalysisResult.WEIGHT_ONE_VOTE_VETO and r.score == AnalysisResult.SCORE_FAIL:
veto = True
if isinstance(r.score, (int, float)):
score.append(r.score)
else:
print('Error score.')
if str_available(r.reason):
reason.append(r.reason)
if r.weight is not None:
weight = max(weight, r.weight)
if veto:
score = AnalysisResult.SCORE_FAIL
elif len(score) == 0:
score = None
else:
score = int(sum(score) / len(score))
reason = '\n'.join(reason)
return score, weight, reason
def __calc_avg_score_with_weight(scores: list, weights: list) -> int or None:
sum_score = 0
sum_weight = 0
for score, weight in zip(scores, weights):
if score is None:
continue
elif weight == AnalysisResult.WEIGHT_ONE_VOTE_VETO:
if score != AnalysisResult.SCORE_PASS:
return 0
else:
sum_score += score * weight
sum_weight += weight
return (sum_score / sum_weight) if sum_weight > 0 else None
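# Behavior sketch for the weighted average (values illustrative):
#   scores=[80, 60], weights=[2, 1]  ->  (80*2 + 60*1) / 3  ~= 73.3
#   any (score, weight) pair with weight == WEIGHT_ONE_VOTE_VETO and
#   score != SCORE_PASS short-circuits the whole average to 0
#   scores=[None], weights=[1]       ->  None (no usable scores)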
def generate_analysis_report(result: dict, file_path: str, analyzer_name_dict: dict = {}, stock_name_dict: dict = {},
extra_data: pd.DataFrame = None):
"""
Format of result: {analyzer_uuid: {security_identity:[AnalysisResult]}}
"""
if result is None or len(result) == 0:
return
wb = openpyxl.Workbook()
ws_score = wb.active
ws_score.title = 'Score'
ws_comments = wb.create_sheet('Comments')
ws_score['A1'] = 'Securities\\Analyzer'
ws_comments['A1'] = 'Securities\\Analyzer'
ROW_OFFSET = 2
all_weight = []
all_score = []
clock = Clock()
securities_list = []
for analyzer_uuid, analysis_result in result.items():
securities_list = list(set(securities_list + list(analysis_result.keys())))
securities_list.sort()
print('Collect the stock list from results, time spending: %sms' % clock.elapsed_ms())
column = 1
for analyzer_uuid, analysis_result in result.items():
# Note that this function will generate the report column by column
# The first column is the securities code and name
# The first row of each column is the name of analyzer
# So we need to record the score for each cell, then we can calculate the total score by row at the end
        if len(all_score) < len(securities_list):
            print('%s : Result buffer increased: %d -> %d' % (analyzer_uuid, len(all_score), len(securities_list)))
            # Extend the score/weight buffers so there is one slot per security
            while len(all_score) < len(securities_list):
                all_score.append([])
                all_weight.append([])
# Write securities column
if column == 1:
# The first run. Init the total score list here.
            # Flaw: The first column of result should be the full one. Otherwise the index may be out of range.
# all_score = [[] for _ in range(0, len(analysis_result))]
# all_weight = [[] for _ in range(0, len(analysis_result))]
#
# if len(all_score) != len(analysis_result) or len(all_weight) != len(analysis_result):
# print('Error: list length not as expect.')
# assert False
# Collect and sort securities list as the following order
# securities_list = sorted(analysis_result.keys())
row = ROW_OFFSET
col = index_to_excel_column_name(column)
# Output the stock name columns
for security in securities_list:
securities_name = stock_name_dict.get(security, '')
display_text = (security + ' | ' + securities_name) if securities_name != '' else security
ws_score[col + str(row)] = display_text
ws_comments[col + str(row)] = display_text
row += 1
column = 2
# Write analyzer name
row = 1
col = index_to_excel_column_name(column)
analyzer_name = analyzer_name_dict.get(analyzer_uuid, analyzer_uuid)
ws_score[col + str(row)] = analyzer_name
ws_comments[col + str(row)] = analyzer_name
# Write scores
row = ROW_OFFSET
for security in securities_list:
results = analysis_result.get(security, None)
if results is not None:
score, weight, reason = __aggregate_single_security_results(results)
else:
score, weight, reason = AnalysisResult.SCORE_NOT_APPLIED, 0, 'x'
ws_score[col + str(row)] = score
ws_comments[col + str(row)] = reason
            if score is not None:
                all_score[row - ROW_OFFSET].append(score)
                all_weight[row - ROW_OFFSET].append(weight)
            fill_style = __score_to_fill_style(score)
            ws_score[col + str(row)].fill = fill_style
            ws_comments[col + str(row)].fill = fill_style
row += 1
column += 1
# Write total score
row = 1
col_rate = index_to_excel_column_name(column)
col_vote = index_to_excel_column_name(column + 1)
for scores, weights in zip(all_score, all_weight):
if row == 1:
ws_score[col_vote + str(row)] = 'Vote'
ws_comments[col_vote + str(row)] = 'Vote'
ws_score[col_rate + str(row)] = 'Total Rate'
ws_comments[col_rate + str(row)] = 'Total Rate'
row = 2
if len(scores) > 0:
# min_score = min(score)
avg_score = __calc_avg_score_with_weight(scores, weights)
else:
# min_score = None
avg_score = None
# fill_text = __score_to_fill_text(min_score)
# fill_style = __score_to_fill_style(min_score)
fill_text = __score_to_fill_text(avg_score)
fill_style = __score_to_fill_style(avg_score)
# ------------------- Rate -------------------
if avg_score is not None:
ws_score[col_rate + str(row)] = str(int(avg_score))
ws_comments[col_rate + str(row)] = str(int(avg_score))
else:
ws_score[col_rate + str(row)] = '-'
ws_comments[col_rate + str(row)] = '-'
ws_score[col_rate + str(row)].fill = fill_style
ws_comments[col_rate + str(row)].fill = fill_style
# ------------------- Vote -------------------
ws_score[col_vote + str(row)] = fill_text
ws_comments[col_vote + str(row)] = fill_text
ws_score[col_vote + str(row)].fill = fill_style
ws_comments[col_vote + str(row)].fill = fill_style
row += 1
# Write the extra data
if isinstance(extra_data, pd.DataFrame):
row = 1
column += 2
col_extra = index_to_excel_column_name(column)
ws_score[col_extra + str(row)] = 'Extra Data: '
ws_comments[col_extra + str(row)] = 'Extra Data: '
column += 1
alignment_df = pd.DataFrame({'stock_identity': securities_list})
merged_df = pd.merge(alignment_df, extra_data, how='left', on='stock_identity')
merged_df = merged_df.fillna('-')
del merged_df['stock_identity']
columns = merged_df.columns
for title in columns:
row = 1
col_extra = index_to_excel_column_name(column)
ws_score[col_extra + str(row)] = title
ws_comments[col_extra + str(row)] = title
row = 2
column_data = merged_df[title]
for serial_item in column_data:
ws_score[col_extra + str(row)] = serial_item
ws_comments[col_extra + str(row)] = serial_item
row += 1
column += 1
# Write file
wb.save(file_path)
|
1652205
|
from .IPSM import *
class IPSMConstant(IPSM):
def __init__(self, constant_ips, max_ips, duration, SLA):
super().__init__()
self.constant_ips = constant_ips
self.max_ips = max_ips
self.SLA = SLA
self.duration = duration
self.completedInstructions = 0
def getIPS(self):
self.totalInstructions = self.constant_ips * self.duration * self.container.env.intervaltime
if self.completedInstructions < self.totalInstructions:
return self.constant_ips
return 0
def getMaxIPS(self):
return self.max_ips
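# Behavior sketch: getIPS() returns `constant_ips` until the instruction budget
# constant_ips * duration * intervaltime has been consumed, then 0 (idle).
# `completedInstructions` is assumed to be advanced elsewhere by the simulator
# that owns self.container (not shown in this snippet).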
|
1652212
|
import tensorflow as tf
from tensorflow.keras import layers
import os
import numpy as np
os.environ['KMP_DUPLICATE_LIB_OK'] = 'True'
# metrics setting
g_loss_metrics = tf.metrics.Mean(name='g_loss')
d_loss_metrics = tf.metrics.Mean(name='d_loss')
total_loss_metrics = tf.metrics.Mean(name='total_loss')
# hyper-parameters
ITERATION = 10000
Z_DIM = 100
BATCH_SIZE = 512
BUFFER_SIZE = 60000
D_LR = 0.0004
G_LR = 0.0004
IMAGE_SHAPE = (28, 28, 1)
RANDOM_SEED = 42
np.random.seed(RANDOM_SEED)
tf.random.set_seed(RANDOM_SEED)
test_z = tf.random.normal([36, Z_DIM])
def get_random_z(z_dim, batch_size):
return tf.random.uniform([batch_size, z_dim], minval=-1, maxval=1)
# define discriminator
def make_discriminator(input_shape):
return tf.keras.Sequential([
layers.Conv2D(64, 5, strides=2, padding='same',
input_shape=input_shape),
layers.LeakyReLU(),
layers.Dropout(0.3),
layers.Conv2D(128, 5, strides=2, padding='same'),
layers.LeakyReLU(),
layers.Dropout(0.3),
layers.Flatten(),
layers.Dense(1)
])
# define generator
def make_generator(input_shape):
return tf.keras.Sequential([
layers.Dense(7*7*256, use_bias=False, input_shape=input_shape),
layers.BatchNormalization(),
layers.LeakyReLU(),
layers.Reshape((7, 7, 256)),
layers.Conv2DTranspose(
128, 5, strides=1, padding='same', use_bias=False),
layers.BatchNormalization(),
layers.LeakyReLU(),
layers.Conv2DTranspose(
64, 5, strides=2, padding='same', use_bias=False),
layers.BatchNormalization(),
layers.LeakyReLU(),
layers.Conv2DTranspose(
1, 5, strides=2, padding='same', use_bias=False, activation='tanh')
])
# define loss function
def get_loss_fn():
criterion = tf.keras.losses.BinaryCrossentropy(from_logits=True)
def d_loss_fn(real_logits, fake_logits):
real_loss = criterion(tf.ones_like(real_logits), real_logits)
fake_loss = criterion(tf.zeros_like(fake_logits), fake_logits)
return real_loss + fake_loss
def g_loss_fn(fake_logits):
return criterion(tf.ones_like(fake_logits), fake_logits)
return d_loss_fn, g_loss_fn
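# Note: the discriminator minimizes BCE on real-vs-fake logits, while the
# generator uses the non-saturating trick of labeling its fakes as real
# (tf.ones_like) instead of maximizing the discriminator's loss directly.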
# data load & preprocessing
(train_x, _), (_, _) = tf.keras.datasets.fashion_mnist.load_data()
train_x = train_x.reshape(train_x.shape[0], 28, 28, 1)
train_x = (train_x - 127.5) / 127.5
train_ds = (
tf.data.Dataset.from_tensor_slices(train_x)
.shuffle(BUFFER_SIZE)
.batch(BATCH_SIZE, drop_remainder=True)
.repeat()
)
# generator & discriminator
G = make_generator((Z_DIM,))
D = make_discriminator(IMAGE_SHAPE)
# optimizer
g_optim = tf.keras.optimizers.Adam(G_LR, beta_1=0.5, beta_2=0.999)
d_optim = tf.keras.optimizers.Adam(D_LR, beta_1=0.5, beta_2=0.999)
# loss function
d_loss_fn, g_loss_fn = get_loss_fn()
@tf.function
def train_step(real_images):
z = get_random_z(Z_DIM, BATCH_SIZE)
with tf.GradientTape() as d_tape, tf.GradientTape() as g_tape:
fake_images = G(z, training=True)
fake_logits = D(fake_images, training=True)
real_logits = D(real_images, training=True)
d_loss = d_loss_fn(real_logits, fake_logits)
g_loss = g_loss_fn(fake_logits)
d_gradients = d_tape.gradient(d_loss, D.trainable_variables)
g_gradients = g_tape.gradient(g_loss, G.trainable_variables)
d_optim.apply_gradients(zip(d_gradients, D.trainable_variables))
g_optim.apply_gradients(zip(g_gradients, G.trainable_variables))
return g_loss, d_loss
# training loop
def train(ds, log_freq=20):
ds = iter(ds)
for step in range(ITERATION):
images = next(ds)
g_loss, d_loss = train_step(images)
g_loss_metrics(g_loss)
d_loss_metrics(d_loss)
total_loss_metrics(g_loss + d_loss)
if step % log_freq == 0:
template = '[{}/{}] D_loss={:.5f} G_loss={:.5f} Total_loss={:.5f}'
print(template.format(step, ITERATION, d_loss_metrics.result(),
g_loss_metrics.result(), total_loss_metrics.result()))
g_loss_metrics.reset_states()
d_loss_metrics.reset_states()
total_loss_metrics.reset_states()
if __name__ == "__main__":
train(train_ds)
|
1652216
|
import tensorflow as tf
from kgcnn.layers.base import GraphBaseLayer
from kgcnn.layers.modules import LazyConcatenate
from kgcnn.layers.norm import GraphLayerNormalization
from kgcnn.layers.mlp import GraphMLP
from kgcnn.layers.gather import GatherNodesOutgoing, GatherNodes
from kgcnn.layers.pooling import PoolingLocalMessages, PoolingLocalEdgesLSTM
@tf.keras.utils.register_keras_serializable(package='kgcnn', name='GraphSageNodeLayer')
class GraphSageNodeLayer(GraphBaseLayer):
r"""This is a convolutional layer for `GraphSAGE <http://arxiv.org/abs/1706.02216>`_ model as proposed
    by Hamilton et al. (2018). It is not used in the :obj:`kgcnn.literature.GraphSAGE` model implementation
but meant as a simplified module for other networks.
Args:
units: Dimensionality of embedding for each layer in the MLP.
use_edge_features: Whether to use edge-features in addition to node features for convolution. Default is False.
pooling_method: Pooling method to apply to node attributes. Default is "sum".
activation: Activation function to use. Default is "relu".
use_bias: Boolean, whether the layer uses a bias vector.
kernel_initializer: Initializer for the `kernel` weights matrix.
bias_initializer: Initializer for the bias vector.
kernel_regularizer: Regularizer function applied to the `kernel` weights matrix.
bias_regularizer: Regularizer function applied to the bias vector.
activity_regularizer: Regularizer function applied to the output of the layer (its "activation").
kernel_constraint: Constraint function applied to the `kernel` weights matrix.
bias_constraint: Constraint function applied to the bias vector.
"""
def __init__(self,
units,
use_edge_features=False,
pooling_method='sum',
activation='relu',
use_bias=True,
kernel_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
kernel_constraint=None,
bias_constraint=None,
kernel_initializer='glorot_uniform',
bias_initializer='zeros',
**kwargs):
"""Initialize layer."""
super(GraphSageNodeLayer, self).__init__(**kwargs) # Sets additional kwargs for base GraphBaseLayer
self.units = units
self.pooling_method = pooling_method
self.use_edge_features = use_edge_features
kernel_args = {"kernel_regularizer": kernel_regularizer, "activity_regularizer": activity_regularizer,
"bias_regularizer": bias_regularizer, "kernel_constraint": kernel_constraint,
"bias_constraint": bias_constraint, "kernel_initializer": kernel_initializer,
"bias_initializer": bias_initializer, "use_bias": use_bias}
self.gather_nodes_outgoing = GatherNodesOutgoing()
self.concatenate = LazyConcatenate()
self.update_node_from_neighbors_mlp = GraphMLP(units=units, activation=activation, **kernel_args)
self.update_node_from_self_mlp = GraphMLP(units=units, activation=activation, **kernel_args)
        if self.pooling_method in ["LSTM", "lstm"]:
# We do not allow full access to all parameters for the LSTM here for simplification.
self.pooling = PoolingLocalEdgesLSTM(pooling_method=pooling_method, units=units)
else:
self.pooling = PoolingLocalMessages(pooling_method=pooling_method)
self.normalize_nodes = GraphLayerNormalization(axis=-1)
def build(self, input_shape):
"""Build layer."""
super(GraphSageNodeLayer, self).build(input_shape)
def call(self, inputs, **kwargs):
"""Forward pass.
Args:
inputs: [nodes, edge_index] or [nodes, edges, edge_index]
- nodes (tf.RaggedTensor): Node embeddings of shape (batch, [N], F)
- edges (tf.RaggedTensor): Edge or message embeddings of shape (batch, [M], F)
- edge_index (tf.RaggedTensor): Edge indices referring to nodes of shape (batch, [M], 2)
Returns:
tf.RaggedTensor: Node embeddings of shape (batch, [N], F)
"""
if self.use_edge_features:
n, ed, edi = inputs
else:
n, edi = inputs
ed = None
neighboring_node_features = self.gather_nodes_outgoing([n, edi], **kwargs)
if self.use_edge_features:
neighboring_node_features = self.concatenate([neighboring_node_features, ed], **kwargs)
neighboring_node_features = self.update_node_from_neighbors_mlp(neighboring_node_features, **kwargs)
# Pool message
nu = self.pooling([n, neighboring_node_features, edi], **kwargs)
nu = self.concatenate([n, nu], **kwargs) # LazyConcatenate node features with new edge updates
n = self.update_node_from_self_mlp(nu, **kwargs)
n = self.normalize_nodes(n, **kwargs) # Normalize
return n
def get_config(self):
"""Update config."""
config = super(GraphSageNodeLayer, self).get_config()
config.update({"pooling_method": self.pooling_method, "units": self.units,
"use_edge_features": self.use_edge_features})
conf_mlp = self.update_node_from_neighbors_mlp.get_config()
for x in ["kernel_regularizer", "activity_regularizer", "bias_regularizer", "kernel_constraint",
"bias_constraint", "kernel_initializer", "bias_initializer", "use_bias", "activation"]:
config.update({x: conf_mlp[x]})
return config
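# Minimal usage sketch (shapes illustrative; assumes the ragged graph tensors
# used throughout kgcnn):
#   layer = GraphSageNodeLayer(units=32, pooling_method="mean")
#   nodes = tf.ragged.constant([[[0.1, 0.2], [0.3, 0.4]]], ragged_rank=1)
#   edge_index = tf.ragged.constant([[[0, 1], [1, 0]]], ragged_rank=1, dtype=tf.int64)
#   out = layer([nodes, edge_index])  # ragged node embeddings of shape (batch, [N], 32)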
@tf.keras.utils.register_keras_serializable(package='kgcnn', name='GraphSageEdgeUpdateLayer')
class GraphSageEdgeUpdateLayer(GraphBaseLayer):
r"""A extension for `GraphSAGE <http://arxiv.org/abs/1706.02216>`_ model to have edge updates.
It is a direct extension and should fit the GraphSAGE idea of message passing.
Args:
units: Dimensionality of embedding for each layer in the MLP.
use_normalization: Whether to use GraphLayerNormalization at the output of the update. Default is True.
activation: Activation function to use. Default is "relu".
use_bias: Boolean, whether the layer uses a bias vector.
kernel_initializer: Initializer for the `kernel` weights matrix.
bias_initializer: Initializer for the bias vector.
kernel_regularizer: Regularizer function applied to the `kernel` weights matrix.
bias_regularizer: Regularizer function applied to the bias vector.
activity_regularizer: Regularizer function applied to the output of the layer (its "activation").
kernel_constraint: Constraint function applied to the `kernel` weights matrix.
bias_constraint: Constraint function applied to the bias vector.
"""
def __init__(self,
units,
activation='relu',
use_bias=True,
kernel_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
kernel_constraint=None,
bias_constraint=None,
kernel_initializer='glorot_uniform',
bias_initializer='zeros',
use_normalization=True,
**kwargs):
super(GraphSageEdgeUpdateLayer, self).__init__(**kwargs) # Sets additional kwargs for base GraphBaseLayer
self.units = units
self.use_normalization = use_normalization
kernel_args = {"kernel_regularizer": kernel_regularizer, "activity_regularizer": activity_regularizer,
"bias_regularizer": bias_regularizer, "kernel_constraint": kernel_constraint,
"bias_constraint": bias_constraint, "kernel_initializer": kernel_initializer,
"bias_initializer": bias_initializer, "use_bias": use_bias}
# non-stateful layers
self.gather_nodes = GatherNodes()
self.concatenate = LazyConcatenate()
self.update_edge_mlp = GraphMLP(units=units, activation=activation, **kernel_args)
# normalization layer for edge features
self.normalize_edges = GraphLayerNormalization(axis=-1)
def build(self, input_shape):
"""Build layer."""
super(GraphSageEdgeUpdateLayer, self).build(input_shape)
def call(self, inputs, **kwargs):
"""Forward pass.
Args:
inputs: [nodes, edges, edge_index]
- nodes (tf.RaggedTensor): Node embeddings of shape (batch, [N], F)
- edges (tf.RaggedTensor): Edge or message embeddings of shape (batch, [M], F)
- edge_index (tf.RaggedTensor): Edge indices referring to nodes of shape (batch, [M], 2)
Returns:
tf.RaggedTensor: Edge embeddings of shape (batch, [M], F)
"""
n, ed, edi = inputs
node_features = self.gather_nodes([n, edi], **kwargs)
ed_new_input = self.concatenate([ed, node_features], **kwargs)
ed = self.update_edge_mlp(ed_new_input, **kwargs)
if self.use_normalization:
ed = self.normalize_edges(ed, **kwargs)
return ed
def get_config(self):
"""Update config."""
config = super(GraphSageEdgeUpdateLayer, self).get_config()
config.update({"units": self.units, "use_normalization": self.use_normalization})
conf_mlp = self.update_edge_mlp.get_config()
for x in ["kernel_regularizer", "activity_regularizer", "bias_regularizer", "kernel_constraint",
"bias_constraint", "kernel_initializer", "bias_initializer", "use_bias", "activation"]:
config.update({x: conf_mlp[x]})
return config
|
1652262
|
from boa3.builtin import public
from boa3.builtin.interop.stdlib import base58_check_decode
@public
def main(key: str) -> bytes:
return base58_check_decode(key)
|
1652269
|
from Solid.ParticleSwarm import ParticleSwarm
class Algorithm(ParticleSwarm):
"""
Tries to get a randomly-generated list to match [.1, .2, .3, .2, .1]
"""
    def _objective(self, member):
        target = [.1, .2, .3, .2, .1]
        return sum(abs(member[i] - target[i]) if member[i] != 0 else 1 for i in range(5))
def test_algorithm():
algorithm = Algorithm(50, 5, [0.,0.,0.,0.,0.], [1.,1.,1.,1.,1.], 1., 2., 2., 500, min_objective=None)
algorithm.run()
|
1652279
|
import six
from nose2 import events
class PrintFixture(events.Plugin):
alwaysOn = True
def startLayerSetup(self, event):
six.print_("StartLayerSetup: {0}".format(event.layer))
def stopLayerSetup(self, event):
six.print_("StopLayerSetup: {0}".format(event.layer))
def startLayerSetupTest(self, event):
log = "StartLayerSetupTest: {0}:{1}"
six.print_(log.format(event.layer, event.test))
def stopLayerSetupTest(self, event):
log = "StopLayerSetupTest: {0}:{1}"
six.print_(log.format(event.layer, event.test))
def startLayerTeardownTest(self, event):
log = "StartLayerTeardownTest: {0}:{1}"
six.print_(log.format(event.layer, event.test))
def stopLayerTeardownTest(self, event):
log = "StopLayerTeardownTest: {0}:{1}"
six.print_(log.format(event.layer, event.test))
def startLayerTeardown(self, event):
six.print_("StartLayerTeardown: {0}".format(event.layer))
def stopLayerTeardown(self, event):
six.print_("StopLayerTeardown: {0}".format(event.layer))
|
1652280
|
import torch
from ignite.metrics import Metric
class MeanAveragePrecision(Metric):
def __init__(self, num_classes=20, output_transform=lambda x: x):
super(MeanAveragePrecision, self).__init__(output_transform=output_transform)
self.num_classes = num_classes
def reset(self):
self._true_boxes = torch.tensor([], dtype=torch.long)
self._true_labels = torch.tensor([], dtype=torch.long)
self._det_boxes = torch.tensor([], dtype=torch.float32)
self._det_labels = torch.tensor([], dtype=torch.float32)
self._det_scores = torch.tensor([], dtype=torch.float32)
def update(self, output):
boxes_preds, labels_preds, scores_preds, boxes, labels = output
self._true_boxes = torch.cat([self._true_boxes, boxes], dim=0)
self._true_labels = torch.cat([self._true_labels, labels], dim=0)
self._det_boxes = torch.cat([self._det_boxes, boxes_preds], dim=0)
self._det_labels = torch.cat([self._det_labels, labels_preds], dim=0)
self._det_scores = torch.cat([self._det_scores, scores_preds], dim=0)
    def compute(self):
        # Per-class AP computation (classes 1..num_classes-1) is not implemented
        # yet; this metric currently only accumulates state in update().
        for c in range(1, self.num_classes):
            pass
|
1652282
|
import pytest
from nlpiper.transformers.normalizers import (
CaseTokens,
RemovePunctuation,
RemoveStopWords,
VocabularyFilter,
Stemmer,
SpellCheck
)
from nlpiper.transformers.tokenizers import BasicTokenizer
from nlpiper.core.document import (
Document,
Token
)
class TestNormalizersValidations:
@pytest.mark.parametrize('inputs', ["string", 2])
def test_with_invalid_input(self, inputs):
with pytest.raises(TypeError):
t = CaseTokens()
t(inputs)
@pytest.mark.parametrize('inputs', ["test"])
def test_without_doc_tokens(self, inputs):
doc = Document("test")
t = CaseTokens()
with pytest.raises(RuntimeError):
t(doc)
@pytest.mark.parametrize('hide_available_pkg', ['nltk'], indirect=['hide_available_pkg'])
def test_if_no_package_nltk(self, hide_available_pkg): # noqa: F811
with pytest.raises(ModuleNotFoundError):
RemoveStopWords()
with pytest.raises(ModuleNotFoundError):
SpellCheck(max_distance=1)
with pytest.raises(ModuleNotFoundError):
Stemmer(version='nltk')
@pytest.mark.parametrize('hide_available_pkg', ['hunspell'], indirect=['hide_available_pkg'])
def test_if_no_package_hunspell(self, hide_available_pkg): # noqa: F811
with pytest.raises(ModuleNotFoundError):
SpellCheck(max_distance=None)
with pytest.raises(ModuleNotFoundError):
Stemmer(version='hunspell')
class TestCaseTokens:
@pytest.mark.parametrize('mode,inputs,results', [
('lower', ['TEST'], ['test']),
('lower', ['test'], ['test']),
('upper', ['test'], ['TEST']),
('upper', ['TEST'], ['TEST']),
])
def test_modes(self, mode, inputs, results):
results_expected = [Token(tk) for tk in inputs]
for tk, out in zip(results_expected, results):
tk.cleaned = out
doc = Document(" ".join(inputs))
        # Applying a normalizer requires the document to be tokenized first
t = BasicTokenizer()
t(doc, inplace=True)
n = CaseTokens(mode)
# Inplace False
out = n(doc)
assert out.tokens == results_expected
assert out.steps == [repr(t), repr(n)]
assert doc.tokens == [Token(token) for token in inputs]
assert doc.steps == [repr(t)]
# Inplace True
out2 = n(doc, True)
assert doc.tokens == results_expected
assert doc.steps == [repr(t), repr(n)]
assert out2 is None
@pytest.mark.parametrize('mode', [1, 'other'])
def test_non_existent_mode(self, mode):
with pytest.raises(ValueError):
CaseTokens(mode)
class TestRemovePunctuation:
@pytest.mark.parametrize('inputs,results', [
(['TEST.%$#"#'], ['TEST']),
([r'!"te""!"#$%&()*+,-.s/:;<=>?@[\]^_`{|}~""t'], ['test']),
])
def test_remove_punctuation(self, inputs, results):
results_expected = [Token(tk) for tk in inputs]
for tk, out in zip(results_expected, results):
tk.cleaned = out
doc = Document(" ".join(inputs))
        # Applying a normalizer requires the document to be tokenized first
t = BasicTokenizer()
t(doc, inplace=True)
n = RemovePunctuation()
# Inplace False
out = n(doc)
assert out.tokens == results_expected
assert out.steps == [repr(t), repr(n)]
assert doc.tokens == [Token(token) for token in inputs]
assert doc.steps == [repr(t)]
# Inplace True
out = n(doc, True)
assert doc.tokens == results_expected
assert doc.steps == [repr(t), repr(n)]
assert out is None
class TestRemoveStopWords:
@pytest.mark.parametrize('sensitive,inputs,results', [
(True, ['This', 'is', 'a', 'stop', 'Word'], ['This', '', '', 'stop', 'Word']),
(False, ['This', 'is', 'a', 'stop', 'Word'], ['', '', '', 'stop', 'Word']),
])
def test_remove_stop_words_w_case_sensitive(self, sensitive, inputs, results):
pytest.importorskip('nltk')
results_expected = [Token(tk) for tk in inputs]
for tk, out in zip(results_expected, results):
tk.cleaned = out
doc = Document(" ".join(inputs))
        # Applying a normalizer requires the document to be tokenized first
t = BasicTokenizer()
t(doc, inplace=True)
n = RemoveStopWords(case_sensitive=sensitive)
# Inplace False
out = n(doc)
assert out.tokens == results_expected
assert out.steps == [repr(t), repr(n)]
assert doc.tokens == [Token(token) for token in inputs]
assert doc.steps == [repr(t)]
# Inplace True
out = n(doc, True)
assert doc.tokens == results_expected
assert doc.steps == [repr(t), repr(n)]
assert out is None
class TestVocabularyFilter:
vocabulary = ['this', 'is', 'a', 'token']
@pytest.mark.parametrize('sensitive,inputs,results', [
(True, ['This', 'is', 'a', 'Token'], ['', 'is', 'a', '']),
(False, ['This', 'is', 'a', 'Token'], ['This', 'is', 'a', 'Token']),
])
def test_vocabulary_filter_w_case_sensitive(self, sensitive, inputs, results):
results_expected = [Token(tk) for tk in inputs]
for tk, out in zip(results_expected, results):
tk.cleaned = out
doc = Document(" ".join(inputs))
        # Applying a normalizer requires the document to be tokenized first
t = BasicTokenizer()
t(doc, inplace=True)
n = VocabularyFilter(vocabulary=self.vocabulary, case_sensitive=sensitive)
# Inplace False
out = n(doc)
assert out.tokens == results_expected
assert out.steps == [repr(t), repr(n)]
assert doc.tokens == [Token(token) for token in inputs]
assert doc.steps == [repr(t)]
# Inplace True
out = n(doc, True)
assert doc.tokens == results_expected
assert doc.steps == [repr(t), repr(n)]
assert out is None
class TestSpellCheck:
@pytest.mark.parametrize('max_distance,inputs,results', [
(None, ['This', 'isx', 'a', 'stop', 'Word'], ['This', '', 'a', 'stop', 'Word']),
(1, ['Thisx', 'iszk', 'a', 'stop', 'Word'], ['This', 'iszk', 'a', 'stop', 'Word']),
])
def test_spell_checking(self, max_distance, inputs, results):
pytest.importorskip('hunspell')
pytest.importorskip('nltk')
results_expected = [Token(tk) for tk in inputs]
for tk, out in zip(results_expected, results):
tk.cleaned = out
doc = Document(" ".join(inputs))
        # Applying a normalizer requires the document to be tokenized first
t = BasicTokenizer()
t(doc, inplace=True)
n = SpellCheck(max_distance=max_distance)
# Inplace False
out = n(doc)
assert out.tokens == results_expected
assert out.steps == [repr(t), repr(n)]
assert doc.tokens == [Token(token) for token in inputs]
assert doc.steps == [repr(t)]
# Inplace True
out = n(doc, True)
assert doc.tokens == results_expected
assert doc.steps == [repr(t), repr(n)]
assert out is None
class TestStemmer:
@pytest.mark.parametrize('version,language,inputs,results', [
('nltk', 'english', ['This', 'computer', 'is', 'fastest', 'because'],
['this', 'comput', 'is', 'fastest', 'becaus']),
('hunspell', 'en_GB', ['This', 'computer', 'is', 'fastest', 'because'],
['this', 'computer', 'is', 'fast', 'because'])])
def test_stemmer(self, version, language, inputs, results):
pytest.importorskip('nltk')
pytest.importorskip('hunspell')
results_expected = [Token(tk) for tk in inputs]
for tk, out in zip(results_expected, results):
tk.cleaned = out
tk.stem = out
doc = Document(" ".join(inputs))
        # Applying a normalizer requires the document to be tokenized first
t = BasicTokenizer()
t(doc, inplace=True)
n = Stemmer(version=version, language=language)
# Inplace False
out = n(doc)
assert out.tokens == results_expected
assert out.steps == [repr(t), repr(n)]
assert doc.tokens == [Token(token) for token in inputs]
assert doc.steps == [repr(t)]
# Inplace True
out = n(doc, True)
assert doc.tokens == results_expected
assert doc.steps == [repr(t), repr(n)]
assert out is None
def test_unavailable_version(self):
with pytest.raises(ValueError):
Stemmer(version='random')
|
1652305
|
import numpy as np
import time
from pykin.kinematics.transform import Transform
JOINT_TYPE_MAP = {'revolute' : 'revolute',
'fixed' : 'fixed',
'prismatic' : 'prismatic'}
LINK_TYPE_MAP = {'cylinder' : 'cylinder',
'sphere' : 'sphere',
'box' : 'box',
'mesh' : 'mesh'}
LINK_TYPES = ['box', 'cylinder', 'sphere', 'capsule', 'mesh']
class ShellColors:
HEADER = '\033[95m'
OKBLUE = '\033[94m'
OKCYAN = '\033[96m'
OKGREEN = '\033[92m'
WARNING = '\033[93m'
FAIL = '\033[91m'
ENDC = '\033[0m'
BOLD = '\033[1m'
UNDERLINE = '\033[4m'
class Baxter:
left_e0_fixed_offset = Transform(rot=[0.5, 0.5, 0.5, 0.5], pos=[0.107, 0., 0. ])
left_w0_fixed_offset = Transform(rot=[0.5, 0.5, 0.5, 0.5], pos=[0.088, 0., 0. ])
right_e0_fixed_offset = Transform(rot=[0.5, 0.5, 0.5, 0.5], pos=[0.107, 0., 0. ])
right_w0_fixed_offset = Transform(rot=[0.5, 0.5, 0.5, 0.5], pos=[0.088, 0., 0. ])
@staticmethod
def add_visual_link(link_transforms, f):
if "left_lower_shoulder" in f.link.name:
link_transforms["left_upper_elbow_visual"] = np.dot(link_transforms["left_lower_shoulder"],
Baxter.left_e0_fixed_offset)
if "left_lower_elbow" in f.link.name:
link_transforms["left_upper_forearm_visual"] = np.dot(link_transforms["left_lower_elbow"],
Baxter.left_w0_fixed_offset)
if "right_lower_shoulder" in f.link.name:
link_transforms["right_upper_elbow_visual"] = np.dot(link_transforms["right_lower_shoulder"],
Baxter.right_e0_fixed_offset)
if "right_lower_elbow" in f.link.name:
link_transforms["right_upper_forearm_visual"] = np.dot(link_transforms["right_lower_elbow"],
Baxter.right_w0_fixed_offset)
def convert_thetas_to_dict(active_joint_names, thetas):
"""
Check if any pair of objects in the manager collide with one another.
Args:
active_joint_names (list): actuated joint names
thetas (sequence of float): If not dict, convert to dict ex. {joint names : thetas}
Returns:
thetas (dict): Dictionary of actuated joint angles
"""
    if not isinstance(thetas, dict):
        assert len(active_joint_names) == len(thetas), (
            f"the robot has {len(active_joint_names)} actuated joints, "
            f"but {len(thetas)} joint angles were given")
        thetas = dict((j, thetas[i]) for i, j in enumerate(active_joint_names))
return thetas
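# Example (joint names illustrative):
#   convert_thetas_to_dict(['joint1', 'joint2'], [0.1, 0.2])
#   -> {'joint1': 0.1, 'joint2': 0.2}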
def logging_time(original_fn):
"""
Decorator to check time of function
"""
def wrapper_fn(*args, **kwargs):
start_time = time.time()
result = original_fn(*args, **kwargs)
end_time = time.time()
print(f"WorkingTime[{original_fn.__name__}]: {end_time-start_time:.4f} sec\n")
return result
return wrapper_fn
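# Usage sketch (function name illustrative): decorating any callable prints its
# wall-clock time per call, e.g. "WorkingTime[forward_kinematics]: 0.0012 sec":
#   @logging_time
#   def forward_kinematics(robot, thetas):
#       ...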
def convert_transform(origin):
"""
Args:
origin (None or Transform): offset of object
Returns:
        Transform: identity Transform if origin is None, otherwise a copy of origin
"""
if origin is None:
return Transform()
else:
return Transform(rot=origin.rot, pos=origin.pos)
def convert_string_to_narray(str_input):
"""
Args:
str_input (str): string
Returns:
        np.array: the whitespace-separated string parsed into an np.array of floats
"""
if str_input is not None:
return np.array([float(data) for data in str_input.split()])
def calc_pose_error(tar_pose, cur_pose, EPS):
"""
Args:
        tar_pose (np.array): target pose
        cur_pose (np.array): current pose
EPS (float): epsilon
Returns:
np.array: Returns pose error
"""
pos_err = np.array([tar_pose[:3, -1] - cur_pose[:3, -1]])
rot_err = np.dot(cur_pose[:3, :3].T, tar_pose[:3, :3])
w_err = np.dot(cur_pose[:3, :3], rot_to_omega(rot_err, EPS))
return np.vstack((pos_err.T, w_err))
def rot_to_omega(R, EPS):
    # Convert a rotation matrix to an angular-velocity vector (referred p.36):
    # el = [R32 - R23, R13 - R31, R21 - R12]^T,
    # theta = atan2(||el||, tr(R) - 1), w = (theta / ||el||) * el
el = np.array(
[[R[2, 1] - R[1, 2]],
[R[0, 2] - R[2, 0]],
[R[1, 0] - R[0, 1]]]
)
norm_el = np.linalg.norm(el)
if norm_el > EPS:
w = np.dot(np.arctan2(norm_el, np.trace(R) - 1) / norm_el, el)
elif (R[0, 0] > 0 and R[1, 1] > 0 and R[2, 2] > 0):
w = np.zeros((3, 1))
else:
w = np.dot(np.pi/2, np.array([[R[0, 0] + 1], [R[1, 1] + 1], [R[2, 2] + 1]]))
return w
def limit_joints(joint_angles, lower, upper):
"""
Set joint angle limit
Args:
joint_angles (sequence of float): joint angles
lower (sequence of float): lower limit
upper (sequence of float): upper limit
Returns:
joint_angles (sequence of float): Returns limited joint angle
"""
if lower is not None and upper is not None:
for i in range(len(joint_angles)):
if joint_angles[i] < lower[i]:
joint_angles[i] = lower[i]
if joint_angles[i] > upper[i]:
joint_angles[i] = upper[i]
return joint_angles
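# Note: for array-like inputs this clamp is equivalent to
# np.clip(joint_angles, lower, upper).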
|
1652314
|
import os
import yaml
def load_config(config):
config = os.path.join("config", config)
with open(config, 'r') as config_file:
        return yaml.safe_load(config_file)
|
1652333
|
import sqlite3
con = sqlite3.connect(":memory:")
con.execute("create table lang (id integer primary key, name varchar unique)")
# Successful, con.commit() is called automatically afterwards
with con:
con.execute("insert into lang(name) values (?)", ("Python",))
# con.rollback() is called after the with block finishes with an exception, the
# exception is still raised and must be caught
try:
with con:
con.execute("insert into lang(name) values (?)", ("Python",))
except sqlite3.IntegrityError:
print("couldn't add Python twice")
# The connection object used as a context manager only commits or rolls back
# transactions, so the connection should be closed manually
con.close()
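# An equivalent standard-library pattern that also closes the connection
# automatically (sketch using contextlib.closing):
#   from contextlib import closing
#   with closing(sqlite3.connect(":memory:")) as con:
#       con.execute("create table lang (id integer primary key, name varchar unique)")
#       with con:  # transaction scope: commit on success, rollback on exception
#           con.execute("insert into lang(name) values (?)", ("C",))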
|
1652371
|
from PIL import Image
import os
basepath = r'fashionshop\static\img\product\Fragrance'
# os.chdir(basepath)
def save_img(img):
i = Image.open(os.path.join(basepath,img))
t, f_ext = os.path.splitext(i.filename)
text = t.replace("-"," ")
f = text + f_ext
    print('info:', img, i.format, i.size, i.mode)
if i.mode == 'RGBA':
i = i.convert('RGB')
output = (264,363)
    i.thumbnail(output, Image.LANCZOS)
    # i = i.resize(output, Image.LANCZOS)
i.save(f, "JPEG")
    print('info changed:', img, i.format, i.size, i.mode)
for img in os.listdir(basepath):
save_img(img)
|
1652375
|
import nlopt
import sys
import numpy as np
import numpy.testing as npt
def test_nlopt_import():
assert "nlopt" in sys.modules
def myfunc(x, grad):
if grad.size > 0:
grad[0] = 0.0
grad[1] = 0.5 / np.sqrt(x[1])
return np.sqrt(x[1])
def myconstraint(x, grad, a, b):
if grad.size > 0:
grad[0] = 3 * a * (a * x[0] + b) ** 2
grad[1] = -1.0
return (a * x[0] + b) ** 3 - x[1]
def test_nlopt():
opt = nlopt.opt(nlopt.LD_MMA, 2)
opt.set_lower_bounds([-float("inf"), 0])
opt.set_min_objective(myfunc)
opt.add_inequality_constraint(lambda x, grad: myconstraint(x, grad, 2, 0), 1e-8)
opt.add_inequality_constraint(lambda x, grad: myconstraint(x, grad, -1, 1), 1e-8)
opt.set_xtol_rel(1e-4)
x = opt.optimize([1.234, 5.678])
minf = opt.last_optimum_value()
# numevals = opt.get_numevals()
res = opt.last_optimize_result()
print("optimum at ", x[0], x[1])
print("minimum value = ", minf)
print("result code = ", res)
# print("nevals = ", numevals)
min_fref = 0.5443310476200902
xref = np.array([0.3333333346933468, 0.29629628940318486])
assert res == 4
# assert numevals == 11
    npt.assert_almost_equal(minf, min_fref, decimal=7)
npt.assert_almost_equal(xref, x, decimal=3)
|
1652396
|
from typing import Text
from ..request import get
from ..Model.ApiModel import Member
def get_member(guild_id: Text, user_id: Text) -> Member:
    """
    Get member info
    -----------
    Retrieves the information of the specified member in the specified guild.
    """
member = get(f"/guilds/{guild_id}/members/{user_id}")
member_info = Member(**member)
return member_info
|
1652406
|
class dotHierarchicList_t(object):
# no doc
aObjects=None
ModelFatherObject=None
nObjects=None
ObjectsLeftToGet=None
OperationType=None
|
1652438
|
import pytest
from butterfree.validations import BasicValidation
class TestBasicValidation:
def test_validate_without_column_ts(self, feature_set_without_ts):
check = BasicValidation(feature_set_without_ts)
with pytest.raises(ValueError):
check.validate_column_ts()
def test_validate_empty(self, feature_set_empty):
check = BasicValidation(feature_set_empty)
with pytest.raises(ValueError):
check.validate_df_is_empty()
def test_validate_not_spark_df(self):
df_writer = "not a spark df writer"
check = BasicValidation(df_writer)
with pytest.raises(ValueError):
check.validate_df_is_spark_df()
|
1652461
|
from .exceptions import SeleniumRespectfulError, SeleniumRespectfulRateLimitedError
from redis import StrictRedis, ConnectionError
from selenium.webdriver.remote.webdriver import WebDriver
from types import LambdaType
import yaml
import copy
import uuid
import time
import inspect
try:
FileNotFoundError
except NameError: # Python 2 Compatibility
FileNotFoundError = IOError
class RespectfulWebdriver:
default_config = {
"redis": {
"host": "localhost",
"port": 6379,
"database": 0
},
"safety_threshold": 0
}
def __init__(self, **kwargs):
self.config = self._load_config()
self.webdriver = kwargs.get("webdriver")
        if not isinstance(self.webdriver, WebDriver):
raise SeleniumRespectfulError("The provided webdriver does not inherit from RemoteWebDriver")
self.redis = StrictRedis(
host=self.config["redis"]["host"],
port=self.config["redis"]["port"],
db=self.config["redis"]["database"],
)
try:
self.redis.echo("Testing Connection")
except ConnectionError:
raise SeleniumRespectfulError("Could not establish a connection to the provided Redis server")
def __getattr__(self, attr):
if attr == "get":
return getattr(self, "_selenium_webdriver_proxy_%s" % attr)
else:
return getattr(self.webdriver, attr)
@property
def redis_prefix(self):
return "SeleniumRequester"
def register_realm(self, realm, max_requests, timespan):
redis_key = self._realm_redis_key(realm)
if not self.redis.hexists(redis_key, "max_requests"):
self.redis.hmset(redis_key, {"max_requests": max_requests, "timespan": timespan})
self.redis.sadd("%s:REALMS" % self.redis_prefix, realm)
return True
def register_realms(self, realm_tuples):
for realm_tuple in realm_tuples:
self.register_realm(*realm_tuple)
return True
def update_realm(self, realm, **kwargs):
redis_key = self._realm_redis_key(realm)
updatable_keys = ["max_requests", "timespan"]
for updatable_key in updatable_keys:
if updatable_key in kwargs and type(kwargs[updatable_key]) == int:
self.redis.hset(redis_key, updatable_key, kwargs[updatable_key])
return True
def unregister_realm(self, realm):
self.redis.delete(self._realm_redis_key(realm))
self.redis.srem("%s:REALMS" % self.redis_prefix, realm)
request_keys = self.redis.keys("%s:REQUEST:%s:*" % (self.redis_prefix, realm))
[self.redis.delete(k) for k in request_keys]
return True
def unregister_realms(self, realms):
for realm in realms:
self.unregister_realm(realm)
return True
def fetch_registered_realms(self):
return list(map(lambda k: k.decode("utf-8"), self.redis.smembers("%s:REALMS" % self.redis_prefix)))
def realm_max_requests(self, realm):
realm_info = self._fetch_realm_info(realm)
return int(realm_info["max_requests".encode("utf-8")].decode("utf-8"))
def realm_timespan(self, realm):
realm_info = self._fetch_realm_info(realm)
return int(realm_info["timespan".encode("utf-8")].decode("utf-8"))
def _load_config(self):
try:
with open("selenium-respectful.config.yml", "r") as f:
                config = yaml.safe_load(f)
if "safety_threshold" not in config:
config["safety_threshold"] = self.__class__.default_config.get("safety_threshold")
else:
if not isinstance(config["safety_threshold"], int) or config["safety_threshold"] < 0:
raise SeleniumRespectfulError(
"'safety_threshold' key must be a positive integer in 'selenium-respectful.config.yml'"
)
if "redis" not in config:
raise SeleniumRespectfulError("'redis' key is missing from 'selenium-respectful.config.yml'")
expected_redis_keys = ["host", "port", "database"]
missing_redis_keys = list()
for expected_redis_key in expected_redis_keys:
if expected_redis_key not in config["redis"]:
missing_redis_keys.append(expected_redis_key)
                if len(missing_redis_keys):
                    raise SeleniumRespectfulError(
                        "'%s' %s missing from the 'redis' configuration key in 'selenium-respectful.config.yml'" % (
                            ", ".join(missing_redis_keys),
                            "is" if len(missing_redis_keys) == 1 else "are"
                        )
                    )
                return config
        except FileNotFoundError:
            return copy.deepcopy(self.__class__.default_config)
def _can_perform_get(self, realm):
return self._requests_in_timespan(realm) < (self.realm_max_requests(realm) - self.config["safety_threshold"])
def _realm_redis_key(self, realm):
return "%s:REALMS:%s" % (self.redis_prefix, realm)
def _fetch_realm_info(self, realm):
redis_key = self._realm_redis_key(realm)
return self.redis.hgetall(redis_key)
def _requests_in_timespan(self, realm):
return len(
self.redis.scan(
cursor=0,
match="%s:REQUEST:%s:*" % (self.redis_prefix, realm),
count=self._redis_keys_in_db() + 100
)[1]
)
def _redis_keys_in_db(self):
return self.redis.info().get("db%d" % self.config["redis"]["database"]).get("keys")
def _selenium_webdriver_proxy_get(self, *args, **kwargs):
realms = kwargs.pop("realms", list())
if not len(realms):
raise SeleniumRespectfulError("'realms' is a required kwarg")
wait = kwargs.pop("wait", False)
return self._webdriver_get(lambda: self.webdriver.get(*args, **kwargs), realms=realms, wait=wait)
def _webdriver_get(self, get_func, realms=None, wait=False):
registered_realms = self.fetch_registered_realms()
for realm in realms:
if realm not in registered_realms:
raise SeleniumRespectfulError("Realm '%s' hasn't been registered" % realm)
if wait:
while True:
try:
return self._perform_webdriver_get(get_func, realms=realms)
except SeleniumRespectfulRateLimitedError:
pass
time.sleep(1)
else:
return self._perform_webdriver_get(get_func, realms=realms)
def _perform_webdriver_get(self, get_func, realms=None):
self._validate_get_func(get_func)
rate_limited_realms = list()
for realm in realms:
if not self._can_perform_get(realm):
rate_limited_realms.append(realm)
if not len(rate_limited_realms):
for realm in realms:
request_uuid = str(uuid.uuid4())
self.redis.setex(
name="%s:REQUEST:%s:%s" % (self.redis_prefix, realm, request_uuid),
time=self.realm_timespan(realm),
value=request_uuid
)
return get_func()
else:
raise SeleniumRespectfulRateLimitedError(
"Currently rate-limited on Realm(s): %s" % ", ".join(rate_limited_realms))
@staticmethod
def _validate_get_func(get_func):
if not isinstance(get_func, LambdaType):
raise SeleniumRespectfulError("'get_func' is expected to be a lambda")
get_func_string = inspect.getsource(get_func)
post_lambda_string = get_func_string.split(":")[1].strip()
if not post_lambda_string.startswith("self.webdriver.get"):
raise SeleniumRespectfulError("The lambda can only contain a self.webdriver.get function call")
|
1652464
|
import numpy as np
import pandas as pd
import matplotlib
import matplotlib.pyplot as plt
def get_mean_std_results(df):
odom_multipliers = np.unique(df['Odometry Multiplier'])
results = []
for multiplier in odom_multipliers:
data = df[df['Odometry Multiplier'] == multiplier]
        Error_corrected = data['Path Error'][data['Correction']]
        Error_uncorrected = data['Path Error'][~data['Correction']]
        Image_corrected = data['Image Error'][data['Correction']]
        Image_uncorrected = data['Image Error'][~data['Correction']]
results.append([
np.mean(Error_corrected),
np.std(Error_corrected, ddof=1),
np.mean(Error_uncorrected),
np.std(Error_uncorrected, ddof=1),
np.mean(Image_corrected),
np.std(Image_corrected, ddof=1),
np.mean(Image_uncorrected),
np.std(Image_uncorrected, ddof=1),
])
results = pd.DataFrame(results, columns=['Corrected Error mean','Corrected Error std','Uncorrected Error mean','Uncorrected Error std','Corrected Image Error mean','Corrected Image Error std','Uncorrected Image Error mean','Uncorrected Image Error std'], index=odom_multipliers)
results.index.rename('Odometry Multiplier', inplace=True)
return results
def get_theoretical_results(df, N, correction, sr, d):
theoretical = []
for multiplier in df.index:
path = d / multiplier
for i in range(1, N):
loc = round(path/d)
if loc < i:
path += d * (correction ** min(sr, i-loc)) / multiplier
else:
path += d / multiplier
theoretical.append(N*d - path)
return pd.DataFrame({'Theoretical Error': theoretical}, index=df.index)
labels = ['Odometry Multiplier', 'Correction', 'Path Error', 'Image Error']
data1 = [
[1.0, True, 70, 0],
[1.0, True, 65, 0],
[1.0, True, 50, 0],
[1.0, False, 140, 0],
[1.25, True, 100, 0],
[1.25, True, 180, 0],
[1.25, True, 110, 0],
[1.25, False, 1080, 6*200],
[1.5, True, 280, 0],
[1.5, True, 300, 200],
[1.5, True, 200, 0],
[1.5, False, 1770, 6*200],
[1.75, True, 880, 4*200],
[1.75, True, 1180, 4*200],
[1.75, True, 920, 4*200],
[1.75, False, 2360, np.NaN]
]
data2 = [
[1.0, True, 60, 0],
[1.0, True, 75, 0],
[1.0, True, 50, 0],
[1.0, False, 220, 0],
[1.5, True, 350, 1*200],
[1.5, True, 380, 1*200],
[1.5, True, 250, 1*200],
[1.5, False, 1750, 8*200],
[2.0, True, 495, 2*200],
[2.0, True, 480, 2*200],
[2.0, True, 560, 2*200],
[2.0, False, 2520, 11*200],
[2.5, True, 1010, 5*200],
[2.5, True, 1400, 7*200],
[2.5, True, 1810, 8*200],
[2.5, False, 2990, 14*200]
]
df1 = pd.DataFrame(data1, columns=labels)
results1 = get_mean_std_results(df1)
results1 = results1.join(get_theoretical_results(results1, N=24, correction=1.5, sr=1, d=200))
df2 = pd.DataFrame(data2, columns=labels)
results2 = get_mean_std_results(df2)
results2 = results2.join(get_theoretical_results(results2, N=24, correction=1.5, sr=2, d=200))
matplotlib.style.use('ggplot')
results1[['Corrected Error mean','Theoretical Error','Corrected Image Error mean']].plot(kind='bar', yerr=[results1['Corrected Error std'],np.zeros(4),results1['Corrected Image Error std']])
plt.xticks(rotation=0)
plt.ylabel('Path Error (mm)')
plt.legend(['Real path error','Theoretical path error','Image localisation error (at end)'], loc='upper left')
plt.title('Effect of odom corruption on localisation (SR=1)')
# plt.figure()
# matplotlib.style.use('ggplot')
results2[['Corrected Error mean','Theoretical Error','Corrected Image Error mean']].plot(kind='bar', yerr=[results2['Corrected Error std'],np.zeros(4),results2['Corrected Image Error std']])
plt.xticks(rotation=0)
plt.ylabel('Path Error (mm)')
plt.legend(['Real path error','Theoretical path error','Image localisation error (at end)'], loc='upper left')
plt.title('Effect of odom corruption on localisation (SR=2)')
plt.show()
|