# ===========================================================================
# File: lib/tri_declarative/shortcut.py
# Repo: TriOptima/tri.declarative (BSD-3-Clause)
# ===========================================================================
import functools
from .declarative import get_members
from .dispatch import dispatch
from .namespace import (
Namespace,
setdefaults_path,
)
# This is just a marker class for declaring shortcuts, and later for collecting them
class Shortcut(Namespace):
shortcut = True
# decorator
def shortcut(f):
f.shortcut = True
return f
def is_shortcut(x):
return getattr(x, 'shortcut', False)
def class_shortcut(*decorator_args, **defaults):
def decorator(__target__):
@functools.wraps(__target__)
@shortcut
@dispatch(
**defaults
)
def class_shortcut_wrapper(cls, *args, **kwargs):
name = __target__.__name__
next_call_target = kwargs.pop('call_target', None)
if (
isinstance(next_call_target, Namespace)
and name == next_call_target.get('attribute', None)
):
# Next call is to the same attribute name, but on the base class.
initial_resolve = getattr(cls, name).__func__
# Loop until we find a super class implementation
base_class_candidate = cls
while getattr(base_class_candidate, name).__func__ == initial_resolve:
base_class_candidate = base_class_candidate.__bases__[0]
next_call_target_cls = base_class_candidate
next_call_target_attribute = next_call_target.attribute
# We need to retain the cls value for later use (as _final_cls).
setdefaults_path(kwargs, _final_cls=cls)
call_target_after_shortcut = Namespace(
call_target__cls=next_call_target_cls,
call_target__attribute=next_call_target_attribute,
)
else:
next_call_target_cls = kwargs.pop('_final_cls', cls)
if next_call_target is None:
# No call_target specified in the decorator, just use the cls (or _final_cls from earlier)
call_target_after_shortcut = Namespace(
call_target__cls=next_call_target_cls,
)
else:
# Merge decorator specified call_target with what final class we should have.
call_target_after_shortcut = Namespace(
call_target=next_call_target,
call_target__cls=next_call_target_cls,
)
result = __target__(cls, *args, call_target=call_target_after_shortcut, **kwargs)
shortcut_stack = [name] + getattr(result, '__tri_declarative_shortcut_stack', [])
try:
result.__tri_declarative_shortcut_stack = shortcut_stack
except AttributeError:
pass
return result
class_shortcut_wrapper.__doc__ = __target__.__doc__
return class_shortcut_wrapper
assert len(decorator_args) in (0, 1), "There are no (explicit) positional arguments to class_shortcut" # pragma: no mutate
if len(decorator_args) == 1:
return decorator(decorator_args[0])
return decorator
def get_shortcuts_by_name(class_):
return dict(get_members(class_, member_class=Shortcut, is_member=is_shortcut))
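
# ---------------------------------------------------------------------------
# Editor's addition: a minimal usage sketch, not part of the original module.
# `Fruit` and its members are hypothetical; they only illustrate how the two
# kinds of shortcut declaration are picked up by get_shortcuts_by_name().
# ---------------------------------------------------------------------------
def _shortcut_usage_sketch():
    class Fruit:
        # Namespace-based shortcut: a Shortcut instance carrying defaults.
        banana = Shortcut(color='yellow')

        @staticmethod
        @shortcut
        def apple():
            # Function-based shortcut: any callable marked with @shortcut.
            return {'color': 'red'}

    # Expected to return both members, keyed by attribute name.
    return get_shortcuts_by_name(Fruit)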

# ===========================================================================
# File: ml-agents/mlagents/torch_utils/torch.py
# Repo: Phong13/ml-agents (Apache-2.0)
# ===========================================================================
import os
# Detect availability of torch package here.
# NOTE: this try/except is temporary until torch is required for ML-Agents.
try:
# This should be the only place that we import torch directly.
# Everywhere else is caught by the banned-modules setting for flake8
import torch # noqa I201
torch.set_num_interop_threads(2)
os.environ["KMP_BLOCKTIME"] = "0"
# Known PyLint compatibility with PyTorch https://github.com/pytorch/pytorch/issues/701
# pylint: disable=E1101
if torch.cuda.is_available():
torch.set_default_tensor_type(torch.cuda.FloatTensor)
device = torch.device("cuda")
else:
torch.set_default_tensor_type(torch.FloatTensor)
device = torch.device("cpu")
nn = torch.nn
# pylint: disable=E1101
except ImportError:
torch = None
nn = None
device = None
def default_device():
return device
def is_available():
"""
Returns whether Torch is available in this Python environment
"""
return torch is not None
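
# ---------------------------------------------------------------------------
# Editor's addition: a short usage sketch, not part of the original module.
# Callers are expected to branch on is_available() before touching `torch`.
# ---------------------------------------------------------------------------
def _device_usage_sketch():
    if not is_available():
        return None
    # Allocate a tensor on whichever device was selected at import time.
    return torch.ones(2, 2, device=default_device())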

# ===========================================================================
# File: tests/server/database.py
# Repo: 0x0mar/king-phisher (BSD-3-Clause)
# ===========================================================================
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# tests/server/database.py
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of the project nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
import unittest
from king_phisher import testing
from king_phisher.server.database import manager as db_manager
from king_phisher.server.database import models as db_models
from king_phisher.utilities import random_string
get_tables_with_column_id = db_models.get_tables_with_column_id
class ServerDatabaseTests(testing.KingPhisherTestCase):
def test_create_database(self):
try:
db_manager.init_database('sqlite://')
except Exception as error:
self.fail("failed to initialize the database (error: {0})".format(error.__class__.__name__))
def test_get_meta_data(self):
try:
db_manager.init_database('sqlite://')
except Exception as error:
self.fail("failed to initialize the database (error: {0})".format(error.__class__.__name__))
database_driver = db_manager.get_meta_data('database_driver')
self.assertEqual(database_driver, 'sqlite')
schema_version = db_manager.get_meta_data('schema_version')
self.assertEqual(schema_version, db_models.SCHEMA_VERSION)
def test_get_tables_id(self):
tables = set([
'alert_subscriptions',
'campaigns',
'credentials',
'deaddrop_connections',
'deaddrop_deployments',
'landing_pages',
'messages',
'meta_data',
'users',
'visits'
])
		tables_with_id = get_tables_with_column_id('id')
		self.assertSetEqual(set(tables_with_id), tables)
def test_get_tables_campaign_id(self):
tables = set([
'alert_subscriptions',
'credentials',
'deaddrop_connections',
'deaddrop_deployments',
'landing_pages',
'messages',
'visits'
])
self.assertSetEqual(set(get_tables_with_column_id('campaign_id')), tables)
def test_get_tables_message_id(self):
tables = set([
'credentials',
'visits'
])
self.assertSetEqual(set(get_tables_with_column_id('message_id')), tables)
def test_set_meta_data(self):
try:
db_manager.init_database('sqlite://')
except Exception as error:
self.fail("failed to initialize the database (error: {0})".format(error.__class__.__name__))
# set a new value
key = random_string(10)
value = random_string(20)
db_manager.set_meta_data(key, value)
self.assertEqual(db_manager.get_meta_data(key), value)
# update an existing value
value = random_string(30)
db_manager.set_meta_data(key, value)
self.assertEqual(db_manager.get_meta_data(key), value)
if __name__ == '__main__':
unittest.main()
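
# Editor's note (addition): these tests run directly, e.g.
#   python tests/server/database.py
# or through discovery, e.g. `python -m unittest discover tests`.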

# ===========================================================================
# File: examples/plot_geodesics_s2.py
# Repo: effigies/geomstats (MIT)
# ===========================================================================
"""
Plot a geodesic on the sphere S2
"""
import matplotlib.pyplot as plt
import numpy as np
import geomstats.visualization as visualization
from geomstats.hypersphere import Hypersphere
SPHERE2 = Hypersphere(dimension=2)
METRIC = SPHERE2.metric
def main():
initial_point = [1., 0., 0.]
initial_tangent_vec = SPHERE2.projection_to_tangent_space(
vector=[1., 2., 0.8],
base_point=initial_point)
geodesic = METRIC.geodesic(initial_point=initial_point,
initial_tangent_vec=initial_tangent_vec)
n_steps = 10
t = np.linspace(0, 1, n_steps)
points = geodesic(t)
ax = plt.subplot(111, projection="3d", aspect="equal")
visualization.plot(points, ax, space='S2')
plt.show()
if __name__ == "__main__":
main()

# ===========================================================================
# File: video_utils/collect_and_store.py
# Repo: monocongo/video_utils (BSD-3-Clause)
# ===========================================================================
import argparse
import datetime
import os
import boto3
import ffmpeg
# ------------------------------------------------------------------------------
def collect_and_store(rtsp_url: str,
start_seconds: int,
duration_seconds: int,
s3_bucket: str,
s3_prefix: str=None) -> str:
"""
:param rtsp_url:
:param start_seconds:
:param duration_seconds:
:param s3_bucket:
:param s3_prefix:
:return: S3 URL to MP4 clip file
"""
# build URL with start and end times
# NOTE URL is for Uniview RTSP, add options for other camera types
url = rtsp_url + f"/b{start_seconds}/replay/"
# file where we'll write clip data
temp_file = f"clip_b{start_seconds}_e{(start_seconds + duration_seconds)}.mp4"
# create the equivalent of the ffmpeg command:
# $ ffmpeg -i <rtsp_url> -vcodec copy -y -rtsp_transport tcp <output_mp4>
stream = ffmpeg.input(url)
stream = ffmpeg.output(stream, temp_file,
**{"codec:v": "copy",
"rtsp_transport": "tcp",
"t": f"{(duration_seconds//3600):02}:{(duration_seconds%3600//60):02}:{(duration_seconds%60):02}",
"y": None
}
)
ffmpeg.run(stream)
# store the clip to the S3 bucket using the name
s3_client = boto3.client("s3")
s3_client.upload_file(temp_file, s3_bucket, s3_prefix + temp_file)
os.remove(temp_file)
# return the S3 URL for the created file
return f"s3://{s3_bucket}/{s3_prefix}{temp_file}"
# ------------------------------------------------------------------------------
if __name__ == "__main__":
# USAGE
# $ python collect_and_store.py --rtsp rtsp://user:pass1@71.85.125.110:554 \
# --s3_bucket scw.james.adams \
# --duration 30 --count 10
# construct the argument parser and parse the arguments
args_parser = argparse.ArgumentParser()
args_parser.add_argument("--rtsp",
required=True,
type=str,
help="RTSP URL for video stream")
args_parser.add_argument("--duration",
required=True,
type=int,
help="duration of saved clips (in seconds)")
args_parser.add_argument("--count",
required=True,
type=int,
help="number of clips to save")
args_parser.add_argument("--s3_bucket",
required=True,
type=str,
help="Destination S3 bucket")
args_parser.add_argument("--s3_prefix",
type=str,
help="Key prefix of the file that will be "
"stored in the S3 bucket")
args = vars(args_parser.parse_args())
# sanity check for some of the arguments
if not args["rtsp"].lower().startswith("rtsp://"):
raise ValueError("Invalid input URL -- only RTSP supported")
    start = int(datetime.datetime.now().strftime("%s"))
    number_of_files_to_collect = args["count"]
    while number_of_files_to_collect > 0:
        collect_and_store(args["rtsp"], start, args["duration"],
                          args["s3_bucket"], args["s3_prefix"])
        number_of_files_to_collect -= 1
        start += args["duration"]
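
# ---------------------------------------------------------------------------
# Editor's addition: a programmatic usage sketch, not part of the original
# module. The URL, bucket, and prefix below are placeholders.
# ---------------------------------------------------------------------------
def _collect_usage_sketch():
    return collect_and_store(
        rtsp_url="rtsp://user:password@camera.example:554",
        start_seconds=1600000000,
        duration_seconds=30,
        s3_bucket="my-clip-bucket",
        s3_prefix="clips/",
    )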

# ===========================================================================
# File: actions/update_field_value.py
# Repo: gmenie-ak/stackstorm-jira (Apache-2.0)
# ===========================================================================
from lib.base import BaseJiraAction
__all__ = [
'UpdateFieldValue'
]
class UpdateFieldValue(BaseJiraAction):
def run(self, issue_key, field, value, notify):
issue = self._client.issue(issue_key)
issue.update(fields={field: value}, notify=notify)
result = issue.fields.labels
return result
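
# ---------------------------------------------------------------------------
# Editor's addition: a hypothetical invocation sketch, not part of the
# original action; StackStorm normally instantiates and runs actions itself.
# ---------------------------------------------------------------------------
def _run_action_sketch(action):
    # `action` is assumed to be a configured UpdateFieldValue instance.
    return action.run(issue_key='PROJ-123', field='labels',
                      value=['triaged'], notify=False)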

# ===========================================================================
# File: vespa/datasim/util_menu.py
# Repo: vespa-mrs/vespa (BSD-3-Clause)
# ===========================================================================
# Python modules
# 3rd party modules
import wx
# Our modules
import vespa.common.menu as common_menu
import vespa.common.util.config as util_common_config
########################################################################
# This is a collection of menu-related constants, functions and utilities.
# The function that builds the menu bar lives here, as does the menu
# definition.
########################################################################
class ViewIds(common_menu.IdContainer):
"""A container for the ids of all of the menu items to which we need
explicit references.
"""
ZERO_LINE_SHOW = "replace me"
ZERO_LINE_TOP = "replace me"
ZERO_LINE_MIDDLE = "replace me"
ZERO_LINE_BOTTOM = "replace me"
XAXIS_SHOW = "replace me"
XAXIS_PPM = "replace me"
XAXIS_HERTZ = "replace me"
DATA_TYPE_REAL = "replace me"
DATA_TYPE_IMAGINARY = "replace me"
DATA_TYPE_MAGNITUDE = "replace me"
DATA_TYPE_SUMMED = "replace me"
PLOT_VIEW_FINAL = "replace me"
PLOT_VIEW_ALL = "replace me"
EXPERIMENT_TO_TEXT = "replace me"
VIEW_TO_PNG = "replace me"
VIEW_TO_SVG = "replace me"
VIEW_TO_PDF = "replace me"
VIEW_TO_EPS = "replace me"
# When main creates an instance of DatasimMenuBar(), it sets the variable
# below to that instance. It's a convenience. It's the same as
# wx.GetApp().GetTopWindow().GetMenuBar(), but much easier to type.
bar = None
class DatasimMenuBar(common_menu.VespaMenuBar):
"""A subclass of wx.MenuBar that adds some app-specific functions
and constants.
There should be only one instance of this class per invocation of the
app. It's a singleton class.
"""
def __init__(self, main):
common_menu.VespaMenuBar.__init__(self, main)
ViewIds.init_ids()
# _get_menu_data() is called just once, right here.
datasim, view, help = _get_menu_data(main)
# Build the top-level menus that are always present.
datasim = common_menu.create_menu(main, "Datasim", datasim)
view = common_menu.create_menu(main, "&View", view)
help = common_menu.create_menu(main, "&Help", help)
for menu in (datasim, view, help):
self.Append(menu, menu.label)
ViewIds.enumerate_booleans(self.view_menu)
# ================ Module Internal Use Only =======================
def _get_menu_data(main):
# Note that wx treats the ids wx.ID_EXIT and wx.ID_ABOUT specially by
# moving them to their proper location on the Mac. wx will also change
# the text of the ID_EXIT item to "Quit" as is standard under OS X.
# Quit is also the standard under Gnome but unfortunately wx doesn't seem
# to change Exit --> Quit there, so our menu looks a little funny under
# Gnome.
prior = (
("N&ew Datasim from Experiment...\tCTRL+N", main.on_new),
("O&pen Datasim...\tCTRL+O", main.on_open),
common_menu.SEPARATOR,
("S&ave\tCTRL+S", main.on_save_viff),
("S&ave As...", main.on_save_as_viff),
common_menu.SEPARATOR,
("Close\tCTRL+W", main.on_close_datasim),
common_menu.SEPARATOR,
("E&xport Spectrum", (
("to VIFF Raw Data...", main.on_export_spectrum_viff),
("to Siemens *.rda...", main.on_export_spectrum_siemens_rda))),
("E&xport Monte Carlo", (
("to VIFF Raw Data...", main.on_export_monte_carlo_viff), )),
common_menu.SEPARATOR,
("&Exit", main.on_self_close))
view = (
("Zero Line", (
("Show", main.on_menu_view_option, wx.ITEM_CHECK, ViewIds.ZERO_LINE_SHOW),
common_menu.SEPARATOR,
("Top", main.on_menu_view_option, wx.ITEM_RADIO, ViewIds.ZERO_LINE_TOP),
("Middle", main.on_menu_view_option, wx.ITEM_RADIO, ViewIds.ZERO_LINE_MIDDLE),
("Bottom", main.on_menu_view_option, wx.ITEM_RADIO, ViewIds.ZERO_LINE_BOTTOM))),
("X-Axis", (
("Show", main.on_menu_view_option, wx.ITEM_CHECK, ViewIds.XAXIS_SHOW),
common_menu.SEPARATOR,
("PPM", main.on_menu_view_option, wx.ITEM_RADIO, ViewIds.XAXIS_PPM),
("Hertz", main.on_menu_view_option, wx.ITEM_RADIO, ViewIds.XAXIS_HERTZ))),
common_menu.SEPARATOR,
("Data Type", (
("Real", main.on_menu_view_option, wx.ITEM_RADIO, ViewIds.DATA_TYPE_REAL),
("Imaginary", main.on_menu_view_option, wx.ITEM_RADIO, ViewIds.DATA_TYPE_IMAGINARY),
("Magnitude", main.on_menu_view_option, wx.ITEM_RADIO, ViewIds.DATA_TYPE_MAGNITUDE),
common_menu.SEPARATOR,
("Summed", main.on_menu_view_option, wx.ITEM_CHECK, ViewIds.DATA_TYPE_SUMMED))),
common_menu.SEPARATOR,
("Plot Views", (
("Final Only", main.on_menu_view_option, wx.ITEM_RADIO, ViewIds.PLOT_VIEW_FINAL),
("All Three", main.on_menu_view_option, wx.ITEM_RADIO, ViewIds.PLOT_VIEW_ALL))),
common_menu.SEPARATOR,
("Output Experiment Text", main.on_menu_view_output, wx.ITEM_NORMAL, ViewIds.EXPERIMENT_TO_TEXT),
("Output Plots", (
("View to PNG", main.on_menu_view_output, wx.ITEM_NORMAL, ViewIds.VIEW_TO_PNG),
("View to SVG", main.on_menu_view_output, wx.ITEM_NORMAL, ViewIds.VIEW_TO_SVG),
("View to EPS", main.on_menu_view_output, wx.ITEM_NORMAL, ViewIds.VIEW_TO_EPS),
("View to PDF", main.on_menu_view_output, wx.ITEM_NORMAL, ViewIds.VIEW_TO_PDF) )))
help = (
# ("&User Manual", main.on_user_manual),
("&DataSim Online User Manual", main.on_datasim_online_user_manual),
("&Vespa Help Online", main.on_vespa_help_online),
("&About", main.on_about, wx.ITEM_NORMAL, wx.ID_ABOUT),
)
if util_common_config.VespaConfig().show_wx_inspector:
help = list(help)
help.append(common_menu.SEPARATOR)
help.append( ("Show Inspection Tool", main.on_show_inspection_tool) )
return (prior, view, help)

# ===========================================================================
# File: data.py
# Repo: Simon-Swenson-8351/hs-bot (BSD-3-Clause)
# ===========================================================================
import os
import PIL.Image
import numpy
import cv2
import threading
import datetime
import config
import game_region as game_region_module
class BoundingBoxInfo(object):
def __init__(self, name, source_coords):
self.name = name
self.source_coords = source_coords
class BoundingBox(object):
def __init__(self, bounding_box_info, game_region):
self.name = bounding_box_info.name
self.coords = game_region.transform_bounding_box(bounding_box_info.source_coords, game_region.SCALE_TYPE_BOUNDING_BOX)
self.dimensions = [self.coords[2] - self.coords[0], self.coords[3] - self.coords[1]]
class TemplateInfo(object):
def __init__(self, name, input_image, threshold = 0):
self.name = name
self.input_image = input_image
self.threshold = threshold
@classmethod
def create_and_fetch_positional_image(cls, name: str, resolution: list[int], threshold: float = 0):
path = os.path.join(
"res",
"templates",
str(resolution[0]) + "x" + str(resolution[1]),
name + ".png")
im = PIL.Image.open(path)
return TemplateInfo(name, im, threshold)
class Template(object):
def __init__(self, name, template_image, search_region, threshold = 0):
self.name = name
self.image = template_image
self.search_region = search_region
self.threshold = threshold
# A "positional image" is a screenshot with all irrelevant data deleted
# (set to alpha). It simplifies things, as we now know where in the
# game screen to look for the template.
@classmethod
def create_from_template_info(cls, template_info, game_region):
im = template_info.input_image.copy()
im_total_size = im.size
for i in range(2):
if im_total_size[i] != game_region.template_source_resolution[i]:
raise Exception("Floating templates currently not supported. "
"Input template image size must be equal to the source "
"resolution.")
old_bb = im.getbbox()
im = im.crop(old_bb)
old_size = im.size
new_size = tuple(game_region.transform_size(old_size, game_region.SCALE_TYPE_TEMPLATE))
im = im.resize(new_size, PIL.Image.LANCZOS)
im = im.convert("L")
im = numpy.array(im) # converts to OpenCV format
# the reason we use transform_size here is because, when we get a
# comparison image, it will simply be a cropped image of the game
# region, and transform_size does not add the screen-space offset,
# whereas transform_coords does
new_bb = game_region.transform_size([old_bb[0], old_bb[1]], game_region.SCALE_TYPE_TEMPLATE)
new_bb += [new_bb[0] + new_size[0], new_bb[1] + new_size[1]]
new_bb[0] -= 3
new_bb[1] -= 3
new_bb[2] += 3
new_bb[3] += 3
return Template(template_info.name, im, tuple(new_bb), template_info.threshold)
def get_response(self, image):
cropped = image[self.search_region[1] : self.search_region[3], self.search_region[0] : self.search_region[2]]
r = cv2.minMaxLoc(cv2.matchTemplate(cropped, self.image, cv2.TM_CCOEFF_NORMED))[1]
if config.debug:
if self.name == "menu-main-modes" or \
self.name == "menu-modes-header" or \
self.name == "menu-modes-bg":
print("template response: " + str(r))
#timestamp = datetime.datetime.now()
#cv2.imwrite(str(timestamp) + "_screen.png", cropped)
#cv2.imwrite(str(timestamp) + "_template.png", self.image)
return r
def threshold_reached(self, image):
return self.get_response(image) > self.threshold
def create_bounding_box_infos():
tmp_list = []
tmp_list.append(BoundingBoxInfo("play", [1342, 836, 1457, 942]))
tmp_list.append(BoundingBoxInfo("end-turn", [1506, 475, 1615, 511]))
tmp_list.append(BoundingBoxInfo("hero-power", [1090, 780, 1185, 871]))
tmp_list.append(BoundingBoxInfo("menu-dead-zone", [1665, 47, 1892, 1010]))
tmp_list.append(BoundingBoxInfo("concede", [880, 367, 1044, 399]))
tmp_list.append(BoundingBoxInfo("options", [1858, 1042, 1896, 1070]))
tmp_list.append(BoundingBoxInfo("bg-refresh", [1093, 166, 1166, 241]))
result = {}
for i in tmp_list:
result[i.name] = i
return result
def create_template_infos(resolution=(1920, 1080)):
    # NOTE: create_and_fetch_positional_image requires a resolution argument;
    # the 1920x1080 default is an assumption inferred from the bounding-box
    # coordinates above.
    tmp_list = []
    tmp_list.append(TemplateInfo.create_and_fetch_positional_image("game-over-defeat", resolution, 0.78))
    tmp_list.append(TemplateInfo.create_and_fetch_positional_image("game-over-victory", resolution, 0.78))
    tmp_list.append(TemplateInfo.create_and_fetch_positional_image("your-turn-something-to-do", resolution, 0.88))
    tmp_list.append(TemplateInfo.create_and_fetch_positional_image("play-standard", resolution, 0.88))
    tmp_list.append(TemplateInfo.create_and_fetch_positional_image("turn-start", resolution, 0.8))
result = {}
for i in tmp_list:
result[i.name] = i
return result
mouse_lock = threading.Lock()
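
# ---------------------------------------------------------------------------
# Editor's addition: a usage sketch, not part of the original module. It
# assumes a `game_region` object of the kind the classes above expect and a
# grayscale OpenCV screenshot `frame` of the game region.
# ---------------------------------------------------------------------------
def _template_usage_sketch(game_region, frame):
    info = create_template_infos()["turn-start"]
    template = Template.create_from_template_info(info, game_region)
    # True once the normalized cross-correlation response beats the threshold.
    return template.threshold_reached(frame)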

# ===========================================================================
# File: ibt/docker_util.py
# Repo: rcook/ibt (MIT)
# ===========================================================================
###############################################################################
#
# IBT: Isolated Build Tool
# Copyright (C) 2016, Richard Cook. All rights reserved.
#
# Simple wrappers around Docker etc. for fully isolated build environments
#
###############################################################################
from __future__ import print_function
import subprocess32
from ibt.container_util import check_process_in_container
from ibt.util import call_process, check_process
def docker_installed():
try:
return call_process(["docker", "--version"], timeout=1)
except (OSError, subprocess32.TimeoutExpired):
return False
def docker_image_exists(image_id):
return call_process(["docker", "inspect", image_id])
def docker_image_build(image_id, context_dir):
if docker_image_exists(image_id):
print("Docker image {} already built".format(image_id))
else:
print("Building Docker image {}".format(image_id))
check_process(["docker", "build", "-t", image_id, context_dir])
def docker_image_remove(image_id):
if docker_image_exists(image_id):
print("Destroying Docker image {}".format(image_id))
check_process(["docker", "rmi", image_id])
else:
print("No Docker image {} to destroy".format(image_id))
def docker_run(ctx, project, args, container_run_path, alias_args=None):
check_process_in_container(
ctx,
project,
args,
None,
["/bin/sh", container_run_path],
alias_args
)
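
# ---------------------------------------------------------------------------
# Editor's addition: a usage sketch, not part of the original module. The
# image id and context directory below are placeholders.
# ---------------------------------------------------------------------------
def _docker_usage_sketch():
    if not docker_installed():
        return False
    docker_image_build("ibt-demo:latest", "./docker-context")
    docker_image_remove("ibt-demo:latest")
    return True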

# ===========================================================================
# File: tests/systems/test_spins.py
# Repo: sirmarcel/floq (MIT)
# ===========================================================================
from tests.assertions import CustomAssertions
import numpy as np
import floq.systems.spins as spins
def single_hf(controls, omega):
a1 = controls[0]
b1 = controls[1]
a2 = controls[2]
b2 = controls[3]
return np.array([[[0, 0.25*(1j*a2 + b2)],
[0.25*1j*(a2 + 1j*b2), 0]],
[[0, 0.25*(1j*a1 + b1)],
[0.25*1j*(a1 + 1j*b1), 0]],
[[omega/2.0, 0],
[0, -(omega/2.0)]],
[[0, -0.25j*(a1 - 1j*b1)],
[0.25*(-1j*a1 + b1), 0]],
[[0, -0.25j*(a2 - 1j*b2)],
[0.25*(-1j*a2 + b2), 0]]])
def dhf():
dhf_b1 = np.array([[[0., 0.], [0., 0.]],
[[0., 0.25], [-0.25, 0.]],
[[0., 0.], [0., 0.]],
[[0., -0.25], [0.25, 0.]],
[[0., 0.], [0., 0.]]])
dhf_a1 = np.array([[[0., 0.], [0., 0.]],
[[0., 0. + 0.25j], [0. + 0.25j, 0.]],
[[0., 0.], [0., 0.]],
[[0., 0. - 0.25j], [0. - 0.25j, 0.]],
[[0., 0.], [0., 0.]]])
dhf_b2 = np.array([[[0., 0.25], [-0.25, 0.]],
[[0., 0.], [0., 0.]],
[[0., 0.], [0., 0.]],
[[0., 0.], [0., 0.]],
[[0., -0.25], [0.25, 0.]]])
dhf_a2 = np.array([[[0., 0. + 0.25j], [0. + 0.25j, 0.]],
[[0., 0.], [0., 0.]],
[[0., 0.], [0., 0.]],
[[0., 0.], [0., 0.]],
[[0., 0. - 0.25j], [0. - 0.25j, 0.]]])
return np.array([dhf_a1, dhf_b1, dhf_a2, dhf_b2])
class TestSpinHf(CustomAssertions):
def test_build_single_hf(self):
controls = np.array([1.2, 2.3, 3.4, 5.4])
freq = 2.5
target = single_hf(controls, freq)
result = spins.hf(2, freq, controls)
self.assertArrayEqual(target, result)
class TestSpindHf(CustomAssertions):
    def test_build_single_dhf(self):
        target = dhf()
result = spins.dhf(2)
self.assertArrayEqual(target, result)
class TestRandomisedSpinEnsemble(CustomAssertions):
def test_init(self):
# Not a very sophisticated test -- at least verify that it's not broken
rand = spins.RandomisedSpinEnsemble(10, 10, 1.0, 8.2, 0.25)
self.assertIsInstance(rand.systems, list)
class TestSpinEnsemble(CustomAssertions):
def setUp(self):
self.amps = np.array([1.2, 1.1, 0.7, 0.6])
self.freqs = np.array([0.8, 1.1, 0.9, 1.2])
self.ensemble = spins.SpinEnsemble(4, 2, 1.0, self.freqs, self.amps)
self.controls = np.array([1.5, 1.3, 1.4, 1.1])
self.t = 3.0
def test_systems_works(self):
self.assertIsInstance(self.ensemble.systems, list)
def test_single_system_evolves_correctly(self):
system = self.ensemble.systems[0]
result = system.u(self.controls, self.t)
single = spins.SpinSystem(2, self.amps[0], self.freqs[0], 1.0)
target = single.u(self.controls, self.t)
self.assertArrayEqual(result, target, decimals=10)
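
# Editor's note (addition): as the tests above exercise it, spins.hf(n, freq,
# controls) builds the Fourier components of the spin Hamiltonian; for two
# control modes (four control values) it matches the hand-written single_hf()
# above, a (5, 2, 2) complex array for a single spin.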

# ===========================================================================
# File: utils/localization/tests/test_helper/test_utils/test_utils.py
# Repo: Open-Speech-EkStep/crowdsource-dataplatform (MIT)
# ===========================================================================
import json
import pandas as pd
from unittest import TestCase
from pandas._testing import assert_frame_equal
from helper.utils.utils import load_json_as_df, reformat_json
class TestUtils(TestCase):
def test_load_json_as_df(self):
test_json_data = {'text1': 'translation1', 'text2': 'translation2'}
expected_df = pd.DataFrame([['text1', 'translation1'], ['text2', 'translation2']], columns=['Key', 'value'])
actual_df = load_json_as_df(test_json_data)
assert_frame_equal(actual_df, expected_df)
def test_reformat_json(self):
df = pd.DataFrame([['a', 'b']], columns=['x', 'y'])
json_file = df.to_json(orient='values')
test_json_string = json.loads(json_file)
actual_result = reformat_json(test_json_string)
expected_result = {'a': 'b'}
self.assertEqual(actual_result, expected_result)

# ===========================================================================
# File: src/spider/guozhi/guozhi.py
# Repo: objcat/test-python (MIT)
# ===========================================================================
# description: guozhi
# date: 2021/1/9 9:54
# author: objcat
# version: 1.0
import requests
from bs4 import BeautifulSoup
from win32com.client import Dispatch
import pathlib
import os
class Guozhi:
FC_URL = "http://2018.emu618.net:6180/index.php?controller=site&action=pro_list&cat=23"
SFC_URL = "http://2018.emu618.net:6180/index.php?controller=site&action=pro_list&cat=115"
MD_URL = "http://2018.emu618.net:6180/index.php?controller=site&action=pro_list&cat=47"
@classmethod
def parse_url(cls):
baseurl = "http://2018.emu618.net:6180"
for page in range(79, 80):
            print(f"Start crawling page {page}")
res = requests.get(Guozhi.MD_URL + f"&page={page}")
            # convert the encoding; needed when the response text comes back garbled
res.encoding = "utf-8"
main_page = BeautifulSoup(res.text, "html.parser")
target = main_page.find("div", class_="games_list").find_all("a", class_="p_name")
for a in target:
child_res = requests.get(baseurl + a.get("href"))
child_page = BeautifulSoup(child_res.text, "html.parser")
child_a = child_page.find("div", class_="detail_down_adress_con_bottom_left_part2_con").find("a")
child_download_btn_url = baseurl + child_a.get("href")
download_res = requests.get(child_download_btn_url)
download_page = BeautifulSoup(download_res.text, "html.parser")
download_url = download_page.find("div", class_="download").find("a").get("href")
print(download_url)
cls.download_ftp(download_url)
# exit()
@staticmethod
def download_ftp(download_url):
        # download via Thunder (Xunlei)
o = Dispatch("ThunderAgent.Agent64.1")
        # AddTask("download URL", "save-as filename", "save directory", "task comment", "referer URL", "start mode", "download only from the original address", "thread count for the original address")
o.AddTask(download_url)
o.CommitTasks()
if __name__ == '__main__':
Guozhi.parse_url()
| 36.867925 | 113 | 0.623337 | 252 | 1,954 | 4.611111 | 0.416667 | 0.047332 | 0.048193 | 0.05852 | 0.178141 | 0.160069 | 0.160069 | 0.160069 | 0.160069 | 0.160069 | 0 | 0.049865 | 0.240532 | 1,954 | 52 | 114 | 37.576923 | 0.733154 | 0.092119 | 0 | 0 | 0 | 0.085714 | 0.250425 | 0.037394 | 0 | 0 | 0 | 0 | 0 | 1 | 0.057143 | false | 0 | 0.142857 | 0 | 0.314286 | 0.057143 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |

# ===========================================================================
# File: setup.py
# Repo: tzickel/forif (MIT)
# ===========================================================================
from setuptools import setup
import codecs
import os
here = os.path.abspath(os.path.dirname(__file__))
def read(*parts):
return codecs.open(os.path.join(here, *parts), 'r').read()
long_description = read('README.md')
setup(
name='forif',
version='0.0.1',
py_modules=['forif'],
url='https://github.com/tzickel/forif',
    license='MIT',  # matches the repository's declared license
author='tzickel',
author_email='',
# tests_require=['nose'],
description='A C-like condition assignment syntax in python',
long_description=long_description,
platforms='any',
# test_suite='nose.collector',
test_suite='tests.TestForIf',
classifiers=[
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 3',
'Development Status :: 4 - Beta',
'Environment :: Console',
'Intended Audience :: Developers',
'Operating System :: OS Independent',
'Topic :: Software Development :: Libraries :: Python Modules',
],
# extras_require={
# 'testing': ['nose'],
# }
)
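
# Editor's note (addition): with this script, a source checkout installs via
# `pip install .`, and the declared suite runs with `python setup.py test`
# (using tests.TestForIf above).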

# ===========================================================================
# File: Question_31_40/myanswers/myans_38.py
# Repo: OverHall27/Gasyori100knock (MIT)
# ===========================================================================
import cv2
import numpy as np
def DCTConstant(u, v):
if u == 0 and v == 0:
return 1 / 2
elif u == 0 or v == 0:
return 1 / np.sqrt(2)
else:
return 1
def DCT(img, block=8):
Ver, Hor, Col = img.shape
result = np.zeros((Ver, Hor, Col)).astype(np.float32)
x = np.tile(np.arange(block), (block, 1))
y = np.arange(block).repeat(block).reshape([block, block])
for c in range(Col):
for u in range(Hor):
cu = u % block
iu = u // block
for v in range(Ver):
cv = v % block
iv = v // block
result[v, u, c] = np.sum(img[iv*block:(iv+1)*block, iu*block:(iu+1)*block, c] * np.cos((2*x+1)*cu*np.pi/(2*block)) * np.cos((2*y+1)*cv*np.pi/(2*block)))
result[v, u, c] *= DCTConstant(cu, cv)
result *= (2/block)
return result
def InvDCT(dct, block=8, K=8):
    Ver, Hor, Col = dct.shape
result = np.zeros((Ver, Hor, Col)).astype(np.float32)
u = np.tile(np.arange(K), (K, 1))
v = np.arange(K).repeat(K).reshape([K, K])
c_uv = np.zeros((K, K))
for x in range(K):
for y in range(K):
c_uv[y, x] = DCTConstant(y, x)
for c in range(Col):
for x in range(Hor):
cx = x % block
ix = x // block
for y in range(Ver):
cy = y % block
iy = y // block
result[y, x, c] = np.sum(dct[iy*block:iy*block+K, ix*block:ix*block+K, c] * np.cos((2*cx+1)*u*np.pi/(2*block)) * np.cos((2*cy+1)*v*np.pi/(2*block)) * c_uv)
result *= (2/block)
return result
def Quantization(img, block=8):
Ver, Hor, Col = img.shape
N_Ver = Ver // block
N_Hor = Hor // block
Q = np.array(((16, 11, 10, 16, 24, 40, 51, 61),
(12, 12, 14, 19, 26, 58, 60, 55),
(14, 13, 16, 24, 40, 57, 69, 56),
(14, 17, 22, 29, 51, 87, 80, 62),
(18, 22, 37, 56, 68, 109, 103, 77),
(24, 35, 55, 64, 81, 104, 113, 92),
(49, 64, 78, 87, 103, 121, 120, 101),
(72, 92, 95, 98, 112, 100, 103, 99)), dtype=np.float32)
result = np.zeros((Ver, Hor, Col)).astype(np.float32)
for c in range(Col):
for x in range(N_Hor):
for y in range(N_Ver):
result[y*block:(y+1)*block, x*block:(x+1)*block, c] = np.round(img[y*block:(y+1)*block, x*block:(x+1)*block, c] / Q) * Q
return result
img = cv2.imread("../imori.jpg").astype(np.float32)
K_ = 8
dct = DCT(img, block=8)
dct = Quantization(dct, block=8)
result = InvDCT(dct, block=8, K=K_)
#result = np.clip(result, 0, 255).astype(np.uint8)
cv2.imwrite("myans_38.jpg", result)
cv2.imshow("result", result)
cv2.waitKey(0)
cv2.destroyAllWindows()
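
# ---------------------------------------------------------------------------
# Editor's addition: an optional quality check, not part of the original
# answer. PSNR against the source image quantifies the loss introduced by the
# DCT + quantization round trip.
# ---------------------------------------------------------------------------
mse = np.mean((np.clip(result, 0, 255) - img) ** 2)
if mse > 0:
    print("PSNR: {:.2f} dB".format(10 * np.log10(255. ** 2 / mse)))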

# ===========================================================================
# File: prune.py
# Repo: poppin-mice/ShiftAddNet (MIT)
# ===========================================================================
import argparse
import os, time
import distutils.util
import torch
import shutil
import numpy as np
import torch.nn as nn
import torchvision.transforms as transforms
import torchvision.datasets as datasets
import torch.nn.functional as F
import torch.optim as optim
from torch.autograd import Variable
import models
import torch.backends.cudnn as cudnn
import deepshift
from deepshift.convert import convert_to_shift, round_shift_weights, count_layer_type
from models import adder as adder_slow
from adder import adder as adder_fast
import collections
from collections import OrderedDict
# Training settings
parser = argparse.ArgumentParser(description='PyTorch Pruning')
parser.add_argument('--dataset', type=str, default='cifar10', help='training dataset')
parser.add_argument('--data_path', type=str, default=None, help='path to dataset')
parser.add_argument('--batch_size', type=int, default=256, metavar='N', help='batch size for training')
parser.add_argument('--test_batch_size', type=int, default=256, metavar='N', help='batch size for testing')
parser.add_argument('--epochs', type=int, default=160, metavar='N', help='number of epochs to train')
parser.add_argument('--start_epoch', type=int, default=0, metavar='N', help='restart point')
parser.add_argument('--resume', default='', type=str, metavar='PATH', help='path to latest checkpoint')
parser.add_argument('--seed', type=int, default=1, metavar='S', help='random seed')
parser.add_argument('--save', default='./logs', type=str, metavar='PATH', help='path to save prune model')
parser.add_argument('--arch', default='resnet20', type=str, help='architecture to use')
parser.add_argument('--no-cuda', action='store_true', default=False, help='disables CUDA training')
# multi-gpus
parser.add_argument('--gpu_ids', type=str, default='0', help='gpu ids: e.g. 0 0,1,2, 0,2. use -1 for CPU')
# shift hyper-parameters
parser.add_argument('--shift_depth', type=int, default=0, help='how many layers to convert to shift')
parser.add_argument('--shift_type', type=str, choices=['Q', 'PS'], help='shift type for representing weights')
parser.add_argument('--rounding', default='deterministic', choices=['deterministic', 'stochastic'])
parser.add_argument('--weight_bits', type=int, default=5, help='number of bits to represent the shift weights')
parser.add_argument('--use-kernel', type=lambda x:bool(distutils.util.strtobool(x)), default=False, help='whether using custom shift kernel')
# pruning ratio
parser.add_argument('--percent', default=0.6, type=float, help='percentage of weight to prune')
parser.add_argument('--prune_method', default='magnitude', choices=['random', 'magnitude'])
parser.add_argument('--prune_layer', default='all', choices=['shift', 'add', 'all'])
args = parser.parse_args()
args.cuda = not args.no_cuda and torch.cuda.is_available()
torch.manual_seed(args.seed)
if args.cuda:
torch.cuda.manual_seed(args.seed)
if not os.path.exists(args.save):
os.makedirs(args.save)
cudnn.benchmark = True
gpu = args.gpu_ids
gpu_ids = args.gpu_ids.split(',')
args.gpu_ids = []
for gpu_id in gpu_ids:
id = int(gpu_id)
args.gpu_ids.append(id)
print(args.gpu_ids)
if len(args.gpu_ids) > 0:
torch.cuda.set_device(args.gpu_ids[0])
kwargs = {'num_workers': 1, 'pin_memory': True} if args.cuda else {}
if args.dataset == 'cifar10':
train_loader = torch.utils.data.DataLoader(
datasets.CIFAR10('./data.cifar10', train=True, download=True,
transform=transforms.Compose([
transforms.Pad(4),
transforms.RandomCrop(32),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010))
])),
batch_size=args.batch_size, shuffle=True, **kwargs)
test_loader = torch.utils.data.DataLoader(
datasets.CIFAR10('./data.cifar10', train=False, transform=transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010))
])),
batch_size=args.test_batch_size, shuffle=True, **kwargs)
elif args.dataset == 'cifar100':
train_loader = torch.utils.data.DataLoader(
datasets.CIFAR100('./data.cifar100', train=True, download=True,
transform=transforms.Compose([
transforms.Pad(4),
transforms.RandomCrop(32),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010))
])),
batch_size=args.batch_size, shuffle=True, **kwargs)
test_loader = torch.utils.data.DataLoader(
datasets.CIFAR100('./data.cifar100', train=False, transform=transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010))
])),
batch_size=args.test_batch_size, shuffle=True, **kwargs)
elif args.dataset == 'mnist':
trainset = datasets.MNIST('../MNIST', download=True, train=True,
transform=transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.1307,), (0.3081,))]
)
)
train_loader = torch.utils.data.DataLoader(trainset, batch_size=args.batch_size, shuffle=True, num_workers=4)
testset = datasets.MNIST('../MNIST', download=True, train=False,
transform=transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.1307,), (0.3081,))]
)
)
test_loader = torch.utils.data.DataLoader(testset, batch_size=args.test_batch_size, shuffle=True, num_workers=4)
else:
# Data loading code
    traindir = os.path.join(args.data_path, 'train')
    valdir = os.path.join(args.data_path, 'val')
normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
train_dataset = datasets.ImageFolder(
traindir,
transforms.Compose([
transforms.RandomResizedCrop(224),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
normalize,
]))
train_loader = torch.utils.data.DataLoader(
train_dataset, batch_size=args.batch_size, shuffle=True,
num_workers=16, pin_memory=True)
test_loader = torch.utils.data.DataLoader(
datasets.ImageFolder(valdir, transforms.Compose([
transforms.Resize(256),
transforms.CenterCrop(224),
transforms.ToTensor(),
normalize,
])),
batch_size=args.test_batch_size, shuffle=False,
num_workers=16, pin_memory=True)
if args.dataset == 'imagenet':
model = models.__dict__[args.arch](num_classes=1000)
elif args.dataset == 'cifar10':
model = models.__dict__[args.arch](num_classes=10)
elif args.dataset == 'cifar100':
model = models.__dict__[args.arch](num_classes=100)
elif args.dataset == 'mnist':
model = models.__dict__[args.arch](num_classes=10)
else:
raise NotImplementedError('No such dataset!')
if 'shift' in args.arch: # no pretrain
model, _ = convert_to_shift(model, args.shift_depth, args.shift_type, convert_weights=False, use_kernel=args.use_kernel, rounding=args.rounding, weight_bits=args.weight_bits)
if args.cuda:
model.cuda()
if len(args.gpu_ids) > 1:
model = torch.nn.DataParallel(model, device_ids=args.gpu_ids)
# save name
# name model sub-directory "shift_all" if all layers are converted to shift layers
conv2d_layers_count = count_layer_type(model, nn.Conv2d) #+ count_layer_type(model, unoptimized.UnoptimizedConv2d)
linear_layers_count = count_layer_type(model, nn.Linear) #+ count_layer_type(model, unoptimized.UnoptimizedLinear)
print(conv2d_layers_count)
if (args.shift_depth > 0):
if (args.shift_type == 'Q'):
shift_label = "shift_q"
else:
shift_label = "shift_ps"
else:
shift_label = "shift"
# if (conv2d_layers_count==0 and linear_layers_count==0):
if conv2d_layers_count == 0:
shift_label += "_all"
else:
shift_label += "_%s" % (args.shift_depth)
if (args.shift_depth > 0):
shift_label += "_wb_%s" % (args.weight_bits)
args.save = os.path.join(args.save, shift_label)
args.save = os.path.join(args.save, 'prune_'+str(args.prune_method)+'_'+str(args.prune_layer)+'_'+str(args.percent))
if not os.path.exists(args.save):
os.makedirs(args.save)
def save_checkpoint(state, is_best, epoch, filepath):
filename = os.path.join(filepath, 'pruned.pth.tar')
torch.save(state, filename)
def accuracy(output, target, topk=(1,)):
"""Computes the precision@k for the specified values of k"""
maxk = max(topk)
batch_size = target.size(0)
_, pred = output.topk(maxk, 1, True, True)
pred = pred.t()
correct = pred.eq(target.view(1, -1).expand_as(pred))
res = []
for k in topk:
correct_k = correct[:k].reshape(-1).float().sum(0)  # reshape, not view: the slice may be non-contiguous in newer PyTorch
res.append(correct_k.mul_(100.0 / batch_size))
return res
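# Worked illustration (hypothetical shapes): with batch_size=4 and topk=(1, 5),
# output.topk(5, 1, True, True) returns the indices of the 5 highest logits per row;
# after pred.t() the tensor is (5, 4), so correct[:1] checks only the top guess while
# correct[:5] checks whether the label appears anywhere in the top five. Each entry of
# res is the corresponding hit count scaled to a percentage over the batch.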
def test(model):
model.eval()
test_loss = 0
test_acc = 0
for data, target in test_loader:
if args.cuda:
data, target = data.cuda(), target.cuda()
with torch.no_grad():  # Variable(volatile=True) was removed from modern PyTorch
output = model(data)
test_loss += F.cross_entropy(output, target, reduction='sum').item() # sum up batch loss
prec1, prec5 = accuracy(output.data, target.data, topk=(1, 5))
test_acc += prec1.item()
test_loss /= len(test_loader.dataset)
return test_loss, np.round(test_acc / len(test_loader), 2)
def change_name(state_dict):
new_state_dict = OrderedDict()
for key, value in state_dict.items():
if 'conv' in key and '.1.weight' in key:
new_key = key.replace('weight', 'adder')
elif 'downsample' in key and '.1.weight' in key:
new_key = key.replace('weight', 'adder')
else:
new_key = key
new_state_dict[new_key] = value
return new_state_dict
if args.resume:
if os.path.isfile(args.resume):
print("=> loading checkpoint '{}'".format(args.resume))
checkpoint = torch.load(args.resume)
# args.start_epoch = checkpoint['epoch']
best_prec1 = checkpoint['best_prec1']
if 'add' in args.arch:
checkpoint['state_dict'] = change_name(checkpoint['state_dict'])
model.load_state_dict(checkpoint['state_dict'])
print("=> loaded checkpoint '{}' (epoch {}) Prec1: {:f}"
.format(args.resume, checkpoint['epoch'], best_prec1))
else:
print("=> no checkpoint found at '{}'".format(args.resume))
else:
save_checkpoint({'state_dict': model.state_dict()}, False, epoch='init', filepath=args.save)
# round weights to ensure that the results are due to powers of 2 weights
model = round_shift_weights(model)
print('\nEvaluation only')
test_loss0, test_acc0 = test(model)
print('Before pruning: Test Loss: %.8f, Test Acc: %.2f' % (test_loss0, test_acc0))
def create_mask(shape, rate):
mask = torch.cuda.FloatTensor(shape).uniform_() > rate
return mask + 0
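# Sketch of the masking rule (illustrative numbers): uniform_() draws U(0, 1) per
# element, so each entry survives with probability (1 - rate); "mask + 0" merely casts
# the boolean comparison to a numeric tensor. E.g. create_mask((3, 3), 0.4) keeps
# roughly 60% of the entries.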
# -------------------------------------------------------------
if 'shift' in args.arch and args.prune_layer != 'add':
print('prune for shift layer:')
if args.shift_type == 'Q':
shift_module = deepshift.modules_q.Conv2dShiftQ
elif args.shift_type == 'PS':
shift_module = deepshift.modules.Conv2dShift
else:
raise NotImplementedError
# pruning
if args.shift_type == 'Q':
total = 0
for m in model.modules():
if isinstance(m, shift_module):
total += m.weight.data.numel()
shift_weights = torch.zeros(total)
index = 0
for m in model.modules():
if isinstance(m, shift_module):
size = m.weight.data.numel()
shift_weights[index:(index+size)] = m.weight.data.view(-1).abs().clone()
index += size
y, i = torch.sort(shift_weights)
thre_index = int(total * args.percent)
thre = y[thre_index] - 1e-7
pruned = 0
print('Pruning threshold: {}'.format(thre))
zero_flag = False
# ----------------------------------------------------------------
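# Threshold sketch (illustrative numbers): with total=1000 weights and args.percent=0.3,
# thre sits just below the 300th-smallest |weight| (the 1e-7 offset keeps exact ties
# above the strict gt() test); the boundary jitter below then prunes those ties at
# roughly the target rate, and masked-out entries are set to 1, i.e. 2**0, which the
# code treats as "no shift".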
if args.prune_method == 'magnitude':
for k, m in enumerate(model.modules()):
if isinstance(m, shift_module):
shift_copy = m.weight.data.abs().clone()
# prune at boundary (weight == thre)
_mask = torch.eq(shift_copy, thre+1e-7).float().cuda()
_mask = _mask * torch.cuda.FloatTensor(shift_copy.shape).uniform_(-args.percent, 1-args.percent)
shift_copy += _mask
# ---------------------------------
mask = shift_copy.gt(thre).float().cuda()
pruned = pruned + mask.numel() - torch.sum(mask)
m.weight.data = m.weight.data.mul_(mask) + 1 - mask # no shift
if int(torch.sum(mask)) == 0:
zero_flag = True
print('layer index: {:d} \t total params: {:d} \t remaining params: {:d}'.
format(k, mask.numel(), int(torch.sum(mask))))
elif args.prune_method == 'random':
for k, m in enumerate(model.modules()):
if isinstance(m, shift_module):
shift_copy = m.weight.data.abs().clone()
mask = create_mask(shift_copy.shape, args.percent)
pruned = pruned + mask.numel() - torch.sum(mask)
m.weight.data = m.weight.data.mul_(mask) + 1 - mask # no shift
if int(torch.sum(mask)) == 0:
zero_flag = True
print('layer index: {:d} \t total params: {:d} \t remaining params: {:d}'.
format(k, mask.numel(), int(torch.sum(mask))))
else:
raise NotImplementedError
# ----------------------------------------------------------------
elif args.shift_type == 'PS':
total = 0
for m in model.modules():
if isinstance(m, shift_module):
total += m.shift.data.numel()
shift_weights = torch.zeros(total)
index = 0
for m in model.modules():
if isinstance(m, shift_module):
size = m.shift.data.numel()
shift_weights[index:(index+size)] = m.shift.data.view(-1).abs().clone()
index += size
y, i = torch.sort(shift_weights)
thre_index = int(total * args.percent)
thre = y[thre_index] - 1e-7
pruned = 0
print('Pruning threshold: {}'.format(thre))
zero_flag = False
# ----------------------------------------------------------------
if args.prune_method == 'magnitude':
for k, m in enumerate(model.modules()):
if isinstance(m, shift_module):
shift_copy = m.shift.data.abs().clone()
mask = shift_copy.gt(thre).float().cuda()
pruned = pruned + mask.numel() - torch.sum(mask)
m.shift.data.mul_(mask)
m.sign.data.mul_(mask)
if int(torch.sum(mask)) == 0:
zero_flag = True
print('layer index: {:d} \t total params: {:d} \t remaining params: {:d}'.
format(k, mask.numel(), int(torch.sum(mask))))
elif args.prune_method == 'random':
for k, m in enumerate(model.modules()):
if isinstance(m, shift_module):
shift_copy = m.shift.data.abs().clone()
mask = create_mask(shift_copy.shape, args.percent)
pruned = pruned + mask.numel() - torch.sum(mask)
m.shift.data.mul_(mask)
m.sign.data.mul_(mask)
if int(torch.sum(mask)) == 0:
zero_flag = True
print('layer index: {:d} \t total params: {:d} \t remaining params: {:d}'.
format(k, mask.numel(), int(torch.sum(mask))))
else:
raise NotImplementedError
# ----------------------------------------------------------------
print('Total conv params: {}, Pruned conv params: {}, Pruned ratio: {}'.format(total, pruned, float(pruned)/total))
if 'add' in args.arch and args.prune_layer != 'shift':
print('prune for adder layer:')
# adder_module = adder_slow.adder2d  # slow reference implementation; immediately overridden below
adder_module = adder_fast.Adder2D
total = 0
for m in model.modules():
if isinstance(m, adder_module):
total += m.adder.data.numel()
adder_weights = torch.zeros(total)
index = 0
for m in model.modules():
if isinstance(m, adder_module):
size = m.adder.data.numel()
adder_weights[index:(index+size)] = m.adder.data.view(-1).abs().clone()
index += size
y, i = torch.sort(adder_weights)
thre_index = int(total * args.percent)
thre = y[thre_index]
pruned = 0
print('Pruning threshold: {}'.format(thre))
zero_flag = False
# ----------------------------------------------------------------
if args.prune_method == 'magnitude':
for k, m in enumerate(model.modules()):
if isinstance(m, adder_module):
adder_copy = m.adder.data.abs().clone()
mask = adder_copy.gt(thre).float().cuda()
pruned = pruned + mask.numel() - torch.sum(mask)
m.adder.data.mul_(mask)
if int(torch.sum(mask)) == 0:
zero_flag = True
print('layer index: {:d} \t total params: {:d} \t remaining params: {:d}'.
format(k, mask.numel(), int(torch.sum(mask))))
elif args.prune_method == 'random':
for k, m in enumerate(model.modules()):
if isinstance(m, adder_module):  # was shift_module -- a copy-paste slip in the adder branch
adder_copy = m.adder.data.abs().clone()
mask = create_mask(adder_copy.shape, args.percent)
pruned = pruned + mask.numel() - torch.sum(mask)
m.adder.data.mul_(mask)
if int(torch.sum(mask)) == 0:
zero_flag = True
print('layer index: {:d} \t total params: {:d} \t remaining params: {:d}'.
format(k, mask.numel(), int(torch.sum(mask))))
else:
raise NotImplementedError
# ----------------------------------------------------------------
print('Total adder params: {}, Pruned adder params: {}, Pruned ratio: {}'.format(total, pruned, float(pruned)/total))
# -------------------------------------------------------------
print('\nTesting')
test_loss1, test_acc1 = test(model)
print('After Pruning: Test Loss: %.8f, Test Acc: %.2f' % (test_loss1, test_acc1))
save_checkpoint({
'epoch': 0,
'state_dict': model.state_dict(),
'acc': test_acc1,
'best_acc': 0.,
}, False, epoch=0, filepath=args.save)
with open(os.path.join(args.save, 'prune.txt'), 'w') as f:
f.write('Before pruning: Test Loss: %.8f, Test Acc: %.2f\n' % (test_loss0, test_acc0))
f.write('Total conv params: {}, Pruned conv params: {}, Pruned ratio: {}\n'.format(total, pruned, float(pruned)/total))
f.write('After Pruning: Test Loss: %.8f, Test Acc: %.2f\n' % (test_loss1, test_acc1))
if zero_flag:
f.write("There exists a layer with 0 parameters left.") | 44.431767 | 178 | 0.589698 | 2,459 | 19,861 | 4.629931 | 0.141114 | 0.017391 | 0.029864 | 0.025296 | 0.526658 | 0.48924 | 0.455775 | 0.417918 | 0.391304 | 0.378305 | 0 | 0.023502 | 0.248024 | 19,861 | 447 | 179 | 44.431767 | 0.738801 | 0.057802 | 0 | 0.483204 | 0 | 0.018088 | 0.122578 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.01292 | false | 0 | 0.049096 | 0 | 0.072351 | 0.056848 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
9857800007ceff368c121bd9101455e6fb400d8d | 6,200 | py | Python | L5NeuronSimulation/raster_maker.py | JuliusvR/L5NeuronSimulation | 1fc68c7367c439e1f9c9b73a15a95609858ec720 | [
"MIT"
] | 2 | 2020-11-12T15:12:31.000Z | 2021-12-09T19:12:55.000Z | L5NeuronSimulation/raster_maker.py | JuliusvR/L5NeuronSimulation | 1fc68c7367c439e1f9c9b73a15a95609858ec720 | [
"MIT"
] | 2 | 2021-05-20T21:36:12.000Z | 2021-08-29T15:32:35.000Z | L5NeuronSimulation/raster_maker.py | JuliusvR/L5NeuronSimulation | 1fc68c7367c439e1f9c9b73a15a95609858ec720 | [
"MIT"
] | 6 | 2021-03-03T22:14:39.000Z | 2021-11-23T13:44:35.000Z | """
Contains the functions and class (SonataWriter) necessary for generating and saving the input spike rasters.
"""
import numpy as np
import scipy.signal as ss
import scipy
import scipy.stats as st
import matplotlib.pyplot as plt
import h5py
from bmtk.utils.reports.spike_trains import PoissonSpikeGenerator
import pandas as pd
from scipy.fft import fft
import matplotlib
import statsmodels.api as sm
class SonataWriter:
"""Class used to dynamically writing spike rasters to an h5 file.
Attributes
----------
file : h5py.File
file object being worked on
group : h5py.Group
gropu where the datasets reside
datasets : dict
datasets that are saved to the file
Methods
-------
append_ds(vals, ds)
appends the given values to the end of the given dataset
append_repeat(ds, val, N)
appends the given value N times to the end of the given dataset
close()
close the h5py file
"""
def __init__(self, f_name, groups, datasets, types):
"""
Parameters
----------
f_name : str
name of file location
groups : list
list of group names (str) that are layered into the h5py file
in the order given.
datasets : list
list of dataset names (str)
types : list
list of data types that corresponds to the datasets list
"""
self.file = h5py.File(f_name, 'w')
self.group = self.file
for group in groups:
self.group = self.group.create_group(group)
self.datasets = {}
for i, ds in enumerate(datasets):
self.datasets[ds] = self.group.create_dataset(ds, data=[], dtype=types[i], chunks=True, maxshape=(None,))
def append_ds(self, vals, ds):
"""appends the given values to the end of the given dataset
Parameters
----------
vals : list
list of values to be appended to the dataset
ds : str
key of the dataset to append to
"""
length = len(self.datasets[ds])
self.datasets[ds].resize((length + len(vals), ))
self.datasets[ds][length:] = vals
def append_repeat(self, ds, val, N):
"""appends the given value N times to the end of the given dataset
Parameters
----------
ds : str
key of the dataset to append to
val : [type]
value to be appended N times
N : int
number of vals to append to the dataset
"""
self.append_ds([val for i in range(N)], ds)
def close(self):
"""Closes the h5py File
"""
self.file.close()
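# Usage sketch (file and dataset names here are hypothetical, not part of the module):
# writer = SonataWriter("inputs.h5", ["spikes"], ["timestamps", "node_ids"], [float, int])
# writer.append_ds([10.0, 12.5], "timestamps")
# writer.append_repeat("node_ids", 0, 2)
# writer.close()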
def zscore(x):
"""z scores the given array"""
return (x-np.mean(x))/np.std(x)
def minmax(x):
"""min max normalizes the given array"""
return (x - np.min(x))/(np.max(x)-np.min(x))
def moving_average(x, w):
return np.convolve(x, np.ones(w), 'valid') / w
def make_noise(num_traces=100,num_samples=4999):
"""Creates a noise trace used in generating spike rasters.
Parameters
----------
num_traces : int, optional
number of noise traces to create (first dimension), by default 100
num_samples : int, optional
length of the trace (second dimension), by default 4999
Returns
-------
np.array
noise trace
"""
B = [0.049922035, -0.095993537, 0.050612699, -0.004408786]
A = [1, -2.494956002, 2.017265875, -0.522189400]
invfn = np.zeros((num_traces,num_samples))
for i in np.arange(0,num_traces):
wn = np.random.normal(loc=1,
scale=0.5,size=num_samples+2000)
invfn[i,:] = minmax(ss.lfilter(B, A, wn)[2000:])+0.5 # Create '1/f' Noise
return invfn
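# Note: B and A above are standard IIR filter coefficients that approximate a pink
# (1/f) spectrum from white noise; the first 2000 filtered samples are discarded so the
# filter transient can settle, and minmax(...) + 0.5 puts each trace in [0.5, 1.5].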
def shift_exc_noise(ts, nid, seconds, time_shift=4):
"""Creates a shifted, min-max normalized average traces of the given spike raster.
Parameters
----------
ts : list
times (float) where spikes occur
nid : int
node id associated with each spike
seconds : float
length of the raster in seconds
time_shift : int, optional
how many ms to shift the average trace by, by default 4
Returns
-------
np.ndarray
min-max normalized average firing-rate profile, circularly shifted by time_shift ms
"""
h = np.histogram(ts, bins=np.arange(0, seconds * 1000, 1))
fr_prof = h[0] / (0.001 * (np.max(nid) + 1))
wrap = fr_prof[-time_shift:]  # use the time_shift parameter instead of a hardcoded 4
fr_prof[time_shift:] = fr_prof[0:-time_shift]
fr_prof[0:time_shift] = wrap
fr_prof = minmax(fr_prof)+0.5
return fr_prof
def make_save_spikes(writer, exp, dist, numUnits=100,rateProf=None,start_id=0,start_time=0):
"""Creates and saves spikes for the given nodes using
the provided noise trace and a random mean firing rate generated with
the given distribution.
Parameters
----------
writer : SonataWriter
how the spikes are saved
exp : bool
whether the value from dist should be fed to np.exp()
dist : np.array()
array of firing rates of shape (numUnits,)
numUnits : int, optional
number of nodes to generate spikes for, by default 100
rateProf : np.array(), optional
noise trace for each unit must have numUnits rows, by default None
start_id : int, optional
node_id that the first unit/node should be associated with, by default 0
start_time : int, optional
at what time the spikes should start being generated, by default 0
"""
for i in np.arange(0,numUnits):
if rateProf is None or rateProf.shape[0] <= i:
raise ValueError("rateProf must provide a noise row for each of the numUnits units")
r = rateProf[i, :]
r[r < 0] = 0  # can't have negative firing rates
rate_temp=[];simSpks_temp=[]
#Multiplies the noise trace by the randomly generated firing rate.
if exp:
rate_temp = r*np.exp(dist[i])
else:
rate_temp = r*dist[i]
numbPoints = scipy.stats.poisson(rate_temp/1000).rvs()#Poisson number of points
simSpks=np.where(numbPoints>0)[0]
writer.append_repeat("node_ids", i + start_id, len(simSpks))
writer.append_ds(simSpks + start_time, "timestamps")
| 30.243902 | 117 | 0.602581 | 865 | 6,200 | 4.255491 | 0.286705 | 0.028253 | 0.013583 | 0.010867 | 0.109209 | 0.101059 | 0.080956 | 0.080956 | 0.075523 | 0.059223 | 0 | 0.03412 | 0.295645 | 6,200 | 204 | 118 | 30.392157 | 0.808793 | 0.474194 | 0 | 0 | 0 | 0 | 0.008827 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.149254 | false | 0 | 0.179104 | 0.014925 | 0.41791 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
98579a29b18e47304be5f1259c1a91c9aae8a65f | 8,086 | py | Python | Task.py | omercsp/taskrunner | f972c7879ce9f4e390524a332388ea8c478ea19b | [
"MIT"
] | 5 | 2022-01-04T13:35:48.000Z | 2022-03-20T11:34:32.000Z | Task.py | omercsp/taskrunner | f972c7879ce9f4e390524a332388ea8c478ea19b | [
"MIT"
] | null | null | null | Task.py | omercsp/taskrunner | f972c7879ce9f4e390524a332388ea8c478ea19b | [
"MIT"
] | null | null | null | from config import *
from argparse import Namespace as Args
from schemas import Schema
import shlex
import subprocess
import signal
class Task(object):
def _check_empty_setting(self, s, title):
if len(s) > 0:
return
raise TaskException("Expanded {} for task '{}' is empty".format(title, self.name))
def __init__(self, name: str, config: Config) -> None:
super().__init__()
info("Initializing task '{}'", name)
task_descriptor = config.get_task_desc(name, True)
self.name = name
self.config = config
self.short_desc = task_descriptor.get(Schema.Keys.Task.ShortDesc, None)
self.long_desc = task_descriptor.get(Schema.Keys.Task.LongDesc, None)
self.hidden = task_descriptor.get(Schema.Keys.Task.Hidden, False)
self.stop_on_error = task_descriptor.get(Schema.Keys.Task.StopOnError, True)
self.commands = task_descriptor.get(Schema.Keys.Task.Commands, [])
self.cwd = task_descriptor.get(Schema.Keys.Task.Cwd, None)
self.shell = task_descriptor.get(Schema.Keys.Task.Shell, False)
self.shell_path = task_descriptor.get(
Schema.Keys.Task.ShellPath, config.default_shell_path())
self.env = task_descriptor.get(Schema.Keys.Task.Env, None)
self.c_image = task_descriptor.get(Schema.Keys.Task.CImage, None)
self.c_volumes = task_descriptor.get(Schema.Keys.Task.CVolumes, [])
self.c_interactive = task_descriptor.get(Schema.Keys.Task.CInteractive, False)
self.c_tty = task_descriptor.get(Schema.Keys.Task.CTty, False)
self.c_flags = task_descriptor.get(Schema.Keys.Task.CFlags, "")
self.c_exec = task_descriptor.get(Schema.Keys.Task.CExec, False)
self.c_rm = task_descriptor.get(Schema.Keys.Task.CRemove, True)
self.c_tool = task_descriptor.get(Schema.Keys.Task.CTool,
self.config.default_container_tool())
self.c_shell = task_descriptor.get(Schema.Keys.Task.CShell, False)
self.c_shell_path = task_descriptor.get(Schema.Keys.Task.CShellPath,
self.config.default_container_shell_path())
self.c_cwd = task_descriptor.get(Schema.Keys.Task.CCwd, None)
self.c_env = task_descriptor.get(Schema.Keys.Task.CEnv, {})
self.c_sudo = task_descriptor.get(Schema.Keys.Task.CSudo, False)
self.expanded = False
def args_update(self, args: Args) -> None:
if args.stop_on_error:
self.stop_on_error = args.stop_on_error
if args.command:
self.commands = args.command
if args.cwd:
self.cwd = args.cwd
if args.shell:
self.shell = (args.shell == TASK_YES_TOKEN)
if args.shell_path:
self.shell_path = args.shell_path
if args.env:
self.env = {}
for e in args.env:
e_name, e_value = parse_assignment_str(e)
self.env[e_name] = e_value
if args.c_image:
self.c_image = args.c_image
if args.c_volume:
self.c_volumes = args.c_volume
if args.c_interactive:
self.c_interactive = (args.c_interactive == TASK_YES_TOKEN)
if args.c_tty:
self.c_tty = (args.c_tty == TASK_YES_TOKEN)
if args.c_flags:
self.c_flags = args.c_flags
if args.c_exec:
self.c_exec = args.c_exec
if args.c_rm:
self.c_rm = (args.c_rm == TASK_YES_TOKEN)
if args.c_tool:
self.c_tool = args.c_tool  # was args.container_tool, which does not match the guard above
if args.c_shell:
self.c_shell = (args.c_shell == TASK_YES_TOKEN)
if args.c_shell_path:
self.c_shell_path = args.c_shell_path
if args.c_cwd:
self.c_cwd = args.c_cwd
if args.c_env:
self.c_env = {}
for e in args.c_env:
e_name, e_value = parse_assignment_str(e)
self.c_env[e_name] = e_value
def expand_args(self, expander: StringVarExpander) -> None:
if self.expanded:
warn("Task '{}' is already expanded", self.name)
return
info("Expanding task '{}'".format(self.name))
self.expanded = True
if self.env is not None:
self.env = {expander(k): expander(v) for k, v in self.env.items()}
for k, v in self.env.items():
info("Environment variable will be set as '{}={}'", k, v)
if self.cwd:
self.cwd = expander(self.cwd)
self.commands = [expander(c) for c in self.commands]
if self.c_cwd:
self.c_cwd = expander(self.c_cwd)
if self.c_image:
self.c_image = expander(self.c_image)
self.c_volumes = [expander(v) for v in self.c_volumes]
self.c_env = {expander(k): expander(v) for k, v in self.c_env.items()}
def _simple_cmd_arr(self, cmd) -> list:
info("Preparing simple command")
if self.shell:
return [cmd]
try:
return shlex.split(cmd)
except ValueError as e:
raise TaskException("Illegal command '{}' for task '{}' - {}".format(cmd, self.name, e))
def _container_cmd_arr(self, cmd) -> list:
info("Preparing container command")
cmd_array = ["sudo", self.c_tool] if self.c_sudo else [self.c_tool]
cmd_array.append("exec" if self.c_exec else "run")
if self.c_cwd:
cmd_array += ["-w", self.c_cwd]
if self.c_interactive:
cmd_array.append("-i")
if self.c_tty:
cmd_array.append("-t")
if not self.c_exec:
if self.c_rm:
cmd_array.append("--rm")
for v in self.c_volumes:
cmd_array += ["-v", v]
for k, v in self.c_env.items():
cmd_array += ["-e", "{}={}".format(k, v)]
info("Setting container environment variable will '{}={}'", k, v)
cmd_array += self.c_flags.split()
cmd_array.append(self.c_image)
if self.c_shell and cmd is not None:
cmd_array += [self.c_shell_path, "-c"]
if cmd:
cmd_array += [cmd]
info("Command is {}", cmd_array)
return cmd_array
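# Illustrative expansion (all values hypothetical): for c_tool="docker", c_rm=True,
# c_volumes=["/src:/src"], c_image="alpine", c_shell=True and cmd="ls /src", the
# method builds roughly:
#   ["docker", "run", "--rm", "-v", "/src:/src", "alpine", "/bin/sh", "-c", "ls /src"]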
def _run_cmd(self, cmd: list, cmd_str: str) -> int:
info("Running command (joined):")
raw_msg(cmd_str)
p = None
try:
p = subprocess.Popen(cmd, shell=self.shell, executable=self.shell_path, env=self.env,
cwd=self.cwd)
return p.wait()
except (OSError, FileNotFoundError) as e:
raise TaskException("Error occurred running command '{}' - {}".format(cmd_str, e))
except KeyboardInterrupt:
if p:
p.send_signal(signal.SIGINT)
p.wait()
raise TaskException("User interrupt")
def run(self) -> int:
if not self.expanded:
raise TaskException("Task must be expanded before run") # Should never happen
if self.cwd:
info("Working directory will be set to '{}'", self.cwd)
else:
info("No working directory will be set")
if len(self.commands) == 0:
if self.c_image:
info("Running container's default command")
return self._run_cmd(self._container_cmd_arr(None), "<CONTAINER_DEFAULT>")
print("No commands defined for task '{}'. Nothing to do.".format(self.name))
return 0
rc = 0
for cmd in self.commands:
info("Command is '{}'", cmd)
cmd_arr = self._container_cmd_arr(cmd) if self.c_image else self._simple_cmd_arr(cmd)
cmd_rc = self._run_cmd(cmd_arr, cmd)
if cmd_rc == 0:
continue
info("Command had failed")
if self.stop_on_error:
info("Stopping of first error")
return cmd_rc
if rc == 0:
rc = cmd_rc
return rc
| 40.029703 | 100 | 0.581499 | 1,080 | 8,086 | 4.155556 | 0.152778 | 0.06016 | 0.083333 | 0.112745 | 0.296569 | 0.260918 | 0.136586 | 0.055258 | 0.037433 | 0.030749 | 0 | 0.00107 | 0.306827 | 8,086 | 201 | 101 | 40.228856 | 0.799643 | 0.00235 | 0 | 0.066298 | 0 | 0 | 0.083323 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.044199 | false | 0 | 0.033149 | 0 | 0.138122 | 0.005525 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
985e8bfeca006e01603b2e2887b6636296128621 | 7,488 | py | Python | CUT/experiments/tmux_launcher.py | Theomat/colorization-av-enseirb-2020 | c54c2388ea39a62289fa2f1c51b4757bf55d3c4f | [
"Apache-2.0"
] | 1,422 | 2020-07-31T00:31:19.000Z | 2022-03-31T11:35:26.000Z | CUT/experiments/tmux_launcher.py | Theomat/colorization-av-enseirb-2020 | c54c2388ea39a62289fa2f1c51b4757bf55d3c4f | [
"Apache-2.0"
] | 123 | 2020-07-31T04:16:03.000Z | 2022-03-21T14:02:20.000Z | CUT/experiments/tmux_launcher.py | Theomat/colorization-av-enseirb-2020 | c54c2388ea39a62289fa2f1c51b4757bf55d3c4f | [
"Apache-2.0"
] | 324 | 2020-07-31T00:40:11.000Z | 2022-03-31T10:01:10.000Z | """
experiment launcher using tmux panes
"""
import os
import math
import GPUtil
import re
available_gpu_devices = None
class Options():
def __init__(self, *args, **kwargs):
self.args = []
self.kvs = {"gpu_ids": "0"}
self.set(*args, **kwargs)
def set(self, *args, **kwargs):
for a in args:
self.args.append(a)
for k, v in kwargs.items():
self.kvs[k] = v
return self
def remove(self, *args):
for a in args:
if a in self.args:
self.args.remove(a)
if a in self.kvs:
del self.kvs[a]
return self
def update(self, opt):
self.args += opt.args
self.kvs.update(opt.kvs)
return self
def __str__(self):
final = " ".join(self.args)
for k, v in self.kvs.items():
final += " --{} {}".format(k, v)
return final
def clone(self):
opt = Options()
opt.args = self.args.copy()
opt.kvs = self.kvs.copy()
return opt
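# Illustrative use (names are hypothetical): opt = Options("--verbose", name="exp1")
# renders via str(opt) as "--verbose --gpu_ids 0 --name exp1"; opt.clone().set(lr=0.1)
# derives a variant without mutating the original.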
def grab_pattern(pattern, text):
found = re.search(pattern, text)
if found is not None:
return found[1]
else:
return None
# http://code.activestate.com/recipes/252177-find-the-common-beginning-in-a-list-of-strings/
def findcommonstart(strlist):
prefix_len = ([min([x[0] == elem for elem in x])
for x in zip(*strlist)] + [0]).index(0)
prefix_len = max(1, prefix_len - 4)
return strlist[0][:prefix_len]
class TmuxLauncher():
def __init__(self):
super().__init__()
self.tmux_prepared = False
def prepare_tmux_panes(self, num_experiments, dry=False):
self.pane_per_window = 1
self.n_windows = int(math.ceil(num_experiments / self.pane_per_window))
print('preparing {} tmux panes'.format(num_experiments))
for w in range(self.n_windows):
if dry:
continue
window_name = "experiments_{}".format(w)
os.system("tmux new-window -n {}".format(window_name))
self.tmux_prepared = True
def refine_command(self, command, which_epoch, continue_train, gpu_id=None):
command = str(command)
if "--gpu_ids" in command:
gpu_ids = re.search(r'--gpu_ids ([\d,?]+)', command)[1]
else:
gpu_ids = "0"
gpu_ids = gpu_ids.split(",")
num_gpus = len(gpu_ids)
global available_gpu_devices
if available_gpu_devices is None and gpu_id is None:
available_gpu_devices = [str(g) for g in GPUtil.getAvailable(limit=8, maxMemory=0.5)]
if gpu_id is not None:
available_gpu_devices = [i for i in str(gpu_id)]
if len(available_gpu_devices) < num_gpus:
raise ValueError("{} GPU(s) required for the command {} is not available".format(num_gpus, command))
active_devices = ",".join(available_gpu_devices[:num_gpus])
if which_epoch is not None:
which_epoch = " --epoch %s " % which_epoch
else:
which_epoch = ""
command = "CUDA_VISIBLE_DEVICES={} {} {}".format(active_devices, command, which_epoch)
if continue_train:
command += " --continue_train "
# available_gpu_devices = [str(g) for g in GPUtil.getAvailable(limit=8, maxMemory=0.8)]
available_gpu_devices = available_gpu_devices[num_gpus:]
return command
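# Example (hypothetical): given two free GPUs ["2", "3"] and the command
# "python train.py --gpu_ids 0,1", refine_command returns roughly
# "CUDA_VISIBLE_DEVICES=2,3 python train.py --gpu_ids 0,1" and removes both GPUs
# from the available pool.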
def send_command(self, exp_id, command, dry=False, continue_train=False):
command = self.refine_command(command, None, continue_train=continue_train)
pane_name = "experiments_{windowid}.{paneid}".format(windowid=exp_id // self.pane_per_window,
paneid=exp_id % self.pane_per_window)
if dry is False:
os.system("tmux send-keys -t {} \"{}\" Enter".format(pane_name, command))
print("{}: {}".format(pane_name, command))
return pane_name
def run_command(self, command, ids, which_epoch=None, continue_train=False, gpu_id=None):
if type(command) is not list:
command = [command]
if ids is None:
ids = list(range(len(command)))
if type(ids) is not list:
ids = [ids]
for id in ids:
this_command = command[id]
refined_command = self.refine_command(this_command, which_epoch, continue_train=continue_train, gpu_id=gpu_id)
print(refined_command)
os.system(refined_command)
def commands(self):
return []
def launch(self, ids, test=False, dry=False, continue_train=False):
commands = self.test_commands() if test else self.commands()
if type(ids) is list:
commands = [commands[i] for i in ids]
if not self.tmux_prepared:
self.prepare_tmux_panes(len(commands), dry)
assert self.tmux_prepared
for i, command in enumerate(commands):
self.send_command(i, command, dry, continue_train=continue_train)
def dry(self):
self.launch(dry=True)
def stop(self):
num_experiments = len(self.commands())
self.pane_per_window = 4
self.n_windows = int(math.ceil(num_experiments / self.pane_per_window))
for w in range(self.n_windows):
window_name = "experiments_{}".format(w)
for i in range(self.pane_per_window):
os.system("tmux send-keys -t {window}.{pane} C-c".format(window=window_name, pane=i))
def close(self):
num_experiments = len(self.commands())
self.pane_per_window = 1
self.n_windows = int(math.ceil(num_experiments / self.pane_per_window))
for w in range(self.n_windows):
window_name = "experiments_{}".format(w)
os.system("tmux kill-window -t {}".format(window_name))
def print_names(self, ids, test=False):
if test:
cmds = self.test_commands()
else:
cmds = self.commands()
if type(ids) is list:
cmds = [cmds[i] for i in ids]
for cmdid, cmd in enumerate(cmds):
name = grab_pattern(r'--name ([^ ]+)', cmd)
print(name)
def create_comparison_html(self, expr_name, ids, subdir, title, phase):
cmds = self.test_commands()
if type(ids) is list:
cmds = [cmds[i] for i in ids]
no_easy_label = True
dirs = []
labels = []
for cmdid, cmd in enumerate(cmds):
name = grab_pattern(r'--name ([^ ]+)', cmd)
which_epoch = grab_pattern(r'--epoch ([^ ]+)', cmd)
if which_epoch is None:
which_epoch = "latest"
label = grab_pattern(r'--easy_label "([^"]+)"', cmd)
if label is None:
label = name
else:
no_easy_label = False
labels.append(label)
dir = "results/%s/%s_%s/%s/" % (name, phase, which_epoch, subdir)
dirs.append(dir)
commonprefix = findcommonstart(labels) if no_easy_label else ""
labels = ['"' + label[len(commonprefix):] + '"' for label in labels]
dirstr = ' '.join(dirs)
labelstr = ' '.join(labels)
command = "python ~/tools/html.py --web_dir_prefix results/comparison_ --name %s --dirs %s --labels %s --image_width 256" % (expr_name + '_' + title, dirstr, labelstr)
print(command)
os.system(command)
| 34.666667 | 175 | 0.581063 | 964 | 7,488 | 4.325726 | 0.182573 | 0.028777 | 0.045564 | 0.036691 | 0.259233 | 0.211271 | 0.190647 | 0.177698 | 0.165228 | 0.165228 | 0 | 0.005323 | 0.297543 | 7,488 | 215 | 176 | 34.827907 | 0.787452 | 0.028579 | 0 | 0.197674 | 0 | 0.005814 | 0.079284 | 0.007433 | 0 | 0 | 0 | 0 | 0.005814 | 1 | 0.116279 | false | 0 | 0.023256 | 0.005814 | 0.209302 | 0.034884 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
985ff6ad544a34fd7123b7189fde0bbabfec1f58 | 1,557 | py | Python | code/strats/punitiveDetective.py | yasirroni/PrisonersDilemmaTournament | ce3de71ff2ccb647aa00129473ff60f985e16e17 | [
"MIT"
] | null | null | null | code/strats/punitiveDetective.py | yasirroni/PrisonersDilemmaTournament | ce3de71ff2ccb647aa00129473ff60f985e16e17 | [
"MIT"
] | null | null | null | code/strats/punitiveDetective.py | yasirroni/PrisonersDilemmaTournament | ce3de71ff2ccb647aa00129473ff60f985e16e17 | [
"MIT"
] | null | null | null | def strategy(history, memory):
"""
Orannis's punitive detective:
Cooperate but when the other player defects, cooperate one more turn to
see if they defect again. If they do, defect for 10 turns.
Cooperate twice more and if they defect the second time, defect forever.
memory is a tuple of (state, counter)
where state is one of:
"initial_cooperation"
"first_punishment"
"second_cooperation"
"final_punishment"
"""
num_rounds = history.shape[1]
if memory is None or memory[0] == "initial_cooperation":
# If they defected twice in a row, transition to first punishment
if num_rounds >= 2 and history[1, -1] == 0 and history[1, -2] == 0:
return 0, ("first_punishment", 9)
# Otherwise keep cooperating
return 1, ("initial_cooperation", 0)
elif memory[0] == "first_punishment":
# Punish until the counter runs out
if memory[1] > 0:
return 0, ("first_punishment", memory[1] - 1)
# Once done, transition to second cooperation
else:
return 1, ("second_cooperation", 0)
elif memory[0] == "second_cooperation":
# If they defected twice in a row, transition to final punishment
if num_rounds >= 2 and history[1, -1] == 0 and history[1, -2] == 0:
return 0, ("final_punishment", 0)
# Otherwise keep cooperating
return 1, ("second_cooperation", 0)
elif memory[0] == "final_punishment":
return 0, ("final_punishment", 0)
| 40.973684 | 80 | 0.619139 | 203 | 1,557 | 4.660099 | 0.344828 | 0.031712 | 0.046512 | 0.069767 | 0.457717 | 0.293869 | 0.293869 | 0.293869 | 0.217759 | 0.217759 | 0 | 0.034081 | 0.283879 | 1,557 | 37 | 81 | 42.081081 | 0.81435 | 0.427103 | 0 | 0.352941 | 0 | 0 | 0.22515 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.058824 | false | 0 | 0 | 0 | 0.470588 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
9861a9c527d45b483c922148595e6eb6f8bdd786 | 4,743 | py | Python | src/main/python/salento/reports/map_computation/get_state_call_values.py | khanhgithead/salento | b4933a44ce4a18639b207db0bd555785cee2cc94 | [
"Apache-2.0"
] | 4 | 2019-01-14T06:21:49.000Z | 2019-12-10T08:55:23.000Z | src/main/python/salento/reports/map_computation/get_state_call_values.py | khanhgithead/salento | b4933a44ce4a18639b207db0bd555785cee2cc94 | [
"Apache-2.0"
] | 9 | 2017-12-03T18:27:07.000Z | 2018-06-08T19:43:38.000Z | src/main/python/salento/reports/map_computation/get_state_call_values.py | khanhgithead/salento | b4933a44ce4a18639b207db0bd555785cee2cc94 | [
"Apache-2.0"
] | 3 | 2017-12-18T19:39:25.000Z | 2019-01-17T11:00:05.000Z | """
# ****************************************************************************
#
# GOVERNMENT PURPOSE RIGHTS
#
# Contract Number: FA8750-15-2-0270 (Prime: William Marsh Rice University)
# Contractor Name: GrammaTech, Inc. (Right Holder - subaward R18683)
# Contractor Address: 531 Esty Street, Ithaca, NY 14850
# Expiration Date: 22 September 2023
#
# The Government's rights to use, modify, reproduce, release, perform,
# display, or disclose this software are restricted by DFARS 252.227-7014
# Rights in Noncommercial Computer Software and Noncommercial Computer Software
# Documentation clause contained in the above identified contract.
# No restrictions apply after the expiration date shown above.
# Any reproduction of the software or portions thereof marked with this legend
# must also reproduce the markings and any copyright.
#
# ****************************************************************************
# ****************************************************************************
#
# (c) 2014-2018 GrammaTech, Inc. All rights reserved.
#
# ****************************************************************************
"""
from __future__ import print_function
import argparse
import json
from salento.aggregators.base import Aggregator
class RawProbAggregator(Aggregator):
"""
This is based on the simple sequence aggregator, here for each call
the probability is retrieved. The schema of the output is below
{
"title" : "Schema File for representation of the probability values",
"type" : "object",
"properties" : {
"type" : "object",
"description" : "Each unit",
"properties" : {
"type" : "object",
"description" : "Each Sequence",
"properties" : {
"type" : "object",
"description" : "Each Call",
"properties" : {
"type" : "number",
"description" : "raw probability values"
}
}
}
}
}
"""
def __init__(self, data_file, model_dir):
Aggregator.__init__(self, data_file, model_dir)
def run(self):
"""
invoke the RNN to get the probability
return combined call and state probability values
"""
result_data = {}
# iterate over units
for k, package in enumerate(self.packages()):
result_data[str(k)] = {}
spec = self.get_latent_specification(package)
# iterate over sequence
for j, sequence in enumerate(self.sequences(package)):
events = self.events(sequence)
seq_calls = "--".join(x['call'] for x in events)
event_key = str(j) + '--' + seq_calls
event_data = {}
# iterate over calls
for i, event in enumerate(events):
call_key = (str(i) + '--' + event['call'])
call_prob = float(self.distribution_next_call(
spec, events[:i+1], call=self.call(event)))
# next state probability
dist = self.distribution_next_state(spec, events[:i+1], None)
# use the probability summation rule on conditional
# probability to get a unified probability value
# Pr(Call, States) = Pr(State0| Call)Pr(Call) +
# Pr(State1| Call)Pr(Call) +
# Pr(State2| Call)Pr(Call)
prob_value = 0
# get the individual states
for key, value in dist.items():
if '#' in key:
prob_value += call_prob*value
event_data[call_key] = prob_value
result_data[str(k)][event_key] = event_data
return result_data
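# Shape of the result (values and call names below are hypothetical):
# {"0": {"0--fopen--fread": {"0--fopen": 0.91, "1--fread": 0.64}}}
# i.e. unit index -> "<seq idx>--<joined calls>" -> "<call idx>--<call>" -> probability.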
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--data_file', type=str, required=True,
help='input data file')
parser.add_argument('--model_dir', type=str, required=True,
help='directory to load model from')
parser.add_argument('--result_file', type=str, default=None,
help='write out result in json file')
clargs = parser.parse_args()
with RawProbAggregator(clargs.data_file, clargs.model_dir) as aggregator:
result = aggregator.run()
if clargs.result_file:
with open(clargs.result_file, 'w') as fwrite:
json.dump(result, fwrite)
else:
print(json.dumps(result))
| 41.243478 | 81 | 0.528779 | 474 | 4,743 | 5.164557 | 0.42827 | 0.01634 | 0.02451 | 0.03799 | 0.081291 | 0.019608 | 0 | 0 | 0 | 0 | 0 | 0.016713 | 0.318786 | 4,743 | 114 | 82 | 41.605263 | 0.740947 | 0.479865 | 0 | 0 | 0 | 0 | 0.056685 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.044444 | false | 0 | 0.088889 | 0 | 0.177778 | 0.044444 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
98659147aeb87e203d9832403d6ea41ee4d2ded3 | 17,679 | py | Python | src/cascade/model/priors.py | adolgert/cascade | 2084e07c9ee5e901dd407b817220de882c7246a3 | [
"MIT"
] | null | null | null | src/cascade/model/priors.py | adolgert/cascade | 2084e07c9ee5e901dd407b817220de882c7246a3 | [
"MIT"
] | null | null | null | src/cascade/model/priors.py | adolgert/cascade | 2084e07c9ee5e901dd407b817220de882c7246a3 | [
"MIT"
] | null | null | null | from copy import copy
from functools import total_ordering
import numpy as np
import scipy.stats as stats
from cascade.core import getLoggers
CODELOG, MATHLOG = getLoggers(__name__)
# A description of how dismod interprets these distributions and their parameters can be found here:
# https://bradbell.github.io/dismod_at/doc/prior_table.htm
class PriorError(ValueError):
"""Wrong value passed into the priors."""
@total_ordering
class _Prior:
"""The base for all Priors
"""
density = None
def __init__(self, name=None):
self.name = name
def _parameters(self):
raise NotImplementedError()
def parameters(self):
return dict(density=self.density, **self._parameters())
def assign(self, **kwargs):
"""Create a new distribution with modified parameters."""
modified = copy(self)
if set(kwargs.keys()) - set(self.__dict__.keys()):
missing = list(sorted(set(kwargs.keys()) - set(self.__dict__.keys())))
raise AttributeError(f"The prior doesn't have these attributes {missing}.")
modified.__dict__.update(kwargs)
return modified
def __hash__(self):
return hash((frozenset(self.parameters().items()), self.name))
def __eq__(self, other):
if not isinstance(other, _Prior):
return NotImplemented
return self.name == other.name and self.parameters() == other.parameters()
def __lt__(self, other):
if not isinstance(other, _Prior):
return NotImplemented
self_dict = sorted([(k, v) for k, v in dict(name=self.name, **self.parameters()).items() if v is not None])
other_dict = sorted([(k, v) for k, v in dict(name=other.name, **other.parameters()).items() if v is not None])
return self_dict < other_dict
def __repr__(self):
return f"<{type(self).__name__} {self.parameters()}>"
def _validate_bounds(lower, mean, upper):
any_nones = lower is None or mean is None or upper is None
any_invalid = any_nones or np.isnan(lower) or np.isnan(mean) or np.isnan(upper)
if any_invalid:
raise PriorError(f"Bounds contain invalid values: lower={lower} mean={mean} upper={upper}")
if not lower <= mean <= upper:
raise PriorError(f"Bounds are inconsistent: lower={lower} mean={mean} upper={upper}")
def _validate_standard_deviation(standard_deviation):
if standard_deviation is None or np.isnan(standard_deviation) or standard_deviation < 0:
raise PriorError(f"Standard deviation must be positive: standard deviation={standard_deviation}")
def _validate_nu(nu):
if nu is None or np.isnan(nu) or nu <= 2:
raise PriorError(f"Nu must be greater than 2: nu={nu}")
class Uniform(_Prior):
density = "uniform"
def __init__(self, lower, upper, mean=None, eta=None, name=None):
"""
Args:
lower (float): Lower bound
upper (float): Upper bound
mean (float): Doesn't make sense, but it's used to seed solver.
eta (float): Used for logarithmic distributions.
name (str): A name in case this is a pet prior.
"""
super().__init__(name=name)
if mean is None:
mean = (upper + lower) / 2
_validate_bounds(lower, mean, upper)
self.lower = lower
self.upper = upper
self.mean = mean
self.eta = eta
def mle(self, draws):
"""Using draws, assign a new mean, guaranteed between lower and upper.
Args:
draws (np.ndarray): 1D array of floats.
Returns:
Uniform: A new distribution with the mean set to the mean of draws.
"""
return self.assign(mean=min(self.upper, max(self.lower, np.mean(draws))))
def rvs(self, size=1, random_state=None):
"""Sample from this distribution.
Args:
size (int): Number of random variates, default 1.
random_state (numpy.random.RandomState): For repeatable draws.
Returns:
np.ndarray: Of size=size with floats.
"""
return stats.uniform.rvs(loc=self.lower, scale=self.upper - self.lower, size=size, random_state=random_state)
def _parameters(self):
return {"lower": self.lower, "upper": self.upper, "mean": self.mean, "eta": self.eta}
class Constant(_Prior):
density = "uniform"
def __init__(self, mean, name=None):
"""
Args:
mean (float): The const value.
name (str): A name for this prior, e.g. Susan.
"""
super().__init__(name=name)
self.mean = mean
def mle(self, _=None):
"""Don't change the const value. It is unaffected by this call."""
return copy(self)
def rvs(self, size=1, random_state=None):
"""Sample from this distribution.
Args:
size (int): Number of random variates, default 1.
random_state (numpy.random.RandomState): For repeatable draws.
Returns:
np.ndarray: Of size=size with floats.
"""
return np.full((size,), self.mean, dtype=np.float64)  # the np.float alias was removed in NumPy 1.24
def _parameters(self):
return {"lower": self.mean, "upper": self.mean, "mean": self.mean}
class Gaussian(_Prior):
r"""A Gaussian is
.. math::
f(x) = \frac{1}{\sqrt{2\pi \sigma^2}} e^{-(x-\mu)^2/(2\sigma^2)}
where :math:`\sigma` is the standard deviation and :math:`\mu` the mean.
Args:
mean (float): This is :math:`\mu`.
standard_deviation (float): This is :math:`\sigma`.
lower (float): lower limit.
upper (float): upper limit.
eta (float): Offset for calculating standard deviation.
name (str): Name for this prior.
"""
density = "gaussian"
def __init__(self, mean, standard_deviation, lower=float("-inf"), upper=float("inf"), eta=None, name=None):
super().__init__(name=name)
_validate_bounds(lower, mean, upper)
_validate_standard_deviation(standard_deviation)
self.lower = lower
self.upper = upper
self.mean = mean
self.standard_deviation = standard_deviation
self.eta = eta
def mle(self, draws):
"""Assign new mean and stdev, with mean clamped between
upper and lower.
Args:
draws (np.ndarray): A 1D array of floats.
Returns:
Gaussian: With mean and stdev set, where mean is between upper
and lower, by force. Upper and lower are unchanged.
"""
# The mean and standard deviation for Dismod-AT match the location
# and scale used by Scipy.
mean, std = stats.norm.fit(draws)
return self.assign(
mean=min(self.upper, max(self.lower, mean)),
standard_deviation=std
)
def rvs(self, size=1, random_state=None):
"""Sample from this distribution.
Args:
size (int): Number of random variates, default 1.
random_state (numpy.random.RandomState): For repeatable draws.
Returns:
np.ndarray: Of size=size with floats.
"""
vals = np.empty((0,), dtype=np.float64)
while vals.shape[0] < size:
redraw_cnt = size - vals.shape[0] + 10
draws = stats.norm.rvs(
loc=self.mean, scale=self.standard_deviation,
size=redraw_cnt, random_state=random_state)
draws = draws[(self.lower < draws) & (draws < self.upper)]
vals = np.concatenate([vals, draws])
return vals[:size]
def _parameters(self):
return {
"lower": self.lower,
"upper": self.upper,
"mean": self.mean,
"std": self.standard_deviation,
"eta": self.eta,
}
class Laplace(Gaussian):
r"""
This version of the Laplace distribution is parametrized by its variance
instead of by scaling of the axis. Usually, the Laplace distribution is
.. math::
f(x) = \frac{1}{2b}e^{-|x-\mu|/b}
where :math:`\mu` is the mean and :math:`b` is the scale, but the
variance is :math:`\sigma^2=2b^2`, so the Dismod-AT version looks like
.. math::
f(x) = \frac{1}{\sqrt{2\sigma^2}}e^{-\sqrt{2}|x-\mu|/\sigma}.
The standard deviation assigned is :math:`\sigma`.
"""
density = "laplace"
def mle(self, draws):
"""Assign new mean and stdev, with mean clamped between
upper and lower.
Args:
draws (np.ndarray): A 1D array of floats.
Returns:
Gaussian: With mean and stdev set, where mean is between upper
and lower, by force. Upper and lower are unchanged.
"""
mean, scale = stats.laplace.fit(draws)
return self.assign(
mean=min(self.upper, max(self.lower, mean)),
standard_deviation=scale * np.sqrt(2) # This is the adjustment.
)
def rvs(self, size=1, random_state=None):
"""Sample from this distribution.
Args:
size (int): Number of random variates, default 1.
random_state (numpy.random.RandomState): For repeatable draws.
Returns:
np.ndarray: Of size=size with floats.
"""
vals = np.empty((0,), dtype=np.float64)
while vals.shape[0] < size:
redraw_cnt = size - vals.shape[0] + 10
draws = stats.laplace.rvs(
loc=self.mean, scale=self.standard_deviation / np.sqrt(2), size=redraw_cnt, random_state=random_state)
draws = draws[(self.lower < draws) & (draws < self.upper)]
vals = np.concatenate([vals, draws])
return vals[:size]
class StudentsT(_Prior):
r"""
This Students-t must have :math:`\nu>2`.
Students-t distribution is usually
.. math::
f(x,\nu) = \frac{\Gamma((\nu+1)/2)}{\sqrt{\pi\nu}\,\Gamma(\nu/2)}(1+x^2/\nu)^{-(\nu+1)/2}
with mean 0 for :math:`\nu>1`. The variance is :math:`\nu/(\nu-2)` for
:math:`\nu>2`. Dismod-AT rewrites this using :math:`\sigma^2=\nu/(\nu-2)`
to get
.. math::
f(x) = \frac{\Gamma((\nu+1)/2)}{\sigma\sqrt{\pi(\nu-2)}\,\Gamma(\nu/2)}
\left(1 + (x-\mu)^2/(\sigma^2(\nu-2))\right)^{-(\nu+1)/2}
"""
density = "students"
def __init__(self, mean, standard_deviation, nu, lower=float("-inf"), upper=float("inf"), eta=None, name=None):
super().__init__(name=name)
_validate_bounds(lower, mean, upper)
_validate_standard_deviation(standard_deviation)
_validate_nu(nu)
self.lower = lower
self.upper = upper
self.mean = mean
self.standard_deviation = standard_deviation
self.nu = nu
self.eta = eta
def mle(self, draws):
"""Assign new mean and stdev, with mean clamped between
upper and lower.
Args:
draws (np.ndarray): A 1D array of floats.
Returns:
Gaussian: With mean and stdev set, where mean is between upper
and lower, by force. Upper and lower are unchanged.
"""
# This fixes the nu value.
nu, mean, scale = stats.t.fit(draws, fix_df=self.nu)
return self.assign(
mean=min(self.upper, max(self.lower, mean)),
standard_deviation=scale * np.sqrt(nu / (nu - 2))
)
def rvs(self, size=1, random_state=None):
"""Sample from this distribution.
Args:
size (int): Number of random variates, default 1.
random_state (numpy.random.RandomState): For repeatable draws.
Returns:
np.ndarray: Of size=size with floats.
"""
vals = np.empty((0,), dtype=np.float64)
std_scale = np.sqrt(self.nu / (self.nu - 2))
while vals.shape[0] < size:
redraw_cnt = size - vals.shape[0] + 10
draws = stats.t.rvs(
loc=self.mean, scale=self.standard_deviation / std_scale, df=self.nu,
size=redraw_cnt, random_state=random_state)
draws = draws[(self.lower < draws) & (draws < self.upper)]
vals = np.concatenate([vals, draws])
return vals[:size]
def _parameters(self):
return {
"lower": self.lower,
"upper": self.upper,
"mean": self.mean,
"std": self.standard_deviation,
"nu": self.nu,
"eta": self.eta,
}
class LogGaussian(_Prior):
r"""
Dismod-AT parametrizes the Log-Gaussian with the standard deviation
as
.. math::
f(x) = \frac{1}{\sqrt{2\pi\sigma^2}} e^{-\left(\log(x+\eta)-\log(\mu+\eta)\right)^2/(2\sigma^2)}
"""
density = "log_gaussian"
def __init__(self, mean, standard_deviation, eta, lower=float("-inf"), upper=float("inf"), name=None):
super().__init__(name=name)
_validate_bounds(lower, mean, upper)
_validate_standard_deviation(standard_deviation)
self.lower = lower
self.upper = upper
self.mean = mean
self.standard_deviation = standard_deviation
self.eta = eta
def mle(self, draws):
"""Assign new mean and stdev, with mean clamped between
upper and lower. This does a fit using a normal distribution.
Args:
draws (np.ndarray): A 1D array of floats.
Returns:
Gaussian: With mean and stdev set, where mean is between upper
and lower, by force. Upper and lower are unchanged.
"""
# The mean and standard deviation for Dismod-AT match the location
# and scale used by Scipy.
mean, std = stats.norm.fit(draws)
return self.assign(
mean=min(self.upper, max(self.lower, mean)),
standard_deviation=std
)
def rvs(self, size=1, random_state=None):
"""Sample from this distribution.
Args:
size (int): Number of random variates, default 1.
random_state (numpy.random.RandomState): For repeatable draws.
Returns:
np.ndarray: Of size=size with floats.
"""
vals = np.empty((0,), dtype=np.float64)
while vals.shape[0] < size:
redraw_cnt = size - vals.shape[0] + 10
draws = stats.lognorm.rvs(
loc=self.mean, s=self.standard_deviation, scale=np.exp(self.mean),
size=redraw_cnt, random_state=random_state)
draws = draws[(self.lower < draws) & (draws < self.upper)]
vals = np.concatenate([vals, draws])
return vals[:size]
def _parameters(self):
return {
"lower": self.lower,
"upper": self.upper,
"mean": self.mean,
"std": self.standard_deviation,
"eta": self.eta,
}
class LogLaplace(LogGaussian):
density = "log_laplace"
class LogStudentsT(_Prior):
density = "log_students"
def __init__(self, mean, standard_deviation, nu, eta, lower=float("-inf"), upper=float("inf"), name=None):
super().__init__(name=name)
_validate_bounds(lower, mean, upper)
_validate_standard_deviation(standard_deviation)
_validate_nu(nu)
self.lower = lower
self.upper = upper
self.mean = mean
self.standard_deviation = standard_deviation
self.nu = nu
self.eta = eta
def mle(self, draws):
"""Assign new mean and stdev, with mean clamped between
upper and lower. This does a fit using a normal distribution.
Args:
draws (np.ndarray): A 1D array of floats.
Returns:
Gaussian: With mean and stdev set, where mean is between upper
and lower, by force. Upper and lower are unchanged.
"""
# The mean and standard deviation for Dismod-AT match the location
# and scale used by Scipy.
mean, std = stats.norm.fit(draws)
return self.assign(
mean=min(self.upper, max(self.lower, mean)),
standard_deviation=std
)
def _parameters(self):
return {
"lower": self.lower,
"upper": self.upper,
"mean": self.mean,
"std": self.standard_deviation,
"nu": self.nu,
"eta": self.eta,
}
# Useful predefined priors
NO_PRIOR = Uniform(float("-inf"), float("inf"), 0, name="null_prior")
ZERO = Uniform(0, 0, 0, name="constrain_to_zero")
ZERO_TO_ONE = Uniform(0, 1, 0.1, name="uniform_zero_to_one")
MINUS_ONE_TO_ONE = Uniform(-1, 1, 0, name="uniform_negative_one_to_one")
DENSITY_ID_TO_PRIOR = {
0: Uniform,
1: Gaussian,
2: Laplace,
3: StudentsT,
4: LogGaussian,
5: LogLaplace,
6: LogStudentsT,
}
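# Illustrative lookup: DENSITY_ID_TO_PRIOR[1](mean=0.0, standard_deviation=1.0)
# constructs a Gaussian; the integer ids index Dismod-AT's density table order
# (uniform=0 through log_students=6, as laid out above).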
def prior_distribution(parameters):
density, lower, upper, value, stdev, eta, nu = [
parameters[name] for name in
[
"density", "lower", "upper", "mean", "std", "eta", "nu"
]
]
if np.isclose(lower, upper):
return Constant(value)
elif density == "uniform":
return Uniform(lower, upper, value, eta)
elif density == "gaussian":
return Gaussian(value, stdev, lower, upper, eta)
elif density == "laplace":
return Laplace(value, stdev, lower, upper, eta)
elif density == "students":
return StudentsT(value, stdev, nu, lower, upper, eta)
elif density == "log_gaussian":
return LogGaussian(value, stdev, eta, lower, upper)
elif density == "log_laplace":
return LogLaplace(value, stdev, eta, lower, upper)
elif density == "log_students":
return LogStudentsT(value, stdev, nu, eta, lower, upper)
else:
return None
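# Example round trip (illustrative values):
# prior_distribution(dict(density="gaussian", lower=0.0, upper=1.0, mean=0.1,
#                         std=0.05, eta=None, nu=None))
# returns Gaussian(0.1, 0.05, lower=0.0, upper=1.0, eta=None).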
| 32.085299 | 118 | 0.591606 | 2,253 | 17,679 | 4.533511 | 0.115846 | 0.07989 | 0.019091 | 0.033288 | 0.615528 | 0.595457 | 0.578422 | 0.551106 | 0.523889 | 0.513315 | 0 | 0.008417 | 0.287686 | 17,679 | 550 | 119 | 32.143636 | 0.802668 | 0.303071 | 0 | 0.5 | 0 | 0 | 0.064123 | 0.006978 | 0 | 0 | 0 | 0 | 0 | 1 | 0.134058 | false | 0 | 0.018116 | 0.032609 | 0.351449 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
986599e9146af7fc917f4235236600f156665da2 | 2,581 | py | Python | pubsubat/pubsub/schema.py | zerobased-co/pubsub.at | c53ee698d3d2beced0147a8aa9707f69c3ef46c1 | [
"MIT"
] | null | null | null | pubsubat/pubsub/schema.py | zerobased-co/pubsub.at | c53ee698d3d2beced0147a8aa9707f69c3ef46c1 | [
"MIT"
] | null | null | null | pubsubat/pubsub/schema.py | zerobased-co/pubsub.at | c53ee698d3d2beced0147a8aa9707f69c3ef46c1 | [
"MIT"
] | null | null | null | from django.contrib.auth import get_user_model
from graphene import relay
from graphene_django.filter import DjangoFilterConnectionField
from graphene_django.types import DjangoObjectType
from .models import User, Category, Publisher, Subscription
import graphene
class UserType(DjangoObjectType):
class Meta:
model = User
filter_fields = ['id', 'name', 'email']
interfaces = (relay.Node, )
class CategoryType(DjangoObjectType):
class Meta:
model = Category
filter_fields = {
'id': ['exact'],
'name': ['exact', 'icontains', 'istartswith'],
}
interfaces = (relay.Node, )
class PublisherType(DjangoObjectType):
class Meta:
model = Publisher
filter_fields = {
'id': ['exact'],
'name': ['exact', 'icontains', 'istartswith'],
}
interfaces = (relay.Node, )
class SubscriptionType(DjangoObjectType):
class Meta:
model = Subscription
filter_fields = {
'id': ['exact'],
'is_active': ['exact'],
'user__id': ['exact'],
'user__username': ['exact'],
'publisher__name': ['exact'],
}
exclude_fields = ('start', 'end')
interfaces = (relay.Node, )
class PubSubQuery(graphene.ObjectType):
user = relay.Node.Field(UserType)
category = relay.Node.Field(CategoryType)
categories = DjangoFilterConnectionField(CategoryType)
publisher = relay.Node.Field(PublisherType)
publishers = DjangoFilterConnectionField(PublisherType)
subscription = relay.Node.Field(SubscriptionType)
subscriptions = DjangoFilterConnectionField(SubscriptionType)
my_subscriptions = DjangoFilterConnectionField(SubscriptionType)
def resolve_my_subscriptions(self, info):
if not info.context.user.is_authenticated:
return Subscription.objects.none()
else:
return Subscription.objects.filter(user=info.context.user)
class CreateUser(graphene.Mutation):
user = graphene.Field(UserType)
class Arguments:
username = graphene.String(required=True)
password = graphene.String(required=True)
email = graphene.String(required=True)
def mutate(self, info, username, password, email):
user = get_user_model()(
username=username,
email=email,
)
user.set_password(password)
user.save()
return CreateUser(user=user)
class PubSubMutation(graphene.ObjectType):
create_user = CreateUser.Field()
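# Example mutation this schema serves (field values are illustrative):
# mutation {
#   createUser(username: "alice", password: "s3cret", email: "alice@example.com") {
#     user { id email }
#   }
# }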
| 28.362637 | 70 | 0.646649 | 235 | 2,581 | 7 | 0.297872 | 0.043769 | 0.06079 | 0.072948 | 0.087538 | 0.087538 | 0.087538 | 0.087538 | 0.087538 | 0.087538 | 0 | 0 | 0.247578 | 2,581 | 90 | 71 | 28.677778 | 0.847065 | 0 | 0 | 0.231884 | 0 | 0 | 0.063541 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.028986 | false | 0.043478 | 0.086957 | 0 | 0.478261 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
98697fdbc551569cdb320c6bb06e86e48be7478f | 389 | py | Python | test.py | bpetterborg/vex_vacuum | f4a6dc69b4a10eebcf247d34d9155ec74c3f9f99 | [
"MIT"
] | null | null | null | test.py | bpetterborg/vex_vacuum | f4a6dc69b4a10eebcf247d34d9155ec74c3f9f99 | [
"MIT"
] | null | null | null | test.py | bpetterborg/vex_vacuum | f4a6dc69b4a10eebcf247d34d9155ec74c3f9f99 | [
"MIT"
] | null | null | null | # file where i test everything
import pi_vex_393, screen # fix this
import time
motor = pi_vex_393.Motor()
display = screen.Screen()  # renamed so the screen module is not shadowed
try:
    while True:
        display.clearScreen()
        display.drawIP()
        display.drawSystemLoad()
        display.drawDebugLine(motor.currentStatus())
        motor.spin('left', 50)
        motor.spin('right', 50)
        time.sleep(0.1)  # brief pause so the loop doesn't busy-spin
except KeyboardInterrupt:
    # stop both motors cleanly on Ctrl-C
    motor.stop('left')
    motor.stop('right') | 16.208333 | 44 | 0.719794 | 51 | 389 | 5.411765 | 0.568627 | 0.036232 | 0.057971 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.030211 | 0.1491 | 389 | 24 | 45 | 16.208333 | 0.803625 | 0.095116 | 0 | 0 | 0 | 0 | 0.051429 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.133333 | 0 | 0.133333 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0
986ac397932279d6a6baa2e1d1a81f4621e3adb3 | 2,193 | py | Python | bin/watcher.py | kitaro-tn/lambda_deploy | 7da5d604f214daf3d14ada5f3a69f324130c821b | [
"MIT"
] | null | null | null | bin/watcher.py | kitaro-tn/lambda_deploy | 7da5d604f214daf3d14ada5f3a69f324130c821b | [
"MIT"
] | 1 | 2021-06-01T23:00:26.000Z | 2021-06-01T23:00:26.000Z | bin/watcher.py | tanish-kr/lambda_deploy | 7da5d604f214daf3d14ada5f3a69f324130c821b | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import os
import sys
import time
import re
import logging
import subprocess
from watchdog.observers import Observer
from watchdog.events import FileSystemEventHandler
class CIHandler(FileSystemEventHandler):
def __init__(self, context):
        super(CIHandler, self).__init__()
self.context = context
def on_created(self, event):
test(event)
def on_modified(self, event):
test(event)
def test(context):
try:
if not context.is_directory and re.compile(".py$").search(context.src_path):
logging.info("Static code analysis with pep8 :%s", context.src_path)
subprocess.call(["pep8", context.src_path])
prefix = "" if re.compile("^test_").search(context.src_path.split("/")[-1]) else "test_"
test_file_name = prefix + context.src_path.split("/")[-1]
test_file_path = os.path.join(current_path(), "tests", test_file_name)
logging.info("Unit test :%s", test_file_path)
if os.path.exists(test_file_path):
subprocess.call(["python", "-m", "unittest", test_file_path])
else:
                logging.warning("No such file %s", test_file_path)
    except Exception as err:
        logging.exception("Error while running tests: %s", err)
def current_path():
return os.path.abspath(
os.path.join(
os.path.dirname(__file__), '..'
)
)
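# current_path() resolves to the parent of this script's directory, i.e. the
# repository root, since this file lives under bin/.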
if __name__ == "__main__":
logging.basicConfig(level=logging.INFO,
format='%(asctime)s - %(message)s',
datefmt='%Y-%m-%d%H:%M:%S')
path = current_path() + "/lambda_deploy"
test_path = current_path() + "/tests"
event_handler = CIHandler(path)
test_event_handler = CIHandler(test_path)
observer = Observer()
observer.schedule(event_handler, path, recursive=True)
observer.schedule(test_event_handler, test_path, recursive=True)
observer.start()
try:
while True:
time.sleep(1)
except KeyboardInterrupt:
observer.stop()
    observer.join()
| 30.887324 | 100 | 0.622891 | 256 | 2,193 | 5.109375 | 0.375 | 0.042813 | 0.053517 | 0.036697 | 0.062691 | 0 | 0 | 0 | 0 | 0 | 0 | 0.00367 | 0.254446 | 2,193 | 70 | 101 | 31.328571 | 0.79633 | 0.009576 | 0 | 0.070175 | 0 | 0 | 0.090323 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.087719 | false | 0.017544 | 0.157895 | 0.017544 | 0.280702 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
986b4eb3404a6056f028d5b2e0888b2f1504a9e7 | 1,009 | py | Python | src/python/strelka/scanners/scan_bzip2.py | weslambert/strelka | a941020a9f363774017cb045c4ec173f144c13a0 | [
"Apache-2.0"
] | 513 | 2018-09-26T18:57:40.000Z | 2022-03-31T18:13:53.000Z | src/python/strelka/scanners/scan_bzip2.py | weslambert/strelka | a941020a9f363774017cb045c4ec173f144c13a0 | [
"Apache-2.0"
] | 79 | 2018-09-28T01:05:25.000Z | 2022-03-02T12:22:23.000Z | src/python/strelka/scanners/scan_bzip2.py | weslambert/strelka | a941020a9f363774017cb045c4ec173f144c13a0 | [
"Apache-2.0"
] | 88 | 2018-09-26T20:10:56.000Z | 2022-03-28T02:06:22.000Z | import bz2
import io
from strelka import strelka
class ScanBzip2(strelka.Scanner):
"""Decompresses bzip2 files."""
def scan(self, data, file, options, expire_at):
with io.BytesIO(data) as bzip2_io:
with bz2.BZ2File(filename=bzip2_io) as bzip2_obj:
try:
decompressed = bzip2_obj.read()
self.event['size'] = len(decompressed)
extract_file = strelka.File(
source=self.name,
)
for c in strelka.chunk_string(decompressed):
self.upload_to_coordinator(
extract_file.pointer,
c,
expire_at,
)
self.files.append(extract_file)
except EOFError:
self.flags.append('eof_error')
except OSError:
self.flags.append('os_error')
| 30.575758 | 64 | 0.474727 | 92 | 1,009 | 5.054348 | 0.554348 | 0.070968 | 0.064516 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.016216 | 0.44995 | 1,009 | 32 | 65 | 31.53125 | 0.821622 | 0.024777 | 0 | 0 | 0 | 0 | 0.021472 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.041667 | false | 0 | 0.125 | 0 | 0.208333 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
986e12c15ea58c908f90556561a79c323476a15d | 2,985 | py | Python | tgext/evolve/__init__.py | axant/tgext.evolve | 9072a156f63c22445be385014541bb5c0ad065f2 | [
"MIT"
] | null | null | null | tgext/evolve/__init__.py | axant/tgext.evolve | 9072a156f63c22445be385014541bb5c0ad065f2 | [
"MIT"
] | null | null | null | tgext/evolve/__init__.py | axant/tgext.evolve | 9072a156f63c22445be385014541bb5c0ad065f2 | [
"MIT"
] | null | null | null | from tg.configuration import milestones
from tg.appwrappers.base import ApplicationWrapper
from .evolver import Evolution
from webob.exc import HTTPServiceUnavailable
from threading import Thread
__all__ = ['plugme', 'Evolution']
import logging
log = logging.getLogger('tgext.evolve')
def plugme(configurator, options=None):
if options is None:
options = {}
evolutions = options.get('evolutions')
if not evolutions:
raise ValueError('"evolutions" option is required and must be a list of tgext.evolve.Evolution subclasses')
log.info('Setting up tgext.evolve extension...')
milestones.config_ready.register(_SetupExtension(configurator, evolutions))
# This is required to be compatible with the
# tgext.pluggable interface
return dict(appid='tgext.evolve')
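# Hypothetical wiring from an application's config module (names assumed):
#   from tgext.evolve import plugme, Evolution
#   plugme(base_config, options={'evolutions': [AddIndexesEvolution]})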
class _SetupExtension(object):
def __init__(self, configurator, evolutions):
self._configurator = configurator
self._evolutions = evolutions
def __call__(self):
from tg import hooks
hooks.register('configure_new_app', self.on_app_configured)
self._configurator.register_wrapper(_MaintenanceApplicationWrapper)
def on_app_configured(self, app):
config = app.config
enabled = config.get('tgext.evolve.enabled', 'True').lower() == 'true'
log.info('tgext.evolve enabled: %s', enabled)
if not enabled:
return
model = config['package'].model
if config.get('use_sqlalchemy', False):
log.info('Configuring tgext.evolve for SQLAlchemy')
from .sqla_evolver import SQLAEvolver
config['tgext.evolve._evolver'] = SQLAEvolver(model, self._evolutions)
elif config.get('use_ming', False):
log.info('Configuring tgext.evolve for Ming')
from .ming_evolver import MingEvolver
config['tgext.evolve._evolver'] = MingEvolver(model, self._evolutions)
else:
raise ValueError('tgext.evolve should be used with sqlalchemy or ming')
evolver = config['tgext.evolve._evolver']
evolution_thread = Thread(target=lambda *args, **kwargs: evolver.evolve())
evolution_thread.daemon = True
evolution_thread.start()
class _MaintenanceApplicationWrapper(ApplicationWrapper):
def __init__(self, handler, config):
super(_MaintenanceApplicationWrapper, self).__init__(handler, config)
self._should_check = True
    def __call__(self, controller, environ, context):
        if not self._should_check:
            return self.next_handler(controller, environ, context)
        evolver = context.config.get('tgext.evolve._evolver', None)
        if evolver is None or not evolver.is_locked():
            # once the evolver lock is released, stop checking for good
            self._should_check = False
            return self.next_handler(controller, environ, context)
        return HTTPServiceUnavailable(detail='System is currently undergoing maintenance')
| 36.402439 | 115 | 0.688442 | 326 | 2,985 | 6.107362 | 0.343558 | 0.071823 | 0.036163 | 0.036163 | 0.082371 | 0.082371 | 0.082371 | 0 | 0 | 0 | 0 | 0 | 0.220101 | 2,985 | 81 | 116 | 36.851852 | 0.855241 | 0.022781 | 0 | 0.033898 | 0 | 0 | 0.178106 | 0.036376 | 0 | 0 | 0 | 0 | 0 | 1 | 0.101695 | false | 0 | 0.152542 | 0 | 0.372881 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
986f4f99c75768f4e311bdc0d8bfb4d47dcf11f2 | 1,189 | py | Python | simple linear regression/simpleLinearReg.py | Sammy-Barasa/Artificial-Inteligence-and-Machine-Learning | 7668ec502f13fa0ec36bb909da1160c19106c4fc | [
"MIT"
] | null | null | null | simple linear regression/simpleLinearReg.py | Sammy-Barasa/Artificial-Inteligence-and-Machine-Learning | 7668ec502f13fa0ec36bb909da1160c19106c4fc | [
"MIT"
] | null | null | null | simple linear regression/simpleLinearReg.py | Sammy-Barasa/Artificial-Inteligence-and-Machine-Learning | 7668ec502f13fa0ec36bb909da1160c19106c4fc | [
"MIT"
] | null | null | null | #imports
import matplotlib.pyplot as plt
import pandas as pd
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import train_test_split
#Loading the data from a csv
dataset=pd.read_csv("Salary_Data.csv")
# Data pre-processing for salary vs experience
x=dataset.iloc[:,:-1].values
print(x)
y=dataset.iloc[:,1].values
print(y)
# Splitting the data into training and test sets
linear_regressor=LinearRegression()
x_train,x_test,y_train,y_test=train_test_split(x,y,random_state=0,test_size=1/3)
#Generating the model on training dataset
linear_regressor.fit(x_train,y_train)
#Evaluating model on testing dataset
y_predicted=linear_regressor.predict(x_test)
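# Optional sanity check (not part of the original script): quantify the fit
# on the held-out test set with R^2.
from sklearn.metrics import r2_score
print("R^2 on test set:", r2_score(y_test, y_predicted))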
#Visualising the results
#for training
plt.scatter(x_train,y_train,color="red")
plt.plot(x_train,linear_regressor.predict(x_train),color="blue")
plt.title("Salary vs Experience(training set)")
plt.xlabel("Years of Experience")
plt.ylabel("Salary")
plt.show()
#for test
plt.scatter(x_test,y_test,color="red")
plt.plot(x_train,linear_regressor.predict(x_train),color="blue")
plt.title("Salary vs Experience(Test set)")
plt.xlabel("Years of Experience")
plt.ylabel("Salary")
plt.show() | 23.78 | 80 | 0.787216 | 194 | 1,189 | 4.664948 | 0.345361 | 0.046409 | 0.059669 | 0.076243 | 0.349171 | 0.298343 | 0.298343 | 0.298343 | 0.298343 | 0.298343 | 0 | 0.004647 | 0.095038 | 1,189 | 50 | 81 | 23.78 | 0.836431 | 0.206056 | 0 | 0.32 | 0 | 0 | 0.152778 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.16 | 0 | 0.16 | 0.08 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
986fb6556cd010b2a735198814dbb2a544ab1e34 | 2,235 | py | Python | py/1041.robot-bounded-in-circle.py | ck2w/leetcode | 2d411530b690a2e51b0ae518bf3efaad2edc1083 | [
"MIT"
] | null | null | null | py/1041.robot-bounded-in-circle.py | ck2w/leetcode | 2d411530b690a2e51b0ae518bf3efaad2edc1083 | [
"MIT"
] | null | null | null | py/1041.robot-bounded-in-circle.py | ck2w/leetcode | 2d411530b690a2e51b0ae518bf3efaad2edc1083 | [
"MIT"
] | null | null | null | #
# @lc app=leetcode id=1041 lang=python3
#
# [1041] Robot Bounded In Circle
#
# https://leetcode.com/problems/robot-bounded-in-circle/description/
#
# algorithms
# Medium (54.22%)
# Likes: 557
# Dislikes: 176
# Total Accepted: 39.9K
# Total Submissions: 73.7K
# Testcase Example: '"GGLLGG"'
#
# On an infinite plane, a robot initially stands at (0, 0) and faces north.
# The robot can receive one of three instructions:
#
#
# "G": go straight 1 unit;
# "L": turn 90 degrees to the left;
# "R": turn 90 degress to the right.
#
#
# The robot performs the instructions given in order, and repeats them
# forever.
#
# Return true if and only if there exists a circle in the plane such that the
# robot never leaves the circle.
#
#
#
# Example 1:
#
#
# Input: "GGLLGG"
# Output: true
# Explanation:
# The robot moves from (0,0) to (0,2), turns 180 degrees, and then returns to
# (0,0).
# When repeating these instructions, the robot remains in the circle of radius
# 2 centered at the origin.
#
#
# Example 2:
#
#
# Input: "GG"
# Output: false
# Explanation:
# The robot moves north indefinitely.
#
#
# Example 3:
#
#
# Input: "GL"
# Output: true
# Explanation:
# The robot moves from (0, 0) -> (0, 1) -> (-1, 1) -> (-1, 0) -> (0, 0) ->
# ...
#
#
#
#
# Note:
#
#
# 1 <= instructions.length <= 100
# instructions[i] is in {'G', 'L', 'R'}
#
#
#
# @lc code=start
class Solution:
def isRobotBounded(self, instructions: str) -> bool:
'''
It's a limit cycle trajectory if the robot is back to the
center: x = y = 0 or if the robot doesn't face north: idx != 0.
'''
directions = [[0, 1], [1, 0], [0, -1], [-1, 0]]
# Initial position is in the center
x = y = 0
# facing north
idx = 0
for i in instructions:
if i == "L":
idx = (idx + 3) % 4
elif i == "R":
idx = (idx + 1) % 4
else:
x += directions[idx][0]
y += directions[idx][1]
# after one cycle:
# robot returns into initial position
# or robot doesn't face north
return (x == 0 and y == 0) or idx != 0
# @lc code=end
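# Quick local check (outside the LeetCode harness):
#   Solution().isRobotBounded("GGLLGG")  # -> True
#   Solution().isRobotBounded("GG")      # -> False
#   Solution().isRobotBounded("GL")      # -> True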
| 21.699029 | 78 | 0.561074 | 314 | 2,235 | 3.993631 | 0.455414 | 0.057416 | 0.045455 | 0.057416 | 0.114833 | 0.063796 | 0.063796 | 0.063796 | 0.063796 | 0 | 0 | 0.051216 | 0.301119 | 2,235 | 102 | 79 | 21.911765 | 0.751601 | 0.67472 | 0 | 0 | 0 | 0 | 0.003273 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.071429 | false | 0 | 0 | 0 | 0.214286 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
9871044a50f6b8ea1c144e417127b1e4c3c0e043 | 1,035 | py | Python | Day10-19/11.py | bcongdon/advent_of_code_2017 | ad9a9b028716c9387dddc3ef9ee34c3a70fea151 | [
"MIT"
] | 15 | 2017-12-05T16:01:39.000Z | 2020-11-03T00:01:03.000Z | Day10-19/11.py | bcongdon/advent_of_code_2017 | ad9a9b028716c9387dddc3ef9ee34c3a70fea151 | [
"MIT"
] | null | null | null | Day10-19/11.py | bcongdon/advent_of_code_2017 | ad9a9b028716c9387dddc3ef9ee34c3a70fea151 | [
"MIT"
] | null | null | null | def manhatten_hex_dist(orig, dest):
dist_x = dest[0] - orig[0]
dist_y = dest[1] - orig[1]
    # both comparisons must be parenthesized: without parentheses Python
    # chains the operators ("dist_x > 0 == dist_y > 0") and the condition
    # is never true
    if (dist_x > 0) == (dist_y > 0):
return abs(dist_x + dist_y)
else:
return max(abs(dist_x), abs(dist_y))
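# Examples on this axial hex grid (sanity sketch):
#   manhatten_hex_dist((0, 0), (1, 1)) == 2   # same signs -> abs(dx + dy)
#   manhatten_hex_dist((0, 0), (2, 0)) == 2   # otherwise  -> max(|dx|, |dy|)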
def next_loc(orig, direc):
if direc == 'n':
return (orig[0], orig[1]+1)
elif direc == 'ne':
return (orig[0]+1, orig[1])
elif direc == 'se':
return (orig[0]+1, orig[1]-1)
elif direc == 's':
return (orig[0], orig[1]-1)
elif direc == 'sw':
return (orig[0]-1, orig[1])
elif direc == 'nw':
return (orig[0]-1, orig[1]+1)
def path_dist(path):
loc = (0, 0)
m = 0
for direc in path:
loc = next_loc(loc, direc)
m = max(m, manhatten_hex_dist((0, 0), loc))
return manhatten_hex_dist((0, 0), loc), m
if __name__ == '__main__':
with open('11.txt') as f:
directions = f.read().split(',')
part1, part2 = path_dist(directions)
print("Part 1: {}".format(part1))
print("Part 2: {}".format(part2))
| 25.243902 | 51 | 0.535266 | 164 | 1,035 | 3.219512 | 0.280488 | 0.066288 | 0.125 | 0.090909 | 0.361742 | 0.344697 | 0.265152 | 0.19697 | 0 | 0 | 0 | 0.055034 | 0.280193 | 1,035 | 40 | 52 | 25.875 | 0.653691 | 0 | 0 | 0 | 0 | 0 | 0.043478 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.090909 | false | 0 | 0 | 0 | 0.363636 | 0.060606 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
987212e9935b27eb8454286fdd3a99fa1b5c3a93 | 3,809 | py | Python | handwritten_ocr/pdf_to_images.py | testigos2022/ocr-forms | da4e4b70975d799754bdc470526295cdefc77d34 | [
"MIT"
] | null | null | null | handwritten_ocr/pdf_to_images.py | testigos2022/ocr-forms | da4e4b70975d799754bdc470526295cdefc77d34 | [
"MIT"
] | null | null | null | handwritten_ocr/pdf_to_images.py | testigos2022/ocr-forms | da4e4b70975d799754bdc470526295cdefc77d34 | [
"MIT"
] | null | null | null | import os
from dataclasses import dataclass, field
from pathlib import Path
from typing import Callable, Union, Iterator, Iterable
from PIL.PpmImagePlugin import PpmImageFile
from misc_utils.cached_data import CachedData
from misc_utils.dataclass_utils import _UNDEFINED, UNDEFINED
from misc_utils.prefix_suffix import PrefixSuffix, BASE_PATHES
from pdf2image import convert_from_path
from tqdm import tqdm
@dataclass
class CroppedImages(CachedData, Iterable[str]):
pdf_file: Union[_UNDEFINED, str] = UNDEFINED
x_window_scale: int = 1
y_window_scale: int = 3
x_step_size_fun: Callable[[PpmImageFile], int] = field(
default=lambda page: round(page.width - 1)
)
y_step_size_fun: Callable[[PpmImageFile], int] = field(
default=lambda page: round(1.41 * page.width / 30)
)
@property
def name(self):
return Path(self.pdf_file).name
@property
def output_dir(self):
return self.prefix_cache_dir("cropped_images")
def _build_cache(self):
        pages = convert_from_path(self.pdf_file, 200)  # the instance's file, not a module-level global
for k, page in tqdm(enumerate(pages)):
self._process_page(k, page)
def generate_cropboxes(self, page, x_step, y_step):
for x in range(0, page.width - x_step, x_step):
for y in range(0, page.height - y_step, y_step):
x1, y1 = (
x + x_step * self.x_window_scale,
y + y_step * self.y_window_scale,
)
yield x, y, x1, y1
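    # Note: with the default step functions above, generate_cropboxes yields
    # overlapping horizontal strips roughly y_window_scale steps tall, since
    # x_step spans nearly the full page width (an observation about the
    # defaults, not a guarantee).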
def _process_page(self, k, page: PpmImageFile):
x_step = self.x_step_size_fun(page)
y_step = self.y_step_size_fun(page)
page_dir = f"{self.output_dir}/{Path(pdf_file).name}-{k}"
os.makedirs(page_dir, exist_ok=True)
for b in tqdm(self.generate_cropboxes(page, x_step, y_step)):
(x, y, x1, y1) = b
cropped = page.crop(b)
cropped.save(f"{page_dir}/cropped_{x}_{y}.jpg", "JPEG")
page.save(f"{self.output_dir}/{Path(pdf_file).name}-{k}.jpg", "JPEG")
def __iter__(self) -> Iterator[str]:
for p in Path(self.output_dir).rglob("cropped*.jpg"):
yield str(p)
@dataclass
class ImagesFromPdf(CachedData, Iterable[PrefixSuffix]):
pdf_file: Union[_UNDEFINED, PrefixSuffix] = UNDEFINED
cache_base: PrefixSuffix = field(
default_factory=lambda: PrefixSuffix("cache_root", "pdf_page_images")
)
@property
def name(self):
return Path(str(self.pdf_file)).name
@property
def output_dir(self):
return self.prefix_cache_dir("data")
def _build_cache(self):
os.makedirs(self.output_dir, exist_ok=True)
pages = convert_from_path(str(self.pdf_file), 200)
for k, page in tqdm(enumerate(pages)):
page.save(
f"{self.output_dir}/{Path(str(self.pdf_file)).name}-{k}.jpg", "JPEG"
)
def __iter__(self) -> Iterator[PrefixSuffix]:
for p in Path(self.output_dir).rglob(f"*.jpg"):
yield self.cache_dir.from_str_same_prefix(str(p))
if __name__ == "__main__":
# pip install pdf2image
data_path = os.environ["DATA_PATH"]
BASE_PATHES["cache_root"] = f"{data_path}/cache"
# pdf_file = f"{data_path}/esc_cong_2018/data/esc_cong_2018_archivos_divulgacion_AGE_XXX_2_01_004_XXX_XX_XX_X_1052_F_49.pdf"
# pdf_file = f"{data_path}/esc_cong_2018_archivos_divulgacion_AGE_XXX_2_01_004_XXX_XX_XX_X_1052_F_49.pdf"
pdf_file = f"{data_path}/e14_cong_2018__e14_divulgacion_01_001_001_CAM_E14_CAM_X_01_001_001_XX_01_005_X_XXX.pdf"
CroppedImages(
pdf_file=pdf_file,
cache_base=PrefixSuffix("cache_root", "cropped_images"),
x_window_scale=1,
y_window_scale=3,
).build()
| 34.627273 | 128 | 0.667104 | 550 | 3,809 | 4.28 | 0.216364 | 0.041631 | 0.033135 | 0.019116 | 0.350042 | 0.333475 | 0.304588 | 0.290144 | 0.254885 | 0.234494 | 0 | 0.031419 | 0.222893 | 3,809 | 109 | 129 | 34.944954 | 0.763851 | 0.065109 | 0 | 0.164706 | 0 | 0 | 0.11677 | 0.077378 | 0 | 0 | 0 | 0 | 0 | 1 | 0.117647 | false | 0 | 0.129412 | 0.047059 | 0.4 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
98725bcf75d90a74eac93b39fd6ebe4b9b3823b0 | 1,654 | py | Python | pd_parallel/tools.py | xbanke/pandas-parallel | f3b5f0cba1b639551cc9a1e04af58f4015053c99 | [
"MIT"
] | 1 | 2019-05-11T22:11:46.000Z | 2019-05-11T22:11:46.000Z | pd_parallel/tools.py | xbanke/pandas-parallel | f3b5f0cba1b639551cc9a1e04af58f4015053c99 | [
"MIT"
] | null | null | null | pd_parallel/tools.py | xbanke/pandas-parallel | f3b5f0cba1b639551cc9a1e04af58f4015053c99 | [
"MIT"
] | null | null | null | #! /usr/bin/env python
# -*- coding: utf-8 -*-
"""
@version: 0.1
@author: quantpy
@file: tools.py
@time: 2018/4/11 16:25
"""
import os
from functools import partial
import numpy as np
import pandas as pd
from .apply_parallel import df_group_apply_parallel
def get_grouper(df: pd.DataFrame, by=None, axis=0, level=None,
section_size=None, section_count=os.cpu_count(), **kwargs):
"""regroup by df.groupby"""
df_group = df.groupby(by=by, axis=axis, level=level, **kwargs)
# n_groups = df_group.ngroups
n_groups = df_group.count().shape[0]
if section_size:
section_count = int(np.ceil(n_groups / section_size))
# section_size = n_groups // section_count
grouper = [pd.Series(i % section_count, index=d.index) for i, (k, d) in enumerate(df_group)]
grouper = pd.concat(grouper)
return grouper
def double_groupby_apply_parallel(df: pd.DataFrame, func, *args, grouper_kws: dict, parallel_kws: dict = None, **kwargs):
""""""
grouper = get_grouper(df, **grouper_kws)
df_group = df.groupby(grouper)
_ = grouper_kws.pop('section_size', None)
_ = grouper_kws.pop('section_count', None)
f = partial(_f, func=func, args=args, grouper_kws=grouper_kws, kwargs=kwargs)
ret = df_group_apply_parallel(df_group, f, **(parallel_kws or {}))
# ret = df_group_apply_parallel(df_group, func, *args, **kwargs)
ret.index = ret.index.droplevel(0)
return ret
def _f(df: pd.DataFrame, func, args, grouper_kws, kwargs):
return df.groupby(**grouper_kws).apply(func, *args, **kwargs)
pd.DataFrame.double_groupby_apply_parallel = double_groupby_apply_parallel
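# Sketch of intended usage (column name and section count are assumptions):
#   result = df.double_groupby_apply_parallel(
#       my_func, grouper_kws={'by': 'key', 'section_count': 8})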
| 31.207547 | 121 | 0.689843 | 244 | 1,654 | 4.442623 | 0.315574 | 0.064576 | 0.03321 | 0.055351 | 0.112546 | 0.112546 | 0.112546 | 0 | 0 | 0 | 0 | 0.012445 | 0.174123 | 1,654 | 52 | 122 | 31.807692 | 0.781113 | 0.165659 | 0 | 0 | 0 | 0 | 0.018437 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.115385 | false | 0 | 0.192308 | 0.038462 | 0.423077 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
9873a808f133176d36f13d62415361d7e99b6f93 | 3,563 | py | Python | general/lies-damnlies-stats/server/stats/tests.py | jeremyosborne/examples-python | 5900b3a4f47d59de0a32d3257a8b90a44e80fdcd | [
"MIT"
] | null | null | null | general/lies-damnlies-stats/server/stats/tests.py | jeremyosborne/examples-python | 5900b3a4f47d59de0a32d3257a8b90a44e80fdcd | [
"MIT"
] | null | null | null | general/lies-damnlies-stats/server/stats/tests.py | jeremyosborne/examples-python | 5900b3a4f47d59de0a32d3257a8b90a44e80fdcd | [
"MIT"
] | null | null | null | """
Automated tests for the ldls application.
"""
import json
from datetime import datetime
from django.utils import unittest
from django.db import models
from jsonserializermixin import JSONSerializerMixin
from jsonencoderdelegator import JSONEncoderDelegator
class TestModel(models.Model, JSONSerializerMixin):
"""A sample test model.
"""
count = models.IntegerField()
class TestRelatedModel(models.Model, JSONSerializerMixin):
"""A sample model related to the test model.
"""
owner = models.ForeignKey(TestModel)
description = models.TextField()
class TestDescribedModel(models.Model, JSONSerializerMixin):
"""A sample model related to the test model, but doesn't describe the
relation.
"""
owner = models.ForeignKey(TestModel)
description = models.TextField()
def describe(self):
"""Testing out the whitelist for only the description.
"""
return {
"description": "string",
}
class JSONSerializerMixinTest(unittest.TestCase):
"""Test the json serializer mixin, ensure that it returns a JSON
friendly object.
"""
def setUp(self):
self.t = TestModel.objects.create(count=42)
self.d = TestDescribedModel.objects.create(owner=self.t, description=24)
def tearDown(self):
self.d.delete()
self.t.delete()
def test_sanity(self):
self.assertEqual(self.t.tojsonobject(),
{"count": 42, "id": 1},
"Serialized model matches JSON friendly object.")
self.assertEqual(json.dumps(self.t.tojsonobject(), sort_keys=True),
'{"count": 42, "id": 1}',
"Serialized model behaves correctly in json.dumps.")
def test_describe(self):
self.assertEqual(self.d.tojsonobject(),
{"description": "24"},
"White list correctly ignores the owner attribute.")
class JSONEncoderDelegatorTest(unittest.TestCase):
def setUp(self):
self.testlist = [TestModel.objects.create(count=42),
TestModel.objects.create(count=42),]
self.relatedTestModel = TestRelatedModel.objects.create(
owner=self.testlist[0],
description="42"
)
def tearDown(self):
# Remove models with relations, first.
self.relatedTestModel.delete()
del self.relatedTestModel
# Remove non related models.
for t in self.testlist:
t.delete()
del self.testlist
def test_sanity(self):
# Expected iterators work as expected.
testobject = [42, 42]
json = JSONEncoderDelegator()
output = json.encode(testobject)
self.assertEqual(output,
"[42, 42]",
"Standard items serialized correctly.")
def test_list(self):
json = JSONEncoderDelegator()
output = json.encode(self.testlist)
self.assertEqual(output,
'[{"count": 42, "id": 1}, {"count": 42, "id": 2}]',
"jsonserializer in a list works as expected.")
def test_related(self):
self.assertEqual(self.relatedTestModel.tojsonobject(),
{'owner': {'fk': 1}, 'id': 1, 'description': u'42'},
"Models return a simple object with related fk.")
| 33.613208 | 80 | 0.579848 | 345 | 3,563 | 5.971014 | 0.310145 | 0.023786 | 0.017476 | 0.045146 | 0.242718 | 0.171845 | 0.115534 | 0.061165 | 0.061165 | 0.061165 | 0 | 0.01522 | 0.31771 | 3,563 | 105 | 81 | 33.933333 | 0.832168 | 0.121527 | 0 | 0.208955 | 0 | 0 | 0.132597 | 0 | 0 | 0 | 0 | 0 | 0.089552 | 1 | 0.149254 | false | 0 | 0.089552 | 0 | 0.402985 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
98750fb5aa7191d7cee8eb6e8dd2026b54b8928a | 344 | py | Python | test/test_pettingzoo.py | PettingZoo-Team/hanabi-learning-environment | 2c91462cbdd8df94e6e63f24aec746343b48b664 | [
"Apache-2.0"
] | 8 | 2020-05-29T04:21:29.000Z | 2020-06-05T20:24:39.000Z | test/test_pettingzoo.py | PettingZoo-Team/hanabi-learning-environment | 2c91462cbdd8df94e6e63f24aec746343b48b664 | [
"Apache-2.0"
] | null | null | null | test/test_pettingzoo.py | PettingZoo-Team/hanabi-learning-environment | 2c91462cbdd8df94e6e63f24aec746343b48b664 | [
"Apache-2.0"
] | 4 | 2020-05-29T04:31:28.000Z | 2020-06-05T20:24:43.000Z | import pytest
from pettingzoo.classic import hanabi_v4
def test_pettingzoo():
env = hanabi_v4.env()
env.reset()
for agent in env.agent_iter():
observation, reward, done, info = env.last()
if done:
return
else:
action = env.action_space(agent).sample()
env.step(action)
| 21.5 | 53 | 0.59593 | 42 | 344 | 4.761905 | 0.642857 | 0.08 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.008368 | 0.305233 | 344 | 15 | 54 | 22.933333 | 0.828452 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.083333 | false | 0 | 0.166667 | 0 | 0.333333 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
98802c9f1ac3cac5655a6b81c0649a8621ae583b | 38,401 | py | Python | bucky/model/main.py | ragram88/bucky | 7840e3821af0124bedf4d806a1af34b8ebb20647 | [
"MIT"
] | 1 | 2021-08-11T20:31:35.000Z | 2021-08-11T20:31:35.000Z | bucky/model/main.py | ragram88/bucky | 7840e3821af0124bedf4d806a1af34b8ebb20647 | [
"MIT"
] | null | null | null | bucky/model/main.py | ragram88/bucky | 7840e3821af0124bedf4d806a1af34b8ebb20647 | [
"MIT"
] | null | null | null | """The main module handling the simulation"""
import copy
import datetime
import logging
import os
import pickle
import queue
import random
import sys
import threading
import warnings
from functools import lru_cache
from pprint import pformat # TODO set some defaults for width/etc with partial?
import numpy as np
import pandas as pd
import pyarrow as pa
import pyarrow.parquet as pap
import tqdm
from ..numerical_libs import enable_cupy, reimport_numerical_libs, xp, xp_ivp
from ..util.distributions import approx_mPERT_sample, truncnorm
from ..util.util import TqdmLoggingHandler, _banner
from .arg_parser_model import parser
from .estimation import estimate_Rt
from .exceptions import SimulationException
from .graph import buckyGraphData
from .mc_instance import buckyMCInstance
from .npi import get_npi_params
from .parameters import buckyParams
from .rhs import RHS_func
from .state import buckyState
# suppress pandas warning caused by pyarrow
warnings.simplefilter(action="ignore", category=FutureWarning)
# TODO we do a lot of allowing div by 0 and then checking for nans later; we should probably refactor that
warnings.simplefilter(action="ignore", category=RuntimeWarning)
@lru_cache(maxsize=None)
def get_runid(): # TODO move to util and rename to timeid or something
"""Gets a UUID based of the current datatime and caches it"""
dt_now = datetime.datetime.now()
return str(dt_now).replace(" ", "__").replace(":", "_").split(".")[0]
def frac_last_n_vals(arr, n, axis=0, offset=0):  # TODO assumes values come from the end of the array; move to util
    """Return the last n values of an array along an axis; a fractional n contributes a proportionally weighted extra boundary element"""
int_slice_ind = (
[slice(None)] * (axis)
+ [slice(-int(n + offset), -int(xp.ceil(offset)) or None)]
+ [slice(None)] * (arr.ndim - axis - 1)
)
ret = arr[int_slice_ind]
# handle fractional element before the standard slice
if (n + offset) % 1:
frac_slice_ind = (
[slice(None)] * (axis)
+ [slice(-int(n + offset + 1), -int(n + offset))]
+ [slice(None)] * (arr.ndim - axis - 1)
)
ret = xp.concatenate((((n + offset) % 1) * arr[frac_slice_ind], ret), axis=axis)
# handle fractional element after the standard slice
if offset % 1:
frac_slice_ind = (
[slice(None)] * (axis)
+ [slice(-int(offset + 1), -int(offset) or None)]
+ [slice(None)] * (arr.ndim - axis - 1)
)
ret = xp.concatenate((ret, (1.0 - (offset % 1)) * arr[frac_slice_ind]), axis=axis)
return ret
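# Worked example of the fractional slicing (illustrative values):
#   frac_last_n_vals(xp.arange(10.0), 2.5) -> [3.5, 8.0, 9.0],
#   i.e. half of element 7 plus the last two whole elements.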
class buckyModelCovid:
"""Class that handles one full simulation (both time integration and managing MC states)"""
def __init__(
self,
debug=False,
sparse_aij=False,
t_max=None,
graph_file=None,
par_file=None,
npi_file=None,
disable_npi=False,
reject_runs=False,
):
"""Initialize the class, do some bookkeeping and read in the input graph"""
self.debug = debug
self.sparse = sparse_aij # we can default to none and autodetect
        # w/ override (maybe when #adm2 > 5k and some sparsity criteria?)
# Integrator params
self.t_max = t_max
self.run_id = get_runid()
logging.info(f"Run ID: {self.run_id}")
self.npi_file = npi_file
self.disable_npi = disable_npi
self.reject_runs = reject_runs
self.output_dates = None
# COVID/model params from par file
self.bucky_params = buckyParams(par_file)
self.consts = self.bucky_params.consts
self.dists = self.bucky_params.dists
self.g_data = self.load_graph(graph_file)
def update_params(self, update_dict):
self.bucky_params.update_params(update_dict)
self.consts = self.bucky_params.consts
self.dists = self.bucky_params.dists
def load_graph(self, graph_file):
"""Load the graph data and calculate all the variables that are static across MC runs"""
# TODO refactor to just have this return g_data
logging.info("loading graph")
with open(graph_file, "rb") as f:
G = pickle.load(f) # nosec
# Load data from input graph
# TODO we should go through an replace lots of math using self.g_data.* with function IN buckyGraphData
g_data = buckyGraphData(G, self.sparse)
# Make contact mats sym and normalized
self.contact_mats = G.graph["contact_mats"]
if self.debug:
logging.debug(f"graph contact mats: {G.graph['contact_mats'].keys()}")
for mat in self.contact_mats:
c_mat = xp.array(self.contact_mats[mat])
c_mat = (c_mat + c_mat.T) / 2.0
self.contact_mats[mat] = c_mat
# remove all_locations so we can sum over the them ourselves
if "all_locations" in self.contact_mats:
del self.contact_mats["all_locations"]
# Remove unknown contact mats
valid_contact_mats = ["home", "work", "other_locations", "school"]
self.contact_mats = {k: v for k, v in self.contact_mats.items() if k in valid_contact_mats}
self.Cij = xp.vstack([self.contact_mats[k][None, ...] for k in sorted(self.contact_mats)])
# Get stratified population (and total)
self.Nij = g_data.Nij
self.Nj = g_data.Nj
self.n_age_grps = self.Nij.shape[0] # TODO factor out
self.init_date = datetime.date.fromisoformat(G.graph["start_date"])
self.base_mc_instance = buckyMCInstance(self.init_date, self.t_max, self.Nij, self.Cij)
# fill in npi_params either from file or as ones
self.npi_params = get_npi_params(g_data, self.init_date, self.t_max, self.npi_file, self.disable_npi)
if self.npi_params["npi_active"]:
self.base_mc_instance.add_npi(self.npi_params)
self.adm0_cfr_reported = None
self.adm1_cfr_reported = None
self.adm2_cfr_reported = None
# If HHS hospitalization data is on the graph, use it to rescale initial H counts and CHR
# self.rescale_chr = "hhs_data" in G.graph
if self.consts.rescale_chr:
self.adm1_current_hosp = xp.zeros((g_data.max_adm1 + 1,), dtype=float)
# TODO move hosp data to the graph nodes and handle it with graph.py the way cases/deaths are
hhs_data = G.graph["hhs_data"].reset_index()
hhs_data["date"] = pd.to_datetime(hhs_data["date"])
hhs_data = (
hhs_data.set_index("date")
.sort_index()
.groupby("adm1")
.rolling(7)
.mean()
.drop(columns="adm1")
.reset_index()
)
hhs_curr_data = hhs_data.loc[hhs_data.date == pd.Timestamp(self.init_date)]
hhs_curr_data = hhs_curr_data.set_index("adm1").sort_index()
tot_hosps = (
hhs_curr_data.total_adult_patients_hospitalized_confirmed_covid
+ hhs_curr_data.total_pediatric_patients_hospitalized_confirmed_covid
)
self.adm1_current_hosp[tot_hosps.index.to_numpy()] = tot_hosps.to_numpy()
if self.debug:
logging.debug("Current hospitalizations: " + pformat(self.adm1_current_hosp))
# Estimate the recent CFR during the period covered by the historical data
cfr_delay = 25 # 14 # TODO This should come from CDC and Nij
n_cfr = 14
last_cases = (
g_data.rolling_cum_cases[-cfr_delay - n_cfr : -cfr_delay] - g_data.rolling_cum_cases[-cfr_delay - n_cfr - 1]
)
last_deaths = g_data.rolling_cum_deaths[-n_cfr:] - g_data.rolling_cum_deaths[-n_cfr - 1]
adm1_cases = g_data.sum_adm1(last_cases.T)
adm1_deaths = g_data.sum_adm1(last_deaths.T)
negative_mask = (adm1_deaths < 0.0) | (adm1_cases < 0.0)
adm1_cfr = adm1_deaths / adm1_cases
adm1_cfr[negative_mask] = xp.nan
# take mean over n days
self.adm1_current_cfr = xp.nanmedian(adm1_cfr, axis=1)
# Estimate recent CHR
if self.consts.rescale_chr:
chr_delay = 20 # TODO This should come from I_TO_H_TIME and Nij as a float (it's ~5.8)
n_chr = 7
tmp = hhs_data.loc[hhs_data.date > pd.Timestamp(self.init_date - datetime.timedelta(days=n_chr))]
tmp = tmp.loc[tmp.date <= pd.Timestamp(self.init_date)]
tmp = tmp.set_index(["adm1", "date"]).sort_index()
tmp = (
tmp.previous_day_admission_adult_covid_confirmed + tmp.previous_day_admission_pediatric_covid_confirmed
)
cum_hosps = xp.zeros((adm1_cfr.shape[0], n_chr))
tmp = tmp.unstack()
tmp_data = tmp.T.cumsum().to_numpy()
tmp_ind = tmp.index.to_numpy()
cum_hosps[tmp_ind] = tmp_data.T
last_cases = (
g_data.rolling_cum_cases[-chr_delay - n_chr : -chr_delay]
- g_data.rolling_cum_cases[-chr_delay - n_chr - 1]
)
adm1_cases = g_data.sum_adm1(last_cases.T)
adm1_hosps = cum_hosps # g_data.sum_adm1(last_hosps.T)
adm1_chr = adm1_hosps / adm1_cases
# take mean over n days
self.adm1_current_chr = xp.mean(adm1_chr, axis=1)
# self.adm1_current_chr = self.calc_lagged_rate(g_data.adm1_cum_case_hist, cum_hosps.T, chr_delay, n_chr)
if self.debug:
logging.debug("Current CFR: " + pformat(self.adm1_current_cfr))
return g_data
def reset(self, seed=None, params=None):
"""Reset the state of the model and generate new inital data from a new random seed"""
# TODO we should refactor reset of the compartments to be real pop numbers then /Nij at the end
if seed is not None:
random.seed(int(seed))
np.random.seed(seed)
xp.random.seed(seed)
# reroll model params if we're doing that kind of thing
self.g_data.Aij.perturb(self.consts.reroll_variance)
self.params = self.bucky_params.generate_params()
if params is not None:
self.params = copy.deepcopy(params)
if self.debug:
logging.debug("params: " + pformat(self.params, width=120))
for k in self.params:
if type(self.params[k]).__module__ == np.__name__:
self.params[k] = xp.asarray(self.params[k])
# TODO consolidate all the broadcast_to calls
self.params.H = xp.broadcast_to(self.params.H[:, None], self.Nij.shape)
self.params.F = xp.broadcast_to(self.params.F[:, None], self.Nij.shape)
if self.consts.rescale_chr:
# TODO this needs to be cleaned up BAD
adm1_Ni = self.g_data.adm1_Nij
adm1_N = self.g_data.adm1_Nj
# estimate adm2 expected CFR weighted by local age demo
tmp = self.params.F[:, 0][..., None] * self.g_data.adm1_Nij / self.g_data.adm1_Nj
adm1_F = xp.sum(tmp, axis=0)
# get ratio of actual CFR to expected CFR
adm1_F_fac = self.adm1_current_cfr / adm1_F
adm0_F_fac = xp.nanmean(adm1_N * adm1_F_fac) / xp.sum(adm1_N)
adm1_F_fac[xp.isnan(adm1_F_fac)] = adm0_F_fac
F_RR_fac = truncnorm(1.0, self.dists.F_RR_var, size=adm1_F_fac.size, a_min=1e-6)
if self.debug:
logging.debug("adm1 cfr rescaling factor: " + pformat(adm1_F_fac))
self.params.F = self.params.F * F_RR_fac[self.g_data.adm1_id] * adm1_F_fac[self.g_data.adm1_id]
self.params.F = xp.clip(self.params.F, a_min=1.0e-10, a_max=1.0)
adm1_Hi = self.g_data.sum_adm1((self.params.H * self.Nij).T).T
adm1_Hi = adm1_Hi / adm1_Ni
adm1_H = xp.nanmean(adm1_Hi, axis=0)
adm1_H_fac = self.adm1_current_chr / adm1_H
adm0_H_fac = xp.nanmean(adm1_N * adm1_H_fac) / xp.sum(adm1_N)
adm1_H_fac[xp.isnan(adm1_H_fac)] = adm0_H_fac
H_RR_fac = truncnorm(1.0, self.dists.H_RR_var, size=adm1_H_fac.size, a_min=1e-6)
adm1_H_fac = adm1_H_fac * H_RR_fac
# adm1_H_fac = xp.clip(adm1_H_fac, a_min=0.1, a_max=10.0) # prevent extreme values
if self.debug:
logging.debug("adm1 chr rescaling factor: " + pformat(adm1_H_fac))
self.params.H = self.params.H * adm1_H_fac[self.g_data.adm1_id]
self.params.H = xp.clip(self.params.H, a_min=self.params.F, a_max=1.0)
# crr_days_needed = max( #TODO this depends on all the Td params, and D_REPORT_TIME...
case_reporting = self.estimate_reporting(
self.g_data,
self.params,
cfr=self.params.F,
# case_lag=14,
days_back=25,
min_deaths=self.consts.case_reporting_min_deaths,
)
self.case_reporting = approx_mPERT_sample( # TODO these facs should go in param file
mu=xp.clip(case_reporting, a_min=0.05, a_max=0.95),
a=xp.clip(0.7 * case_reporting, a_min=0.01, a_max=0.9),
b=xp.clip(1.3 * case_reporting, a_min=0.1, a_max=1.0),
gamma=50.0,
)
mean_case_reporting = xp.nanmean(self.case_reporting[-self.consts.case_reporting_N_historical_days :], axis=0)
self.params["CASE_REPORT"] = mean_case_reporting
self.params["THETA"] = xp.broadcast_to(
self.params["THETA"][:, None], self.Nij.shape
) # TODO move all the broadcast_to's to one place, they're all over reset()
self.params["GAMMA_H"] = xp.broadcast_to(self.params["GAMMA_H"][:, None], self.Nij.shape)
self.params["F_eff"] = xp.clip(self.params["F"] / self.params["H"], 0.0, 1.0)
# state building init state vector (self.y)
yy = buckyState(self.consts, self.Nij)
if self.debug:
logging.debug("case init")
Ti = self.params.Ti
current_I = xp.sum(frac_last_n_vals(self.g_data.rolling_inc_cases, Ti, axis=0), axis=0)
current_I[xp.isnan(current_I)] = 0.0
current_I[current_I < 0.0] = 0.0
current_I *= 1.0 / (self.params["CASE_REPORT"])
# Roll some random factors for the init compartment values
R_fac = approx_mPERT_sample(**(self.dists.R_fac_dist))
E_fac = approx_mPERT_sample(**(self.dists.E_fac_dist))
H_fac = approx_mPERT_sample(**(self.dists.H_fac_dist))
age_dist_fac = self.Nij / xp.sum(self.Nij, axis=0, keepdims=True)
I_init = E_fac * current_I[None, :] * age_dist_fac / self.Nij # / self.n_age_grps
D_init = self.g_data.cum_death_hist[-1][None, :] * age_dist_fac / self.Nij # / self.n_age_grps
recovered_init = (self.g_data.cum_case_hist[-1] / self.params["SYM_FRAC"]) * R_fac
R_init = (
(recovered_init) * age_dist_fac / self.Nij - D_init - I_init / self.params["SYM_FRAC"]
) # Rh is factored in later
Rt = estimate_Rt(self.g_data, self.params, 7, self.case_reporting)
Rt_fac = approx_mPERT_sample(**(self.dists.Rt_dist))
Rt = Rt * Rt_fac
self.params["R0"] = Rt
self.params["BETA"] = Rt * self.params["GAMMA"] / self.g_data.Aij.diag
exp_frac = (
E_fac
* xp.ones(I_init.shape[-1])
* (self.params.R0)
* self.params.GAMMA
/ self.params.SIGMA
/ (1.0 - R_init)
/ self.params["SYM_FRAC"]
)
yy.I = (1.0 - self.params.H) * I_init / yy.Im
yy.Ic = self.params.H * I_init / yy.Im
# TODO this is an estimate, we should rescale it to the real data if we have it
rh_fac = 1.0 # .4
yy.Rh = self.params.H * I_init / yy.Rhn
if self.consts.rescale_chr:
adm1_hosp = xp.zeros((self.g_data.max_adm1 + 1,), dtype=float)
xp.scatter_add(adm1_hosp, self.g_data.adm1_id, xp.sum(yy.Rh * self.Nij, axis=(0, 1)))
adm2_hosp_frac = (self.adm1_current_hosp / adm1_hosp)[self.g_data.adm1_id]
adm0_hosp_frac = xp.nansum(self.adm1_current_hosp) / xp.nansum(adm1_hosp)
adm2_hosp_frac[xp.isnan(adm2_hosp_frac) | (adm2_hosp_frac == 0.0)] = adm0_hosp_frac
adm2_hosp_frac = xp.sqrt(adm2_hosp_frac * adm0_hosp_frac)
scaling_F = F_RR_fac[self.g_data.adm1_id] * self.consts.F_scaling / H_fac
scaling_H = adm2_hosp_frac * H_fac
self.params["F"] = xp.clip(self.params["F"] * scaling_F, 0.0, 1.0)
self.params["H"] = xp.clip(self.params["H"] * scaling_H, self.params["F"], 1.0) / 1.2
self.params["F_eff"] = xp.clip(self.params["F"] / self.params["H"], 0.0, 1.0)
# TODO rename F_eff to HFR
adm2_chr_delay = xp.sum(self.params["I_TO_H_TIME"][:, None] * self.g_data.Nij / self.g_data.Nj, axis=0)
adm2_chr_delay_int = adm2_chr_delay.astype(int) # TODO temp, this should be a distribution of floats
adm2_chr_delay_mod = adm2_chr_delay % 1
inc_case_h_delay = (1.0 - adm2_chr_delay_mod) * xp.take_along_axis(
self.g_data.rolling_inc_cases, -adm2_chr_delay_int[None, :], axis=0
)[0] + adm2_chr_delay_mod * xp.take_along_axis(
self.g_data.rolling_inc_cases, -adm2_chr_delay_int[None, :] - 1, axis=0
)[
0
]
inc_case_h_delay[(inc_case_h_delay > 0.0) & (inc_case_h_delay < 1.0)] = 1.0
inc_case_h_delay[inc_case_h_delay < 0.0] = 0.0
adm2_chr = xp.sum(self.params["H"] * self.g_data.Nij / self.g_data.Nj, axis=0)
tmp = xp.sum(self.params.H * I_init / yy.Im * self.g_data.Nij, axis=0) / 3.0 # 1/3 is mean sigma
tmp2 = inc_case_h_delay * adm2_chr # * 3.0 # 3 == mean sigma, these should be read from base_params
ic_fac = tmp2 / tmp
ic_fac[~xp.isfinite(ic_fac)] = xp.nanmean(ic_fac[xp.isfinite(ic_fac)])
yy.I = (1.0 - self.params.H) * I_init / yy.Im
yy.Ic = ic_fac * self.params.H * I_init / yy.Im
yy.Rh = (
rh_fac
* self.params.H
* I_init
/ yy.Rhn
# * 1.15 # fit to runs, we should be able to calculate this somehow...
)
R_init -= xp.sum(yy.Rh, axis=0)
yy.Ia = self.params.ASYM_FRAC / self.params.SYM_FRAC * I_init / yy.Im
        yy.E = exp_frac[None, :] * I_init / yy.En  # this should be computable from Rt and the time before symptom onset
yy.R = xp.clip(R_init, a_min=0.0, a_max=None)
yy.D = D_init
# TMP
mask = xp.sum(yy.N, axis=0) > 1.0
yy.state[:, mask] /= xp.sum(yy.N, axis=0)[mask]
yy.init_S()
# init the bin we're using to track incident cases
# (it's filled with cumulatives until we diff it later)
# TODO should this come from the rolling hist?
yy.incC = xp.clip(self.g_data.cum_case_hist[-1][None, :], a_min=0.0, a_max=None) * age_dist_fac / self.Nij
self.y = yy
# Sanity check state vector
self.y.validate_state()
if self.debug:
logging.debug("done reset()")
# return y
    # @staticmethod: need to move the caching out b/c it's in the self namespace
def estimate_reporting(self, g_data, params, cfr, days_back=14, case_lag=None, min_deaths=100.0):
"""Estimate the case reporting rate based off observed vs. expected CFR"""
if case_lag is None:
adm0_cfr_by_age = xp.sum(cfr * g_data.Nij, axis=1) / xp.sum(g_data.Nj, axis=0)
adm0_cfr_total = xp.sum(
xp.sum(cfr * g_data.Nij, axis=1) / xp.sum(g_data.Nj, axis=0),
axis=0,
)
case_lag = xp.sum(params["D_REPORT_TIME"] * adm0_cfr_by_age / adm0_cfr_total, axis=0)
case_lag_int = int(case_lag)
recent_cum_cases = g_data.rolling_cum_cases - g_data.rolling_cum_cases[0]
recent_cum_deaths = g_data.rolling_cum_deaths - g_data.rolling_cum_deaths[0]
case_lag_frac = case_lag % 1 # TODO replace with util function for the indexing
cases_lagged = frac_last_n_vals(recent_cum_cases, days_back + case_lag_frac, offset=case_lag_int)
if case_lag_frac:
cases_lagged = cases_lagged[0] + cases_lagged[1:]
# adm0
adm0_cfr_param = xp.sum(xp.sum(cfr * g_data.Nij, axis=1) / xp.sum(g_data.Nj, axis=0), axis=0)
if self.adm0_cfr_reported is None:
self.adm0_cfr_reported = xp.sum(recent_cum_deaths[-days_back:], axis=1) / xp.sum(cases_lagged, axis=1)
adm0_case_report = adm0_cfr_param / self.adm0_cfr_reported
if self.debug:
logging.debug("Adm0 case reporting rate: " + pformat(adm0_case_report))
if xp.any(~xp.isfinite(adm0_case_report)):
if self.debug:
logging.debug("adm0 case report not finite")
logging.debug(adm0_cfr_param)
logging.debug(self.adm0_cfr_reported)
raise SimulationException
case_report = xp.repeat(adm0_case_report[:, None], cases_lagged.shape[-1], axis=1)
# adm1
adm1_cfr_param = xp.zeros((g_data.max_adm1 + 1,), dtype=float)
adm1_totpop = g_data.adm1_Nj # xp.zeros((self.g_data.max_adm1 + 1,), dtype=float)
tmp_adm1_cfr = xp.sum(cfr * g_data.Nij, axis=0)
xp.scatter_add(adm1_cfr_param, g_data.adm1_id, tmp_adm1_cfr)
# xp.scatter_add(adm1_totpop, self.g_data.adm1_id, self.Nj)
adm1_cfr_param /= adm1_totpop
# adm1_cfr_reported is const, only calc it once and cache it
if self.adm1_cfr_reported is None:
self.adm1_deaths_reported = xp.zeros((g_data.max_adm1 + 1, days_back), dtype=float)
adm1_lagged_cases = xp.zeros((g_data.max_adm1 + 1, days_back), dtype=float)
xp.scatter_add(
self.adm1_deaths_reported,
g_data.adm1_id,
recent_cum_deaths[-days_back:].T,
)
xp.scatter_add(adm1_lagged_cases, g_data.adm1_id, cases_lagged.T)
self.adm1_cfr_reported = self.adm1_deaths_reported / adm1_lagged_cases
adm1_case_report = (adm1_cfr_param[:, None] / self.adm1_cfr_reported)[g_data.adm1_id].T
valid_mask = (self.adm1_deaths_reported > min_deaths)[g_data.adm1_id].T & xp.isfinite(adm1_case_report)
case_report[valid_mask] = adm1_case_report[valid_mask]
# adm2
adm2_cfr_param = xp.sum(cfr * (g_data.Nij / g_data.Nj), axis=0)
if self.adm2_cfr_reported is None:
self.adm2_cfr_reported = recent_cum_deaths[-days_back:] / cases_lagged
adm2_case_report = adm2_cfr_param / self.adm2_cfr_reported
valid_adm2_cr = xp.isfinite(adm2_case_report) & (recent_cum_deaths[-days_back:] > min_deaths)
case_report[valid_adm2_cr] = adm2_case_report[valid_adm2_cr]
return case_report
def run_once(self, seed=None):
"""Perform one complete run of the simulation"""
# rename to integrate or something? it also resets...
# reset everything
logging.debug("Resetting state")
self.reset(seed=seed)
logging.debug("Done reset")
self.base_mc_instance.epi_params = self.params
self.base_mc_instance.state = self.y
self.base_mc_instance.Aij = self.g_data.Aij.A
self.base_mc_instance.rhs = RHS_func
self.base_mc_instance.dy = self.y.zeros_like()
# TODO this logic needs to go somewhere else (its rescaling beta to account for S/N term)
# TODO R0 need to be changed before reset()...
S_eff = self.base_mc_instance.S_eff(0, self.base_mc_instance.state)
adm2_S_eff = xp.sum(S_eff * self.g_data.Nij / self.g_data.Nj, axis=0)
adm2_beta_scale = xp.clip(1.0 / (adm2_S_eff + 1e-10), a_min=1.0, a_max=5.0)
self.base_mc_instance.epi_params["R0"] = self.base_mc_instance.epi_params["R0"] * adm2_beta_scale
self.base_mc_instance.epi_params["BETA"] = self.base_mc_instance.epi_params["BETA"] * adm2_beta_scale
adm2_E_tot = xp.sum(self.y.E * self.g_data.Nij / self.g_data.Nj, axis=(0, 1))
adm2_new_E_tot = adm2_beta_scale * adm2_E_tot
S_dist = S_eff / (xp.sum(S_eff, axis=0) + 1e-10)
new_E = xp.tile(
(S_dist * adm2_new_E_tot / self.g_data.Nij * self.g_data.Nj / self.params.consts["En"])[None, ...],
(xp.to_cpu(self.params.consts["En"]), 1, 1),
)
new_S = self.y.S - xp.sum(new_E - self.y.E, axis=0)
self.base_mc_instance.state.E = new_E
self.base_mc_instance.state.S = new_S
# do integration
logging.debug("Starting integration")
sol = xp_ivp.solve_ivp(
# self.RHS_func,
# y0=self.y.state.ravel(),
# args=(
# #self.g_data.Aij.A,
# self.base_mc_instance,
# #self.base_mc_instance.state,
# ),
**self.base_mc_instance.integrator_args
)
logging.debug("Done integration")
return sol
def run_multiple(self, n_mc, base_seed=42, out_columns=None):
"""Perform multiple monte carlos and return their postprocessed results"""
seed_seq = np.random.SeedSequence(base_seed)
success = 0
ret = []
pbar = tqdm.tqdm(total=n_mc, desc="Performing Monte Carlos", dynamic_ncols=True)
while success < n_mc:
mc_seed = seed_seq.spawn(1)[0].generate_state(1)[0] # inc spawn key then grab next seed
pbar.set_postfix_str(
"seed=" + str(mc_seed),
refresh=True,
)
try:
with xp.optimize_kernels():
sol = self.run_once(seed=mc_seed)
df_data = self.postprocess_run(sol, mc_seed, out_columns)
ret.append(df_data)
success += 1
pbar.update(1)
except SimulationException:
pass
pbar.close()
return ret
    # TODO Move this to a class that's like run_parser or something (that caches all the info it needs like Nij, and manages the write thread/queue)
# Also give it methods like to_dlpack, to_pytorch, etc
def save_run(self, sol, base_filename, seed, output_queue):
"""Postprocess and write to disk the output of run_once"""
df_data = self.postprocess_run(sol, seed)
# flatten the shape
for c in df_data:
df_data[c] = df_data[c].ravel()
# push the data off to the write thread
data_folder = os.path.join(base_filename, "data")
output_queue.put((data_folder, df_data))
metadata_folder = os.path.join(base_filename, "metadata")
if not os.path.exists(metadata_folder):
os.mkdir(metadata_folder)
# write dates
uniq_dates = pd.Series(self.output_dates)
pd.DataFrame({"date": uniq_dates}).to_csv(os.path.join(metadata_folder, "dates.csv"), index=False)
# write out adm mapping
adm_map = pd.DataFrame(
{
"adm2": xp.to_cpu(self.g_data.adm2_id),
"adm1": xp.to_cpu(self.g_data.adm1_id),
"adm0": self.g_data.adm0_name,
}
)
adm_map.to_csv(os.path.join(metadata_folder, "adm_mapping.csv"), index=False)
# TODO write params out (to yaml?) in another subfolder
# TODO we should output the per monte carlo param rolls, this got lost when we switched from hdf5
def postprocess_run(self, sol, seed, columns=None):
"""Process the output of a run (sol, returned by the integrator) into the requested output vars"""
if columns is None:
columns = [
"adm2_id",
"date",
"rid",
"total_population",
"current_hospitalizations",
"active_asymptomatic_cases",
"cumulative_deaths",
"daily_hospitalizations",
"daily_cases",
"daily_reported_cases",
"daily_deaths",
"cumulative_cases",
"cumulative_reported_cases",
"current_icu_usage",
"current_vent_usage",
"case_reporting_rate",
"R_eff",
]
columns = set(columns)
df_data = {}
out = buckyState(self.consts, self.Nij)
y = sol.y.reshape(self.y.state_shape + (sol.y.shape[-1],))
# rescale by population
out.state = self.Nij[None, ..., None] * y
# collapse age groups
out.state = xp.sum(out.state, axis=1)
# population_conserved = (xp.diff(xp.around(xp.sum(out.N, axis=(0, 1)), 1)) == 0.0).all()
# if not population_conserved:
# pass # TODO we're getting small fp errors here
# # print(xp.sum(xp.diff(xp.around(xp.sum(out[:incH], axis=(0, 1)), 1))))
# # logging.error("Population not conserved!")
# # print(xp.sum(xp.sum(y[:incH],axis=0)-1.))
# # raise SimulationException
if "adm2_id" in columns:
adm2_ids = np.broadcast_to(self.g_data.adm2_id[:, None], out.state.shape[1:])
df_data["adm2_id"] = adm2_ids
if "date" in columns:
if self.output_dates is None:
t_output = xp.to_cpu(sol.t)
dates = [str(self.init_date + datetime.timedelta(days=np.round(t))) for t in t_output]
self.output_dates = dates
df_data["date"] = np.broadcast_to(np.arange(len(self.output_dates)), out.state.shape[1:])
if "rid" in columns:
df_data["rid"] = np.broadcast_to(seed, out.state.shape[1:])
if "current_icu_usage" in columns or "current_vent_usage" in columns:
icu = self.Nij[..., None] * self.params["ICU_FRAC"][:, None, None] * xp.sum(y[out.indices["Rh"]], axis=0)
if "current_icu_usage" in columns:
df_data["current_icu_usage"] = xp.sum(icu, axis=0)
if "current_vent_usage" in columns:
vent = self.params.ICU_VENT_FRAC[:, None, None] * icu
df_data["current_vent_usage"] = xp.sum(vent, axis=0)
if "daily_deaths" in columns:
daily_deaths = xp.gradient(out.D, axis=-1, edge_order=2)
df_data["daily_deaths"] = daily_deaths
if self.reject_runs:
init_inc_death_mean = xp.mean(xp.sum(daily_deaths[:, 1:4], axis=0))
hist_inc_death_mean = xp.mean(xp.sum(self.g_data.inc_death_hist[-7:], axis=-1))
inc_death_rejection_fac = 2.0 # TODO These should come from the cli arg -r
if (init_inc_death_mean > inc_death_rejection_fac * hist_inc_death_mean) or (
inc_death_rejection_fac * init_inc_death_mean < hist_inc_death_mean
):
logging.info("Inconsistent inc deaths, rejecting run")
raise SimulationException
if "daily_cases" in columns or "daily_reported_cases" in columns:
daily_reported_cases = xp.gradient(out.incC, axis=-1, edge_order=2)
if self.reject_runs:
init_inc_case_mean = xp.mean(xp.sum(daily_reported_cases[:, 1:4], axis=0))
hist_inc_case_mean = xp.mean(xp.sum(self.g_data.inc_case_hist[-7:], axis=-1))
inc_case_rejection_fac = 1.5 # TODO These should come from the cli arg -r
if (init_inc_case_mean > inc_case_rejection_fac * hist_inc_case_mean) or (
inc_case_rejection_fac * init_inc_case_mean < hist_inc_case_mean
):
logging.info("Inconsistent inc cases, rejecting run")
raise SimulationException
if "daily_reported_cases" in columns:
df_data["daily_reported_cases"] = daily_reported_cases
if "daily_cases" in columns:
daily_cases_total = daily_reported_cases / self.params.CASE_REPORT[:, None]
df_data["daily_cases"] = daily_cases_total
if "cumulative_reported_cases" in columns:
cum_cases_reported = out.incC
df_data["cumulative_reported_cases"] = cum_cases_reported
if "cumulative_cases" in columns:
cum_cases_total = out.incC / self.params.CASE_REPORT[:, None]
df_data["cumulative_cases"] = cum_cases_total
if "daily_hospitalizations" in columns:
out.incH[:, 0] = out.incH[:, 1]
daily_hosp = xp.gradient(out.incH, axis=-1, edge_order=2)
df_data["daily_hospitalizations"] = daily_hosp
if "total_population" in columns:
N = xp.broadcast_to(self.g_data.Nj[..., None], out.state.shape[1:])
df_data["total_population"] = N
if "current_hospitalizations" in columns:
hosps = xp.sum(out.Rh, axis=0) # why not just using .H?
df_data["current_hospitalizations"] = hosps
if "cumulative_deaths" in columns:
cum_deaths = out.D
df_data["cumulative_deaths"] = cum_deaths
if "active_asymptomatic_cases" in columns:
asym_I = xp.sum(out.Ia, axis=0)
df_data["active_asymptomatic_cases"] = asym_I
if "case_reporting_rate" in columns:
crr = xp.broadcast_to(self.params.CASE_REPORT[:, None], adm2_ids.shape)
df_data["case_reporting_rate"] = crr
if "R_eff" in columns:
r_eff = self.npi_params["r0_reduct"].T * np.broadcast_to(
(self.params.R0 * self.g_data.Aij.diag)[:, None], adm2_ids.shape
)
df_data["R_eff"] = r_eff
# Collapse the gamma-distributed compartments and move everything to cpu
negative_values = False
for k in df_data:
# if df_data[k].ndim == 2:
# df_data[k] = xp.sum(df_data[k], axis=0)
if k != "date" and xp.any(xp.around(df_data[k], 2) < 0.0):
logging.info("Negative values present in " + k)
negative_values = True
if negative_values and self.reject_runs:
logging.info("Rejecting run b/c of negative values in output")
raise SimulationException
return df_data
def main(args=None):
"""Main method for a complete simulation called with a set of CLI args"""
if args is None:
args = sys.argv[1:]
args = parser.parse_args(args=args)
if args.gpu:
logging.info("Using GPU backend")
enable_cupy(optimize=args.opt)
reimport_numerical_libs("model.main.main")
warnings.simplefilter(action="ignore", category=xp.ExperimentalWarning)
if not os.path.exists(args.output_dir):
os.mkdir(args.output_dir)
loglevel = 30 - 10 * min(args.verbosity, 2)
runid = get_runid()
# Setup output folder TODO change over to pathlib
output_folder = os.path.join(args.output_dir, runid)
if not os.path.exists(output_folder):
os.mkdir(output_folder)
# fh = logging.FileHandler(output_folder + "/stdout")
# fh.setLevel(logging.DEBUG)
logging.basicConfig(
level=loglevel,
format="%(asctime)s - %(levelname)s - %(filename)s:%(funcName)s:%(lineno)d - %(message)s",
handlers=[TqdmLoggingHandler()],
)
debug_mode = loglevel < 20
# TODO we should output the logs to output_dir too...
_banner()
# TODO move the write_thread stuff to a util (postprocess uses something similar)
to_write = queue.Queue(maxsize=100)
def writer():
"""Write thread loop that pulls from an async queue"""
# Call to_write.get() until it returns None
stream = xp.cuda.Stream(non_blocking=True) if args.gpu else None
pinned_mem = {}
for base_fname, df_data in iter(to_write.get, None):
for k, v in df_data.items():
if k not in pinned_mem:
pinned_mem[k] = xp.empty_like_pinned(v)
xp.to_cpu(v, stream=stream, out=pinned_mem[k])
if stream is not None:
stream.synchronize()
pa_data = {k: pa.array(v) for k, v in pinned_mem.items()}
table = pa.table(pa_data)
pap.write_to_dataset(table, base_fname, partition_cols=["date"])
write_thread = threading.Thread(target=writer, daemon=True)
write_thread.start()
logging.info(f"command line args: {args}")
env = buckyModelCovid(
debug=debug_mode,
sparse_aij=(not args.dense),
t_max=args.days,
graph_file=args.graph_file,
par_file=args.par_file,
npi_file=args.npi_file,
disable_npi=args.disable_npi,
reject_runs=args.reject_runs,
)
seed_seq = np.random.SeedSequence(args.seed)
total_start = datetime.datetime.now()
success = 0
n_runs = 0
pbar = tqdm.tqdm(total=args.n_mc, desc="Performing Monte Carlos", dynamic_ncols=True)
try:
while success < args.n_mc:
mc_seed = seed_seq.spawn(1)[0].generate_state(1)[0] # inc spawn key then grab next seed
pbar.set_postfix_str(
"seed="
+ str(mc_seed)
+ ", rej%=" # TODO disable rej% if not -r
+ str(np.around(float(n_runs - success) / (n_runs + 0.00001) * 100, 1)),
refresh=True,
)
try:
n_runs += 1
with xp.optimize_kernels():
sol = env.run_once(seed=mc_seed)
env.save_run(sol, output_folder, mc_seed, output_queue=to_write)
success += 1
pbar.update(1)
except SimulationException:
pass
except (KeyboardInterrupt, SystemExit):
logging.warning("Caught SIGINT, cleaning up")
to_write.put(None)
write_thread.join()
finally:
to_write.put(None)
write_thread.join()
pbar.close()
logging.info(f"Total runtime: {datetime.datetime.now() - total_start}")
if __name__ == "__main__":
main()
| 41.922489 | 147 | 0.609672 | 5,544 | 38,401 | 3.978535 | 0.129509 | 0.019948 | 0.019586 | 0.014689 | 0.318856 | 0.224691 | 0.161536 | 0.113569 | 0.093168 | 0.069184 | 0 | 0.019496 | 0.281373 | 38,401 | 915 | 148 | 41.968306 | 0.779787 | 0.156715 | 0 | 0.112324 | 0 | 0.00156 | 0.061517 | 0.011806 | 0 | 0 | 0 | 0.001093 | 0 | 1 | 0.020281 | false | 0.00312 | 0.046802 | 0 | 0.079563 | 0.00156 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
988147fa4143728a5e12261688a08199610fecd9 | 10,557 | py | Python | bootini_star/views.py | rseichter/bootini-star | a80258f01a05e4df38748b8cb47dfadabd42c20d | [
"MIT"
] | null | null | null | bootini_star/views.py | rseichter/bootini-star | a80258f01a05e4df38748b8cb47dfadabd42c20d | [
"MIT"
] | null | null | null | bootini_star/views.py | rseichter/bootini-star | a80258f01a05e4df38748b8cb47dfadabd42c20d | [
"MIT"
] | null | null | null | """
Application views/routes are based on Flask's MethodView. All primary views are
combined into a Flask blueprint.
"""
__author__ = 'Ralph Seichter'
import datetime
from operator import attrgetter
import flask_login
from flask import Blueprint, flash, redirect, url_for
from flask.templating import render_template
from flask.views import MethodView, View
from flask_login.utils import current_user
from pymongo.errors import OperationFailure
import swagger_client
from bootini_star import esi
from bootini_star.esi import Cache, EveGroup, EveType
from bootini_star.forms import AdminForm
from swagger_client.rest import ApiException
from .extensions import app_config, log
from .models import User, create_unique_index
from .sso import EveSso
ADMIN_REQUIRED = 'Admin privileges are required.'
eveCache = esi.IdNameCache()
class RenderTemplate(View):
def __init__(self, template):
self.template = template
def dispatch_request(self):
return render_template(self.template, config=app_config)
def flash_form_errors(form):
for field, errors in form.errors.items():
for error in errors:
flash(error, 'danger')
class Dashboard(MethodView):
methods = ['GET']
@flask_login.login_required
def get(self):
auth_url, auth_state = EveSso().auth_url_state()
cu: User = current_user
characters = sorted(cu.characters,
key=attrgetter('name')) if cu.characters else None
return render_template(
'dashboard.html',
characters=characters,
auth_url=auth_url,
auth_state=auth_state,
config=app_config
)
class Character(MethodView):
methods = ['GET']
@flask_login.login_required
def get(self, character_id):
api = swagger_client.CharacterApi()
try:
return render_template(
'character.html',
character=esi.get_character(api, character_id)
)
except ApiException as e:
return api_fail(e)
def refresh_token(api, character: Character):
es = EveSso(character.token)
rt = es.refresh_token()
if rt.token_changed:
log.debug(f'Updating token for character {character.eve_id}')
character.token = rt.token
character.modified_at = datetime.datetime.utcnow()
if current_user.update() != 1:
log.error(
f'Error updating token for character {character.eve_id}')
client = api.api_client
client.set_default_header('User-Agent', app_config['USER_AGENT'])
client.configuration.access_token = rt.token['access_token']
return api
def mail_api(current_character):
return refresh_token(swagger_client.MailApi(), current_character)
def skills_api(current_character):
return refresh_token(swagger_client.SkillsApi(), current_character)
def api_fail(api_exception):
flash('EVE Swagger Interface call failed: ' +
api_exception.reason + '.', 'danger')
return redirect(url_for('.index'))
class MailList(MethodView):
methods = ['GET']
@flask_login.login_required
def get(self, character_id, label=None):
cc = current_user.get_character(character_id)
if cc: # pragma: no cover (Needs live character)
api = mail_api(cc)
kwargs = {'labels': [label]} if isinstance(label, int) else {}
try:
labels = esi.get_mail_labels(api, character_id)
mails = esi.get_mails(api, character_id, **kwargs)
mail_ids = {m._from for m in mails}
eveCache.eve_characters(mail_ids)
except ApiException as e:
return api_fail(e)
sl = sorted(labels.labels, key=attrgetter('label_id'))
sm = sorted(mails, key=attrgetter('timestamp'), reverse=True)
return render_template('maillist.html', eveCache=eveCache,
character_id=character_id, labels=sl,
maillist=sm)
else:
flash('Please select one of your characters.', 'warning')
return redirect(url_for('.dashboard'))
class Mail(MethodView):
methods = ['GET']
@flask_login.login_required
def get(self, character_id: int, mail_id: int):
cc = current_user.get_character(character_id)
if cc:
api = mail_api(cc)
try:
rv = api.get_characters_character_id_mail_mail_id(
character_id, mail_id)
return render_template('mail.html', character_id=character_id,
mail_id=mail_id, eveCache=eveCache,
mail=rv)
except ApiException as e:
return api_fail(e)
else:
flash('Please select one of your characters.', 'warning')
return redirect(url_for('.dashboard'))
class MarkMailRead(MethodView):
methods = ['GET']
@flask_login.login_required
def get(self, character_id: int, mail_id: int, read: int):
cc = current_user.get_character(character_id)
if cc: # pragma: no cover (Needs live character)
api = mail_api(cc)
try:
status = 'read' if read else 'unread'
esi.mark_mail_read(api, character_id, mail_id, read)
flash(f'Mail has been marked as {status}.', 'success')
except ApiException as e:
return api_fail(e)
return redirect(url_for('.maillist', character_id=character_id))
else:
flash('Please select one of your characters.', 'warning')
return redirect(url_for('.dashboard'))
class RemoveMail(MethodView):
methods = ['GET']
@flask_login.login_required
def get(self, character_id: int, mail_id: int):
cc = current_user.get_character(character_id)
if cc: # pragma: no cover (Needs live character)
api = mail_api(cc)
try:
api.delete_characters_character_id_mail_mail_id(
character_id, mail_id)
flash('Mail has been deleted.', 'success')
except ApiException as e:
return api_fail(e)
return redirect(url_for('.maillist', character_id=character_id))
else:
flash('Please select one of your characters.', 'warning')
return redirect(url_for('.dashboard'))
class RemoveCharacter(MethodView):
methods = ['GET']
@flask_login.login_required
def get(self, character_id):
try:
log.debug(f'Remove character {character_id}')
current_user.remove_character(character_id)
if current_user.update() == 1:
flash(f'Character {character_id} was removed.', 'success')
else:
log.warning(f'Character {character_id} could not be removed')
flash(f'Character {character_id} could not be removed.',
'danger')
except OperationFailure as e:
log.error(f'Error removing character: {e}')
flash(f'Character {character_id} could not be removed.', 'danger')
return redirect(url_for('.dashboard'))
class Skills(MethodView):
methods = ['GET']
@flask_login.login_required
def get(self, character_id):
cc = current_user.get_character(character_id)
if cc: # pragma: no cover (Needs live character)
api = skills_api(cc)
try:
rv = api.get_characters_character_id_skillqueue(character_id)
return render_template('skillqueue.html', eveCache=eveCache,
character_id=character_id,
skillq=sorted(rv, key=attrgetter(
'queue_position')))
except ApiException as e:
return api_fail(e)
else:
flash('Please select one of your characters.', 'warning')
return redirect(url_for('.dashboard'))
class Skill(MethodView):
methods = ['GET']
@staticmethod
def get(skill_id):
skill = eveCache.eve_type(skill_id)
return render_template('skill.html', skill=skill)
class Admin(MethodView):
methods = ['GET', 'POST']
@flask_login.fresh_login_required
def get(self):
if current_user.is_admin:
return render_template('quickform.html', form=AdminForm())
flash(ADMIN_REQUIRED, 'warning')
return redirect(url_for('.login'))
@flask_login.fresh_login_required
def post(self):
if current_user.is_admin:
create_unique_index(Cache().collection, 'eve_id')
create_unique_index(EveGroup().collection, 'eve_id')
create_unique_index(EveType().collection, 'eve_id')
create_unique_index(User().collection, 'email')
flash('Created database indexes.', 'success')
return redirect(url_for('.dashboard'))
flash(ADMIN_REQUIRED, 'warning')
return redirect(url_for('.login'))
blueprint = Blueprint('bs', __name__)
blueprint.add_url_rule(
'/', view_func=RenderTemplate.as_view('index', template='index.html'))
blueprint.add_url_rule('/admin', view_func=Admin.as_view('admin'))
blueprint.add_url_rule('/dashboard', view_func=Dashboard.as_view('dashboard'))
blueprint.add_url_rule('/dashboard/rm/<int:character_id>',
view_func=RemoveCharacter.as_view('rmcharacter'))
blueprint.add_url_rule('/character/<int:character_id>',
view_func=Character.as_view('character'))
blueprint.add_url_rule(
'/mail/<int:character_id>/<int:mail_id>', view_func=Mail.as_view('mail'))
blueprint.add_url_rule('/mail/rm/<int:character_id>/<int:mail_id>',
view_func=RemoveMail.as_view('rmmail'))
blueprint.add_url_rule('/mail/rd/<int:character_id>/<int:mail_id>/<int:read>',
view_func=MarkMailRead.as_view('mailread'))
blueprint.add_url_rule('/maillist/<int:character_id>/<int:label>',
view_func=MailList.as_view('maillabel'))
blueprint.add_url_rule('/maillist/<int:character_id>',
view_func=MailList.as_view('maillist'))
blueprint.add_url_rule('/skill/<int:skill_id>',
view_func=Skill.as_view('skill'))
blueprint.add_url_rule('/skillqueue/<int:character_id>',
view_func=Skills.as_view('skillqueue'))
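# A minimal registration sketch (the Flask app object below is an assumption,
# not part of this module):
#
#     from flask import Flask
#     app = Flask(__name__)
#     app.register_blueprint(blueprint)  # mounts all of the routes added above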
| 36.278351 | 79 | 0.62783 | 1,236 | 10,557 | 5.131877 | 0.165049 | 0.079773 | 0.028693 | 0.037837 | 0.486836 | 0.426139 | 0.387829 | 0.357402 | 0.301908 | 0.277787 | 0 | 0.000259 | 0.268353 | 10,557 | 290 | 80 | 36.403448 | 0.820948 | 0.02586 | 0 | 0.387234 | 0 | 0 | 0.150297 | 0.030274 | 0 | 0 | 0 | 0 | 0 | 1 | 0.076596 | false | 0 | 0.068085 | 0.012766 | 0.357447 | 0.059574 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
9881570c2307209bb16898489ef2c768f90b46b1 | 514 | py | Python | 3_corpus_chatterbot.py | OtacilioMaia/Chatbots-Desenvolvimento-Orientado-a-Conversas | aadb7384d64549749508d594138e07221c200f68 | [
"MIT"
] | null | null | null | 3_corpus_chatterbot.py | OtacilioMaia/Chatbots-Desenvolvimento-Orientado-a-Conversas | aadb7384d64549749508d594138e07221c200f68 | [
"MIT"
] | null | null | null | 3_corpus_chatterbot.py | OtacilioMaia/Chatbots-Desenvolvimento-Orientado-a-Conversas | aadb7384d64549749508d594138e07221c200f68 | [
"MIT"
] | null | null | null | __author__ = "Otacilio Maia"
__version__ = "1.0.0"
from chatterbot import ChatBot
from chatterbot.trainers import ChatterBotCorpusTrainer
def main():
chatbot = ChatBot('Chat AI')
trainer = ChatterBotCorpusTrainer(chatbot)
trainer.train("chatterbot.corpus.portuguese")
user_text = ""
while(user_text != "sair"):
user_text = input("Voce: ")
        response = chatbot.get_response(user_text).text
        print(response)
if __name__ == "__main__":
    main()
| 25.7 | 71 | 0.684825 | 57 | 514 | 5.807018 | 0.614035 | 0.096677 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.009569 | 0.18677 | 514 | 20 | 72 | 25.7 | 0.782297 | 0 | 0 | 0 | 0 | 0 | 0.147573 | 0.054369 | 0 | 0 | 0 | 0 | 0 | 1 | 0.066667 | false | 0 | 0.133333 | 0 | 0.2 | 0.066667 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
9883506bbef48623a9d156fc156d445016a20d6a | 5,103 | py | Python | examples/test_wf/test_convergence.py | yakutovicha/yambo-aiida | 5c722961ebabe5ea14fbcb20a866c6fb99398a7d | [
"MIT",
"BSD-3-Clause"
] | null | null | null | examples/test_wf/test_convergence.py | yakutovicha/yambo-aiida | 5c722961ebabe5ea14fbcb20a866c6fb99398a7d | [
"MIT",
"BSD-3-Clause"
] | null | null | null | examples/test_wf/test_convergence.py | yakutovicha/yambo-aiida | 5c722961ebabe5ea14fbcb20a866c6fb99398a7d | [
"MIT",
"BSD-3-Clause"
] | null | null | null | from aiida.backends.utils import load_dbenv, is_dbenv_loaded
if not is_dbenv_loaded():
load_dbenv()
from aiida_yambo.workflows.yamboconvergence import YamboConvergenceWorkflow
try:
from aiida.orm.data.base import Float, Str, NumericType, BaseType, List
from aiida.work.run import run, submit
except ImportError:
from aiida.workflows2.db_types import Float, Str, NumericType, SimpleData, Bool
from aiida.workflows2.db_types import SimpleData as BaseType
from aiida.orm.data.simple import SimpleData as SimpleData_
from aiida.workflows2.run import run
from aiida.orm.utils import DataFactory, load_node  # load_node is used below; this import path is an assumption for this AiiDA version
ParameterData = DataFactory("parameter")
yambo_parameters = {'ppa': True,
'gw0': True,
'HF_and_locXC': True,
'em1d': True,
'DIP_Threads': 0 ,
'BndsRnXp': (1,16),
'NGsBlkXp': 1,
'NGsBlkXp_units': 'RL',
'PPAPntXp': 20,
'PPAPntXp_units': 'eV',
'GbndRnge': (1,16),
'GDamping': 0.1,
'GDamping_units': 'eV',
'dScStep': 0.1,
'dScStep_units': 'eV',
'DysSolver': "n",
'QPkrange': [(1,1,16,18)],
}
calculation_set_p2y ={'resources': {"num_machines": 1,"num_mpiprocs_per_machine": 1}, 'max_wallclock_seconds': 60*29,
'max_memory_kb': 1*80*1000000 ,"queue_name":"s3par8cv3" ,#'custom_scheduler_commands': u"#PBS -A Pra14_3622" ,
'environment_variables': {"omp_num_threads": "1" } }
calculation_set_yambo ={'resources': {"num_machines": 1,"num_mpiprocs_per_machine": 16}, 'max_wallclock_seconds': 6*60*60,
'max_memory_kb': 1*80*1000000 , "queue_name":"s3par8cv3" ,#'custom_scheduler_commands': u"#PBS -A Pra14_3622" ,
'environment_variables': {"omp_num_threads": "0" } }
settings_pw = ParameterData(dict= {'cmdline':['-npool', '2' , '-ndiag', '8', '-ntg', '2' ]})
settings_p2y = ParameterData(dict={"ADDITIONAL_RETRIEVE_LIST":[
'r-*','o-*','l-*','l_*','LOG/l-*_CPU_1','aiida/ndb.QP','aiida/ndb.HF_and_locXC'], 'INITIALISE':True})
settings_yambo = ParameterData(dict={"ADDITIONAL_RETRIEVE_LIST":[
'r-*','o-*','l-*','l_*','LOG/l-*_CPU_1','aiida/ndb.QP','aiida/ndb.HF_and_locXC'], 'INITIALISE':False })
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser(description='GW QP calculation.')
parser.add_argument('--precode', type=str, dest='precode', required=True,
help='The p2y codename to use')
parser.add_argument('--yambocode', type=str, dest='yambocode', required=True,
help='The yambo codename to use')
parser.add_argument('--pwcode', type=str, dest='pwcode', required=True,
help='The pw codename to use')
parser.add_argument('--pseudo', type=str, dest='pseudo', required=True,
help='The pesudo to use')
parser.add_argument('--structure', type=int, dest='structure', required=True,
help='The structure to use')
parser.add_argument('--parent', type=int, dest='parent', required=False,
help='The parent to use')
parser.add_argument('--parent_nscf', type=int, dest='parent_nscf', required=False,
help='The parent nscf to use')
args = parser.parse_args()
structure = load_node(int(args.structure))
parentcalc = parent_folder_ = parentnscfcalc = parent_nscf_folder_ = False
if args.parent:
parentcalc = load_node(int(args.parent))
parent_folder_ = parentcalc.out.remote_folder
parentnscfcalc = load_node(int(args.parent_nscf))
parent_nscf_folder_ = parentnscfcalc.out.remote_folder
convergence_parameters = {'variable_to_converge': 'kpoints', 'conv_tol':0.1,
'start_value': .9 , 'step':.1 , 'max_value': 0.017 }
    p2y_result = run(YamboConvergenceWorkflow,
pwcode= Str( args.pwcode),
precode= Str( args.precode),
yambocode=Str(args.yambocode),
calculation_set= ParameterData(dict=calculation_set_yambo),
settings = settings_yambo,
convergence_parameters = ParameterData(dict=convergence_parameters),
#parent_scf_folder = parent_folder_,
#parent_nscf_folder = parent_nscf_folder_,
parameters = ParameterData(dict=yambo_parameters),
structure = structure ,
pseudo = Str(args.pseudo),
)
print ("Workflow launched: ", p2y_result)
| 50.524752 | 130 | 0.562806 | 529 | 5,103 | 5.189036 | 0.31758 | 0.029508 | 0.043352 | 0.030601 | 0.293625 | 0.251366 | 0.174863 | 0.174863 | 0.144262 | 0.144262 | 0 | 0.027659 | 0.312757 | 5,103 | 100 | 131 | 51.03 | 0.755061 | 0.035469 | 0 | 0.02439 | 0 | 0 | 0.209156 | 0.045575 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.134146 | 0 | 0.134146 | 0.012195 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
9883b6e8d4d307c1b52e2f3a77df6fc639e8d98d | 4,936 | py | Python | lungSegmentation/evalutate_performance.py | slowy07/medical-BCDU | dab1ddcacbe093b78e6830d52db2a4e6fabc3d52 | [
"MIT"
] | null | null | null | lungSegmentation/evalutate_performance.py | slowy07/medical-BCDU | dab1ddcacbe093b78e6830d52db2a4e6fabc3d52 | [
"MIT"
] | null | null | null | lungSegmentation/evalutate_performance.py | slowy07/medical-BCDU | dab1ddcacbe093b78e6830d52db2a4e6fabc3d52 | [
"MIT"
] | null | null | null | import os
os.environ["CUDA_VISIBLE_DEVICES"] = "1"
import models as M
import numpy as np
import scipy
import matplotlib.pyplot as plt
from sklearn.metrics import roc_curve
from sklearn.metrics import roc_auc_score
from sklearn.metrics import confusion_matrix
from sklearn.metrics import precision_recall_curve
from sklearn.metrics import jaccard_similarity_score
from sklearn.metrics import f1_score
from scipy.ndimage.morphology import binary_erosion
# load data
folder = './processed_data/'
te_data = np.load(folder+'data_test.npy')
FOV = np.load(folder+'FOV_te.npy')
te_mask = np.load(folder+'mask_test.npy')
te_data = np.expand_dims(te_data, axis=3)
print('Dataset loaded')
# te_data2 = dataset_normalized(te_data)
te_data2 = te_data / 255.0
model = M.BCDU_net_D3(input_size = (512,512,1))
model.summary()
model.load_weights('weight_lung')
predictions = model.predict(te_data2, batch_size=2, verbose=1)
# Post-processing
predictions = np.squeeze(predictions)
predictions = np.where(predictions>0.5, 1, 0)
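# estimated lung mask = field of view minus the thresholded prediction
# (the model appears to segment the non-lung region within the FOV; this
# reading of the output convention is an assumption)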
Estimated_lung = np.where((FOV - predictions)>0.5, 1, 0)
# Performance checking
y_scores = Estimated_lung.reshape(Estimated_lung.shape[0]*Estimated_lung.shape[1]*Estimated_lung.shape[2], 1)
print(y_scores.shape)
y_true = te_mask.reshape(te_mask.shape[0]*te_mask.shape[1]*te_mask.shape[2], 1)
y_scores = np.where(y_scores>0.5, 1, 0)
y_true = np.where(y_true>0.5, 1, 0)
output_folder = 'output/'
if not os.path.exists(output_folder):
os.makedirs(output_folder)
# Area under the ROC curve
fpr, tpr, thresholds = roc_curve((y_true), y_scores)
AUC_ROC = roc_auc_score(y_true, y_scores)
print ("\nArea under the ROC curve: " +str(AUC_ROC))
fig_roc = plt.figure()  # distinct name so sklearn's roc_curve import stays unshadowed
plt.plot(fpr,tpr,'-',label='Area Under the Curve (AUC = %0.4f)' % AUC_ROC)
plt.title('ROC curve')
plt.xlabel("FPR (False Positive Rate)")
plt.ylabel("TPR (True Positive Rate)")
plt.legend(loc="lower right")
plt.savefig(output_folder+"ROC.png")
# Precision-recall curve
precision, recall, thresholds = precision_recall_curve(y_true, y_scores)
precision = np.fliplr([precision])[0]
recall = np.fliplr([recall])[0]
AUC_prec_rec = np.trapz(precision,recall)
print ("\nArea under Precision-Recall curve: " +str(AUC_prec_rec))
prec_rec_curve = plt.figure()
plt.plot(recall,precision,'-',label='Area Under the Curve (AUC = %0.4f)' % AUC_prec_rec)
plt.title('Precision - Recall curve')
plt.xlabel("Recall")
plt.ylabel("Precision")
plt.legend(loc="lower right")
plt.savefig(output_folder+"Precision_recall.png")
# Confusion matrix
threshold_confusion = 0.5
print ("\nConfusion matrix: Custom threshold (for positive) of " +str(threshold_confusion))
y_pred = np.empty((y_scores.shape[0]))
for i in range(y_scores.shape[0]):
if y_scores[i]>=threshold_confusion:
y_pred[i]=1
else:
y_pred[i]=0
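# An equivalent vectorized form of the loop above (a sketch; same result):
#     y_pred = (y_scores >= threshold_confusion).astype(np.float64).ravel()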
confusion = confusion_matrix(y_true, y_pred)
print (confusion)
accuracy = 0
if float(np.sum(confusion))!=0:
accuracy = float(confusion[0,0]+confusion[1,1])/float(np.sum(confusion))
print ("Global Accuracy: " +str(accuracy))
specificity = 0
if float(confusion[0,0]+confusion[0,1])!=0:
specificity = float(confusion[0,0])/float(confusion[0,0]+confusion[0,1])
print ("Specificity: " +str(specificity))
sensitivity = 0
if float(confusion[1,1]+confusion[1,0])!=0:
sensitivity = float(confusion[1,1])/float(confusion[1,1]+confusion[1,0])
print ("Sensitivity: " +str(sensitivity))
precision = 0
if float(confusion[1,1]+confusion[0,1])!=0:
precision = float(confusion[1,1])/float(confusion[1,1]+confusion[0,1])
print ("Precision: " +str(precision))
# Jaccard similarity index
jaccard_index = jaccard_similarity_score(y_true, y_pred, normalize=True)
print ("\nJaccard similarity score: " +str(jaccard_index))
# F1 score
F1_score = f1_score(y_true, y_pred, labels=None, average='binary', sample_weight=None)
print ("\nF1 score (F-measure): " +str(F1_score))
# Save the results
file_perf = open(output_folder+'performances.txt', 'w')
file_perf.write("Area under the ROC curve: "+str(AUC_ROC)
+ "\nArea under Precision-Recall curve: " +str(AUC_prec_rec)
+ "\nJaccard similarity score: " +str(jaccard_index)
+ "\nF1 score (F-measure): " +str(F1_score)
+"\n\nConfusion matrix:"
+str(confusion)
+"\nACCURACY: " +str(accuracy)
+"\nSENSITIVITY: " +str(sensitivity)
+"\nSPECIFICITY: " +str(specificity)
+"\nPRECISION: " +str(precision)
)
file_perf.close()
# Sample results
fig,ax = plt.subplots(5, 3, figsize=[15,15])
all_ind = [1, 100, 200, 253, 193] # random samples
all_ind = np.array(all_ind)
for idx in range(5):
ax[idx, 0].imshow(np.uint8(np.squeeze(te_data[all_ind[idx]])))
ax[idx, 1].imshow(np.squeeze(te_mask[all_ind[idx]]), cmap='gray')
ax[idx, 2].imshow(np.squeeze(Estimated_lung[all_ind[idx]]), cmap='gray')
plt.savefig('sample_results.png')
| 36.562963 | 109 | 0.709887 | 755 | 4,936 | 4.480795 | 0.235762 | 0.020692 | 0.022761 | 0.042566 | 0.289388 | 0.181496 | 0.15903 | 0.093999 | 0.093999 | 0 | 0 | 0.030075 | 0.137763 | 4,936 | 135 | 110 | 36.562963 | 0.764803 | 0.045583 | 0 | 0.018519 | 0 | 0 | 0.167872 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.111111 | 0 | 0.111111 | 0.111111 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
988492255fff6a05146e48a47f6f7316997bd490 | 600 | py | Python | A_source_code/carbon/code/mocsy_module.py | vanHoek-dgnm/CARBON-DISC | 3ecd5f4efba5e032d43679ee977064d6b25154a9 | [
"Naumen",
"Condor-1.1",
"MS-PL"
] | null | null | null | A_source_code/carbon/code/mocsy_module.py | vanHoek-dgnm/CARBON-DISC | 3ecd5f4efba5e032d43679ee977064d6b25154a9 | [
"Naumen",
"Condor-1.1",
"MS-PL"
] | null | null | null | A_source_code/carbon/code/mocsy_module.py | vanHoek-dgnm/CARBON-DISC | 3ecd5f4efba5e032d43679ee977064d6b25154a9 | [
"Naumen",
"Condor-1.1",
"MS-PL"
] | null | null | null | # ******************************************************
## Copyright 2019, PBL Netherlands Environmental Assessment Agency and Utrecht University.
## Reuse permitted under Gnu Public License, GPL v3.
# ******************************************************
'''
imports mocsy module
'''
try:
import mocsy
except ModuleNotFoundError:
import os
import sys
root = os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', '..')
if "/" in root:
# working in linux OS
path = os.path.join(root,"libs","mocsy","linux")
if (os.path.exists(path)):
sys.path.insert(3, path)
| 31.578947 | 90 | 0.55 | 66 | 600 | 4.939394 | 0.621212 | 0.110429 | 0.06135 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.011765 | 0.15 | 600 | 18 | 91 | 33.333333 | 0.627451 | 0.481667 | 0 | 0 | 0 | 0 | 0.063973 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.3 | 0 | 0.3 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
98862016069a5935ad27d5ce8118f56358c9629c | 1,548 | py | Python | 1.5-one-away.py | jmartenstein/interview-questions | 0e2dca36b02f82e7a30453ef7ae2165f04e50710 | [
"MIT"
] | null | null | null | 1.5-one-away.py | jmartenstein/interview-questions | 0e2dca36b02f82e7a30453ef7ae2165f04e50710 | [
"MIT"
] | null | null | null | 1.5-one-away.py | jmartenstein/interview-questions | 0e2dca36b02f82e7a30453ef7ae2165f04e50710 | [
"MIT"
] | null | null | null | import unittest
class OneAwayTest(unittest.TestCase):
def test_oneaway_missing_letter1(self):
actual = one_away("pale", "ple")
self.assertTrue(actual)
def test_oneaway_missing_letter2(self):
actual = one_away("p", "")
self.assertTrue(actual)
def test_oneaway_same_letters(self):
actual = one_away("justin", "justin")
self.assertTrue(actual)
def test_oneaway_missing_letter3(self):
actual = one_away("jstin", "jsti")
self.assertTrue(actual)
def test_oneaway_changed_letter1(self):
actual = one_away("pale", "bake")
self.assertFalse(actual)
def test_oneaway_changed_letters1(self):
actual = one_away("pale", "bale")
self.assertTrue(actual)
def one_away(string1, string2):
diff_count = 0
i = 0
j = 0
    # For now, we handle two separate cases, depending on whether the
    # string lengths are equal or differ
if len(string1) == len(string2):
while (i < len(string1)) and (diff_count <= 1):
if string1[i] != string2[i]:
diff_count += 1
i += 1
else:
if len(string1) < len(string2):
string_short = string1
string_long = string2
else:
string_long = string1
string_short = string2
while(i < len(string_long)) and (diff_count <= 1):
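            # two-pointer scan: j lags i only when a character of the longer
            # string is skipped; each skip counts as one difference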
if j >= len(string_short):
diff_count += 1
else:
if string_long[i] != string_short[j]:
diff_count += 1
else:
j += 1
i += 1
if diff_count > 1:
return False
else:
return True
if __name__ == '__main__':
unittest.main()
| 21.802817 | 70 | 0.631783 | 206 | 1,548 | 4.514563 | 0.31068 | 0.052688 | 0.090323 | 0.109677 | 0.360215 | 0.221505 | 0.088172 | 0 | 0 | 0 | 0 | 0.025884 | 0.251292 | 1,548 | 70 | 71 | 22.114286 | 0.776531 | 0.053618 | 0 | 0.294118 | 0 | 0 | 0.036252 | 0 | 0 | 0 | 0 | 0 | 0.117647 | 1 | 0.137255 | false | 0 | 0.019608 | 0 | 0.215686 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
9886841ad97481579a6dce1caae1343591e52f5d | 9,593 | py | Python | Pressure_Converter/pressure_converter_script.py | Affanmir/Awesome-Python-Scripts | bba0512e1c580d605205744ece878da13f2c7661 | [
"MIT"
] | 1,026 | 2018-10-02T18:51:12.000Z | 2022-03-31T13:45:14.000Z | Pressure_Converter/pressure_converter_script.py | Affanmir/Awesome-Python-Scripts | bba0512e1c580d605205744ece878da13f2c7661 | [
"MIT"
] | 164 | 2018-10-02T18:37:40.000Z | 2021-11-18T13:29:54.000Z | Pressure_Converter/pressure_converter_script.py | Affanmir/Awesome-Python-Scripts | bba0512e1c580d605205744ece878da13f2c7661 | [
"MIT"
] | 521 | 2018-10-02T18:15:40.000Z | 2022-03-26T12:10:15.000Z | from typing import Union
def atmospeheres_to_bars(atm: float, unit: str) -> Union[float, str]:
"""
This function converts atm to bar
Wikipedia reference: https://en.wikipedia.org/wiki/Standard_atmosphere_(unit)
Wikipedia reference: https://en.wikipedia.org/wiki/Bar_(unit)
>>> atmospeheres_to_bars(2.5, "atm")
2.533125
>>> atmospeheres_to_bars("12", "atm")
12.158999999999999
>>> atmospeheres_to_bars(0, "atm")
0.0
>>> atmospeheres_to_bars(35, "mmHg")
'Invalid unit'
>>> atmospeheres_to_bars("atmospheres", "atm")
Traceback (most recent call last):
...
ValueError: could not convert string to float: 'atmospheres'
"""
if unit == "atm":
bar = float(atm) * 1.01325
return bar
else:
return "Invalid unit"
def bars_to_atmospheres(bar: float, unit: str) -> Union[float, str]:
"""
This function converts bar to atm
Wikipedia reference: https://en.wikipedia.org/wiki/Standard_atmosphere_(unit)
Wikipedia reference: https://en.wikipedia.org/wiki/Bar_(unit)
>>> bars_to_atmospheres(36, "bar")
35.529237601776465
>>> bars_to_atmospheres("57.6", "bar")
56.84678016284234
>>> bars_to_atmospheres(0, "bar")
0.0
>>> bars_to_atmospheres(35, "Pa")
'Invalid unit'
>>> bars_to_atmospheres("barrs", "bar")
Traceback (most recent call last):
...
ValueError: could not convert string to float: 'barrs'
"""
if unit == "bar":
atm = float(bar) / 1.01325
return atm
else:
return "Invalid unit"
def atmospheres_to_milimeter_mercury(atm: float, unit: str) -> Union[float, str]:
"""
This function converts atm to mmHg
Wikipedia reference: https://en.wikipedia.org/wiki/Standard_atmosphere_(unit)
Wikipedia reference: https://en.wikipedia.org/wiki/Millimetre_of_mercury
>>> atmospheres_to_milimeter_mercury(2, "atm")
1520.0
>>> atmospheres_to_milimeter_mercury("6.9", "atm")
5244.0
>>> atmospheres_to_milimeter_mercury(0, "atm")
0.0
>>> atmospheres_to_milimeter_mercury(35, "torr")
'Invalid unit'
>>> atmospheres_to_milimeter_mercury("atmos", "atm")
Traceback (most recent call last):
...
ValueError: could not convert string to float: 'atmos'
"""
if unit == "atm":
mm_hg = float(atm) * 760
return mm_hg
else:
return "Invalid unit"
def milimeter_mercury_to_atmospheres(mm_hg: float, unit: str) -> Union[float, str]:
"""
This function converts mmHg to atm
Wikipedia reference: https://en.wikipedia.org/wiki/Standard_atmosphere_(unit)
Wikipedia reference: https://en.wikipedia.org/wiki/Millimetre_of_mercury
>>> milimeter_mercury_to_atmospheres(23506.92, "mmHg")
30.93015789473684
>>> milimeter_mercury_to_atmospheres("304000", "mmHg")
400.0
>>> milimeter_mercury_to_atmospheres(0, "mmHg")
0.0
>>> milimeter_mercury_to_atmospheres(35, "bar")
'Invalid unit'
>>> milimeter_mercury_to_atmospheres("merc", "mmHg")
Traceback (most recent call last):
...
ValueError: could not convert string to float: 'merc'
"""
if unit == "mmHg":
atm = float(mm_hg) / 760
return atm
else:
return "Invalid unit"
def atmospheres_to_pascals(atm: float, unit: str) -> Union[float, str]:
"""
This function converts atm to Pa
Wikipedia reference: https://en.wikipedia.org/wiki/Standard_atmosphere_(unit)
Wikipedia reference: https://en.wikipedia.org/wiki/Pascal_(unit)
>>> atmospheres_to_pascals(5.4, "atm")
547155.0
>>> atmospheres_to_pascals("7.098", "atm")
719204.85
>>> atmospheres_to_pascals(0, "atm")
0.0
>>> atmospheres_to_pascals(35, "Pa")
'Invalid unit'
>>> atmospheres_to_pascals("ats", "atm")
Traceback (most recent call last):
...
ValueError: could not convert string to float: 'ats'
"""
if unit == "atm":
pa = float(atm) * 101325
return pa
else:
return "Invalid unit"
def pascals_to_atmospheres(pa: float, unit: str) -> Union[float, str]:
"""
This function converts Pa to atm
Wikipedia reference: https://en.wikipedia.org/wiki/Standard_atmosphere_(unit)
Wikipedia reference: https://en.wikipedia.org/wiki/Pascal_(unit)
>>> pascals_to_atmospheres(202650, "Pa")
2.0
>>> pascals_to_atmospheres("1013250", "Pa")
10.0
>>> pascals_to_atmospheres(0, "Pa")
0.0
>>> pascals_to_atmospheres(35, "mmhg")
'Invalid unit'
>>> pascals_to_atmospheres("Pas", "Pa")
Traceback (most recent call last):
...
ValueError: could not convert string to float: 'Pas'
"""
if unit == "Pa":
atm = float(pa) / 101325
return atm
else:
return "Invalid unit"
def bars_to_milimeter_mercury(bar: float, unit: str) -> Union[float, str]:
"""
This function converts bar to mmHg
Wikipedia reference: https://en.wikipedia.org/wiki/Bar_(unit)
Wikipedia reference: https://en.wikipedia.org/wiki/Millimetre_of_mercury
>>> bars_to_milimeter_mercury(3.75, "bar")
2812.725
>>> bars_to_milimeter_mercury("0.82", "bar")
615.0491999999999
>>> bars_to_milimeter_mercury(0, "bar")
0.0
>>> bars_to_milimeter_mercury(3, "atm")
'Invalid unit'
>>> bars_to_milimeter_mercury("brs", "bar")
Traceback (most recent call last):
...
ValueError: could not convert string to float: 'brs'
"""
if unit == "bar":
mm_hg = float(bar) * round(760 / 1.01325, 2)
return mm_hg
else:
return "Invalid unit"
def milimeter_mercury_to_bars(mm_hg: float, unit: str) -> Union[float, str]:
"""
This function converts mmHg to bar
Wikipedia reference: https://en.wikipedia.org/wiki/Bar_(unit)
Wikipedia reference: https://en.wikipedia.org/wiki/Millimetre_of_mercury
>>> milimeter_mercury_to_bars(4970.5, "mmHg")
6.626803189078208
>>> milimeter_mercury_to_bars("378", "mmHg")
0.503959683225342
>>> milimeter_mercury_to_bars(0, "mmHg")
0.0
>>> milimeter_mercury_to_bars(3, "bar")
'Invalid unit'
>>> milimeter_mercury_to_bars("brs", "mmHg")
Traceback (most recent call last):
...
ValueError: could not convert string to float: 'brs'
"""
if unit == "mmHg":
bar = float(mm_hg) / round(760 / 1.01325, 2)
return bar
else:
return "Invalid unit"
def bars_to_pascals(bar: float, unit: str) -> Union[float, str]:
"""
This function converts bar to Pa
Wikipedia reference: https://en.wikipedia.org/wiki/Bar_(unit)
Wikipedia reference: https://en.wikipedia.org/wiki/Pascal_(unit)
>>> bars_to_pascals(0.653, "bar")
65300.0
>>> bars_to_pascals("1.2", "bar")
120000.0
>>> bars_to_pascals(0, "bar")
0.0
>>> bars_to_pascals(3.1, "Pa")
'Invalid unit'
>>> bars_to_pascals("bP", "bar")
Traceback (most recent call last):
...
ValueError: could not convert string to float: 'bP'
"""
if unit == "bar":
pa = float(bar) * 100000
return pa
else:
return "Invalid unit"
def pascals_to_bars(pa: float, unit: str) -> Union[float, str]:
"""
This function converts Pa to bar
Wikipedia reference: https://en.wikipedia.org/wiki/Bar_(unit)
Wikipedia reference: https://en.wikipedia.org/wiki/Pascal_(unit)
>>> pascals_to_bars(45000, "Pa")
0.45
>>> pascals_to_bars("1200000", "Pa")
12.0
>>> pascals_to_bars(0, "Pa")
0.0
>>> pascals_to_bars(3.1, "mmHg")
'Invalid unit'
>>> pascals_to_bars("pass", "Pa")
Traceback (most recent call last):
...
ValueError: could not convert string to float: 'pass'
"""
if unit == "Pa":
bar = float(pa) / 100000
return bar
else:
return "Invalid unit"
def milimeter_mercury_to_pascals(mm_hg: float, unit: str) -> Union[float, str]:
"""
This function converts mmHg to Pa
Wikipedia reference: https://en.wikipedia.org/wiki/Millimetre_of_mercury
Wikipedia reference: https://en.wikipedia.org/wiki/Pascal_(unit)
>>> milimeter_mercury_to_pascals(25, "mmHg")
3333.0
>>> milimeter_mercury_to_pascals("652", "mmHg")
86924.64
>>> milimeter_mercury_to_pascals(0, "mmHg")
0.0
>>> milimeter_mercury_to_pascals(342.1, "bar")
'Invalid unit'
>>> milimeter_mercury_to_pascals("mercurium", "mmHg")
Traceback (most recent call last):
...
ValueError: could not convert string to float: 'mercurium'
"""
if unit == "mmHg":
pa = float(mm_hg) * round(101325 / 760, 2)
return pa
else:
return "Invalid unit"
def pascals_to_milimeter_mercury(pa: float, unit: str) -> Union[float, str]:
"""
This function converts Pa to mmHg
Wikipedia reference: https://en.wikipedia.org/wiki/Millimetre_of_mercury
Wikipedia reference: https://en.wikipedia.org/wiki/Pascal_(unit)
>>> pascals_to_milimeter_mercury(153000, "Pa")
1147.6147614761476
>>> pascals_to_milimeter_mercury("97650.8", "Pa")
732.4542454245425
>>> pascals_to_milimeter_mercury(0, "Pa")
0.0
>>> pascals_to_milimeter_mercury(342.1, "mmhg")
'Invalid unit'
>>> pascals_to_milimeter_mercury("merc", "Pa")
Traceback (most recent call last):
...
ValueError: could not convert string to float: 'merc'
"""
if unit == "Pa":
mm_hg = float(pa) / round(101325 / 760, 2)
return mm_hg
else:
return "Invalid unit"
if __name__ == "__main__":
import doctest
doctest.testmod()
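# Every converter above multiplies or divides by a fixed factor, so the whole
# module collapses to one table-driven function. A sketch (the names TO_PA and
# convert are assumptions, not part of this module):
#
#     TO_PA = {"Pa": 1.0, "bar": 100000.0, "atm": 101325.0, "mmHg": 101325.0 / 760}
#
#     def convert(value: float, src: str, dst: str) -> float:
#         return float(value) * TO_PA[src] / TO_PA[dst]
#
#     convert(1, "atm", "mmHg")  # -> 760.0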
| 29.791925 | 83 | 0.640154 | 1,231 | 9,593 | 4.796913 | 0.105605 | 0.097544 | 0.09348 | 0.101609 | 0.710754 | 0.644708 | 0.595428 | 0.574598 | 0.561897 | 0.508721 | 0 | 0.06199 | 0.221411 | 9,593 | 321 | 84 | 29.884735 | 0.728612 | 0.648494 | 0 | 0.631579 | 0 | 0 | 0.074662 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.157895 | false | 0 | 0.026316 | 0 | 0.5 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
9886f6ef61ce8671578994a8e3555d6131699f26 | 444 | py | Python | jp.atcoder/abc129/abc129_c/10071155.py | kagemeka/atcoder-submissions | 91d8ad37411ea2ec582b10ba41b1e3cae01d4d6e | [
"MIT"
] | 1 | 2022-02-09T03:06:25.000Z | 2022-02-09T03:06:25.000Z | jp.atcoder/abc129/abc129_c/10071155.py | kagemeka/atcoder-submissions | 91d8ad37411ea2ec582b10ba41b1e3cae01d4d6e | [
"MIT"
] | 1 | 2022-02-05T22:53:18.000Z | 2022-02-09T01:29:30.000Z | jp.atcoder/abc129/abc129_c/10071155.py | kagemeka/atcoder-submissions | 91d8ad37411ea2ec582b10ba41b1e3cae01d4d6e | [
"MIT"
] | null | null | null | import sys
MOD = 10 ** 9 + 7
n, m, *a = map(int, sys.stdin.read().split())
broken = set(a)
def main():
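    # DP over stair climbing: res[i] = number of ways to reach step i using
    # steps of size 1 or 2 while avoiding broken steps, taken mod 10**9 + 7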
res = [None] * (n + 1)
res[0] = 1
res[1] = 0 if 1 in broken else 1
for i in range(2, n+1):
if i in broken:
res[i] = 0
else:
res[i] = res[i-2] + res[i-1]
res[i] %= MOD
return res[n]
if __name__ == '__main__':
ans = main()
print(ans)
| 17.76 | 46 | 0.432432 | 72 | 444 | 2.555556 | 0.458333 | 0.108696 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.060377 | 0.403153 | 444 | 24 | 47 | 18.5 | 0.633962 | 0 | 0 | 0 | 0 | 0 | 0.019002 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.055556 | false | 0 | 0.055556 | 0 | 0.166667 | 0.055556 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
9886fd5ef77fff993bad45edb77225659ddd988c | 6,569 | py | Python | venv/lib/python3.6/site-packages/ansible_collections/cisco/nxos/plugins/modules/nxos_banner.py | usegalaxy-no/usegalaxy | 75dad095769fe918eb39677f2c887e681a747f3a | [
"MIT"
] | 1 | 2020-01-22T13:11:23.000Z | 2020-01-22T13:11:23.000Z | venv/lib/python3.6/site-packages/ansible_collections/cisco/nxos/plugins/modules/nxos_banner.py | usegalaxy-no/usegalaxy | 75dad095769fe918eb39677f2c887e681a747f3a | [
"MIT"
] | 12 | 2020-02-21T07:24:52.000Z | 2020-04-14T09:54:32.000Z | venv/lib/python3.6/site-packages/ansible_collections/cisco/nxos/plugins/modules/nxos_banner.py | usegalaxy-no/usegalaxy | 75dad095769fe918eb39677f2c887e681a747f3a | [
"MIT"
] | null | null | null | #!/usr/bin/python
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function
__metaclass__ = type
# (c) 2017, Ansible by Red Hat, inc
#
# This file is part of Ansible by Red Hat
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
DOCUMENTATION = """
module: nxos_banner
author: Trishna Guha (@trishnaguha)
short_description: Manage multiline banners on Cisco NXOS devices
description:
- This will configure both exec and motd banners on remote devices running Cisco NXOS.
It allows playbooks to add or remove banner text from the active running configuration.
notes:
- Since responses from the device are always read with surrounding whitespace stripped,
  tasks that configure banners with preceding or trailing whitespace will not be idempotent.
- Limited support for Cisco MDS.
version_added: 1.0.0
options:
banner:
description:
- Specifies which banner that should be configured on the remote device.
required: true
choices:
- exec
- motd
type: str
text:
description:
- The banner text that should be present in the remote device running configuration.
This argument accepts a multiline string, with no empty lines. Requires I(state=present).
type: str
state:
description:
- Specifies whether or not the configuration is present in the current devices
active running configuration.
default: present
choices:
- present
- absent
type: str
extends_documentation_fragment:
- cisco.nxos.nxos
"""
EXAMPLES = """
- name: configure the exec banner
cisco.nxos.nxos_banner:
banner: exec
text: |
this is my exec banner
that contains a multiline
string
state: present
- name: remove the motd banner
cisco.nxos.nxos_banner:
banner: motd
state: absent
- name: Configure banner from file
cisco.nxos.nxos_banner:
banner: motd
text: "{{ lookup('file', './config_partial/raw_banner.cfg') }}"
state: present
"""
RETURN = """
commands:
description: The list of configuration mode commands to send to the device
returned: always
type: list
sample:
- banner exec
- this is my exec banner
- that contains a multiline
- string
"""
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_text
from ansible_collections.cisco.nxos.plugins.module_utils.network.nxos.nxos import (
load_config,
run_commands,
)
from ansible_collections.cisco.nxos.plugins.module_utils.network.nxos.nxos import (
nxos_argument_spec,
)
import re
def execute_show_command(module, command):
format = "text"
cmds = [{"command": command, "output": format}]
output = run_commands(module, cmds)
return output
def map_obj_to_commands(want, have, module):
commands = list()
state = module.params["state"]
platform_regex = "Nexus.*Switch"
if state == "absent":
if have.get("text") and not (
(have.get("text") == "User Access Verification")
or re.match(platform_regex, have.get("text"))
):
commands.append("no banner %s" % module.params["banner"])
elif state == "present" and want.get("text") != have.get("text"):
banner_cmd = "banner %s @\n%s\n@" % (
module.params["banner"],
want["text"],
)
commands.append(banner_cmd)
return commands
def map_config_to_obj(module):
command = "show banner %s" % module.params["banner"]
output = execute_show_command(module, command)[0]
if "Invalid command" in output:
module.fail_json(
msg="banner: %s may not be supported on this platform. Possible values are : exec | motd"
% module.params["banner"]
)
if isinstance(output, dict):
output = list(output.values())
if output != []:
output = output[0]
else:
output = ""
if isinstance(output, dict):
output = list(output.values())
if output != []:
output = output[0]
else:
output = ""
else:
output = output.rstrip()
obj = {"banner": module.params["banner"], "state": "absent"}
if output:
obj["text"] = output
obj["state"] = "present"
return obj
def map_params_to_obj(module):
text = module.params["text"]
return {
"banner": module.params["banner"],
"text": to_text(text) if text else None,
"state": module.params["state"],
}
def main():
""" main entry point for module execution
"""
argument_spec = dict(
banner=dict(required=True, choices=["exec", "motd"]),
text=dict(),
state=dict(default="present", choices=["present", "absent"]),
)
argument_spec.update(nxos_argument_spec)
required_if = [("state", "present", ("text",))]
module = AnsibleModule(
argument_spec=argument_spec,
required_if=required_if,
supports_check_mode=True,
)
warnings = list()
result = {"changed": False}
if warnings:
result["warnings"] = warnings
want = map_params_to_obj(module)
have = map_config_to_obj(module)
commands = map_obj_to_commands(want, have, module)
result["commands"] = commands
if commands:
if not module.check_mode:
msgs = load_config(module, commands, True)
if msgs:
for item in msgs:
if item:
if isinstance(item, dict):
err_str = item["clierror"]
else:
err_str = item
if (
"more than 40 lines" in err_str
or "buffer overflowed" in err_str
):
load_config(module, commands)
result["changed"] = True
module.exit_json(**result)
if __name__ == "__main__":
main()
| 28.437229 | 102 | 0.630994 | 803 | 6,569 | 5.052304 | 0.305106 | 0.026621 | 0.026621 | 0.01405 | 0.229973 | 0.146167 | 0.108948 | 0.094158 | 0.094158 | 0.094158 | 0 | 0.002922 | 0.270665 | 6,569 | 230 | 103 | 28.56087 | 0.843874 | 0.114173 | 0 | 0.210227 | 0 | 0 | 0.405765 | 0.023127 | 0 | 0 | 0 | 0 | 0 | 1 | 0.028409 | false | 0 | 0.034091 | 0 | 0.085227 | 0.005682 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
98877c694a0a259f7bba574f818a2c84334902b2 | 8,555 | py | Python | guv/greenthread.py | timgates42/guv | d7bac2ca6a73cc2059969af08223b82f3e187922 | [
"MIT"
] | 120 | 2015-01-05T15:15:26.000Z | 2020-07-28T11:25:10.000Z | guv/greenthread.py | timgates42/guv | d7bac2ca6a73cc2059969af08223b82f3e187922 | [
"MIT"
] | 22 | 2015-01-12T21:52:32.000Z | 2017-01-22T18:18:20.000Z | guv/greenthread.py | timgates42/guv | d7bac2ca6a73cc2059969af08223b82f3e187922 | [
"MIT"
] | 13 | 2015-01-18T11:42:34.000Z | 2021-07-15T10:59:22.000Z | from collections import deque
import sys
import greenlet
from . import event, hubs
from .support import reraise
__all__ = ['sleep', 'spawn', 'spawn_n', 'kill', 'spawn_after', 'GreenThread']
def sleep(seconds=0):
"""Yield control to the hub until at least `seconds` have elapsed
:param float seconds: time to sleep for
"""
hub = hubs.get_hub()
current = greenlet.getcurrent()
assert hub is not current, 'do not call blocking functions from the hub'
timer = hub.schedule_call_global(seconds, current.switch)
try:
hub.switch()
finally:
timer.cancel()
def spawn_n(func, *args, **kwargs):
"""Spawn a greenlet
Execution control returns immediately to the caller; the created greenlet is scheduled to be run
at the start of the next event loop iteration, after other scheduled greenlets, but before
greenlets waiting for I/O events.
This is faster than :func:`spawn`, but it is not possible to retrieve the return value of
the greenlet, or whether it raised any exceptions. It is fastest if there are no keyword
arguments.
If an exception is raised in the function, a stack trace is printed; the print can be
disabled by calling :func:`guv.debug.hub_exceptions` with False.
:return: greenlet object
:rtype: greenlet.greenlet
"""
hub = hubs.get_hub()
g = greenlet.greenlet(func, parent=hub)
hub.schedule_call_now(g.switch, *args, **kwargs)
return g
def spawn(func, *args, **kwargs):
"""Spawn a GreenThread
Execution control returns immediately to the caller; the created GreenThread is scheduled to
be run at the start of the next event loop iteration, after other scheduled greenlets,
but before greenlets waiting for I/O events.
:return: GreenThread object which can be used to retrieve the return value of the function
:rtype: GreenThread
"""
hub = hubs.get_hub()
g = GreenThread(hub)
hub.schedule_call_now(g.switch, func, *args, **kwargs)
return g
def spawn_after(seconds, func, *args, **kwargs):
"""Spawn a GreenThread after `seconds` have elapsed
Execution control returns immediately to the caller.
To cancel the spawn and prevent *func* from being called, call :meth:`GreenThread.cancel` on the
returned GreenThread. This will not abort the function if it's already started running, which is
generally the desired behavior. If terminating *func* regardless of whether it's started or not
is the desired behavior, call :meth:`GreenThread.kill`.
:return: GreenThread object which can be used to retrieve the return value of the function
:rtype: GreenThread
"""
hub = hubs.get_hub()
g = GreenThread(hub)
hub.schedule_call_global(seconds, g.switch, func, *args, **kwargs)
return g
def _spawn_n(seconds, func, args, kwargs):
hub = hubs.get_hub()
g = greenlet.greenlet(func, parent=hub)
t = hub.schedule_call_global(seconds, g.switch, *args, **kwargs)
return t, g
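# A usage sketch grounded in the API above (assumes a running guv hub):
#
#     gt = spawn(lambda a, b: a + b, 1, 2)
#     print(gt.wait())         # -> 3; wait() re-raises if the function raised
#     later = spawn_after(5.0, print, "hello")
#     later.cancel()           # no effect once the function has started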
class GreenThread(greenlet.greenlet):
"""The GreenThread class is a type of Greenlet which has the additional property of being able
to retrieve the return value of the main function. Do not construct GreenThread objects
directly; call :func:`spawn` to get one.
"""
def __init__(self, parent):
"""
:param parent: parent greenlet
:type parent: greenlet.greenlet
"""
greenlet.greenlet.__init__(self, self.main, parent)
self._exit_event = event.Event()
self._resolving_links = False
def wait(self):
"""Return the result of the main function of this GreenThread
If the result is a normal return value, :meth:`wait` returns it. If it raised an exception,
:meth:`wait` will raise the same exception (though the stack trace will unavoidably contain
some frames from within the GreenThread module).
"""
return self._exit_event.wait()
def link(self, func, *curried_args, **curried_kwargs):
"""Set up a function to be called with the results of the GreenThread
The function must have the following signature::
func(gt, [curried args/kwargs])
When the GreenThread finishes its run, it calls *func* with itself and with the `curried
arguments <http://en.wikipedia.org/wiki/Currying>`_ supplied at link-time. If the function
wants to retrieve the result of the GreenThread, it should call wait() on its first
argument.
Note that *func* is called within execution context of the GreenThread, so it is possible to
interfere with other linked functions by doing things like switching explicitly to another
GreenThread.
"""
self._exit_funcs = getattr(self, '_exit_funcs', deque())
self._exit_funcs.append((func, curried_args, curried_kwargs))
if self._exit_event.ready():
self._resolve_links()
def unlink(self, func, *curried_args, **curried_kwargs):
"""Remove linked function set by :meth:`link`
Remove successfully return True, otherwise False
"""
if not getattr(self, '_exit_funcs', None):
return False
try:
self._exit_funcs.remove((func, curried_args, curried_kwargs))
return True
except ValueError:
return False
def main(self, function, *args, **kwargs):
try:
result = function(*args, **kwargs)
except:
self._exit_event.send_exception(*sys.exc_info())
self._resolve_links()
raise
else:
self._exit_event.send(result)
self._resolve_links()
def _resolve_links(self):
# ca and ckw are the curried function arguments
if self._resolving_links:
return
self._resolving_links = True
try:
exit_funcs = getattr(self, '_exit_funcs', deque())
while exit_funcs:
f, ca, ckw = exit_funcs.popleft()
f(self, *ca, **ckw)
finally:
self._resolving_links = False
def kill(self, *throw_args):
"""Kill the GreenThread using :func:`kill`
After being killed all calls to :meth:`wait` will raise `throw_args` (which default to
:class:`greenlet.GreenletExit`).
"""
return kill(self, *throw_args)
def cancel(self, *throw_args):
"""Kill the GreenThread using :func:`kill`, but only if it hasn't already started running
After being canceled, all calls to :meth:`wait` will raise `throw_args` (which default to
:class:`greenlet.GreenletExit`).
"""
return cancel(self, *throw_args)
def cancel(g, *throw_args):
"""Cancel the target greenlet/GreenThread if it hasn't already started
This is like :func:`kill`, but only has an effect if the target greenlet/GreenThread has not
yet started.
"""
if not g:
kill(g, *throw_args)
def kill(g, *throw_args):
"""Terminate the target greenlet/GreenThread by raising an exception into it
Whatever that GreenThread might be doing, be it waiting for I/O or another primitive, it sees an
exception right away.
By default, this exception is GreenletExit, but a specific exception may be specified.
`throw_args` should be the same as the arguments to raise; either an exception instance or an
exc_info tuple.
Calling :func:`kill` causes the calling greenlet to cooperatively yield.
:param g: target greenlet/GreenThread to kill
:type g: greenlet.greenlet or GreenThread
"""
if g.dead:
return
hub = hubs.get_hub()
if not g:
# greenlet hasn't started yet and therefore throw won't work on its own; semantically we
# want it to be as though the main method never got called
def just_raise(*a, **kw):
if throw_args:
reraise(throw_args[0], throw_args[1], throw_args[2])
else:
raise greenlet.GreenletExit()
g.run = just_raise
if isinstance(g, GreenThread):
# it's a GreenThread object, so we want to call its main method to take advantage of
# the notification
try:
g.main(just_raise, (), {})
except:
pass
current = greenlet.getcurrent()
if current is not hub:
# arrange to wake the caller back up immediately
hub.schedule_call_now(current.switch)
g.throw(*throw_args)
| 35.205761 | 100 | 0.658679 | 1,151 | 8,555 | 4.801911 | 0.24066 | 0.024426 | 0.010856 | 0.014113 | 0.304324 | 0.265243 | 0.2296 | 0.18491 | 0.18491 | 0.136059 | 0 | 0.000631 | 0.25903 | 8,555 | 242 | 101 | 35.35124 | 0.871273 | 0.513968 | 0 | 0.359223 | 0 | 0 | 0.032119 | 0 | 0 | 0 | 0 | 0 | 0.009709 | 1 | 0.15534 | false | 0.009709 | 0.048544 | 0 | 0.330097 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
988be3dce863409b6b062d305bd83405f34d1ed8 | 460 | py | Python | While_loop/while_loop.py | SaicharanKandukuri/snippets-python-from-scrach | b0823fde3cf1a88bf43d97bdc542de7e32c76dac | [
"MIT"
] | 1 | 2021-05-29T03:09:24.000Z | 2021-05-29T03:09:24.000Z | While_loop/while_loop.py | SaicharanKandukuri/snippets-python-from-scrach | b0823fde3cf1a88bf43d97bdc542de7e32c76dac | [
"MIT"
] | null | null | null | While_loop/while_loop.py | SaicharanKandukuri/snippets-python-from-scrach | b0823fde3cf1a88bf43d97bdc542de7e32c76dac | [
"MIT"
] | null | null | null | name = None
x=0
while not name:
x=x+1
if x<3:
name = input("Enter your name: ")
if x==3:
name = input("Man enter your name : ")
if x==4:
name = input("Why are spamming mate: ")
if x==5:
name = input("For God sake: ")
if x==6:
name = input("Bro!........")
if x==7:
print("Iam out mate!")
exit(0)
print("Hello "+ name)
if x>=5:
print("Nice to hear you name :)") | 20.909091 | 47 | 0.465217 | 71 | 460 | 3.014085 | 0.478873 | 0.098131 | 0.098131 | 0.074766 | 0.257009 | 0 | 0 | 0 | 0 | 0 | 0 | 0.033898 | 0.358696 | 460 | 22 | 48 | 20.909091 | 0.691525 | 0 | 0 | 0 | 0 | 0 | 0.284165 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0.15 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
988edc13ccae6c0801420ecb6d188c9b4a82c75b | 1,052 | py | Python | Lecture_notes/数据提取与验证码的识别(下)/code/多线程爬虫.py | littleturings/2021PythonWebCrawler | a9089a912affce4369cf50df3c22c55eb4ebf2d5 | [
"MIT"
] | 1 | 2021-02-03T08:28:16.000Z | 2021-02-03T08:28:16.000Z | Lecture_notes/数据提取与验证码的识别(下)/code/多线程爬虫.py | littleturings/2021PythonWebCrawler | a9089a912affce4369cf50df3c22c55eb4ebf2d5 | [
"MIT"
] | null | null | null | Lecture_notes/数据提取与验证码的识别(下)/code/多线程爬虫.py | littleturings/2021PythonWebCrawler | a9089a912affce4369cf50df3c22c55eb4ebf2d5 | [
"MIT"
] | null | null | null | from threading import Thread
import requests
from lxml import etree
from fake_useragent import UserAgent
from queue import Queue

# one lock shared by all Spider threads so concurrent file appends cannot interleave
file_lock = Lock()


class Spider(Thread):
def __init__(self,url_queue):
Thread.__init__(self)
self.url_queue = url_queue
def run(self):
while not self.url_queue.empty():
url = self.url_queue.get()
print(url)
header = {"User-Agent": UserAgent().chrome}
resp = requests.get(url, headers=header)
e = etree.HTML(resp.text)
contents = [div.xpath('string(.)').strip() for div in e.xpath("//div[@class='content']")]
            # take the shared lock so appends from different threads stay whole
            with file_lock:
                with open('duanzi.txt', 'a', encoding='utf-8') as f:
                    for content in contents:
                        f.write(content + "\n")
if __name__ == "__main__":
base_url = "https://www.qiushibaike.com/text/page/{}/"
url_queue =Queue()
for num in range(1,6):
url_queue.put(base_url.format(num))
for num in range(3):
spider = Spider(url_queue)
spider.start()
| 30.941176 | 101 | 0.586502 | 136 | 1,052 | 4.338235 | 0.5 | 0.108475 | 0.081356 | 0.044068 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.005298 | 0.282319 | 1,052 | 34 | 102 | 30.941176 | 0.776159 | 0 | 0 | 0 | 0 | 0 | 0.103514 | 0.021842 | 0 | 0 | 0 | 0 | 0 | 1 | 0.071429 | false | 0 | 0.178571 | 0 | 0.285714 | 0.035714 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
988feb164822c678128261338e61810b6c4748b8 | 486 | py | Python | __isVowel.py | simdevex/01.Basics | cf4f372384e66f4b26e4887d2f5d815a1f8e929c | [
"MIT"
] | null | null | null | __isVowel.py | simdevex/01.Basics | cf4f372384e66f4b26e4887d2f5d815a1f8e929c | [
"MIT"
] | null | null | null | __isVowel.py | simdevex/01.Basics | cf4f372384e66f4b26e4887d2f5d815a1f8e929c | [
"MIT"
] | null | null | null | '''
A Python program to test whether a passed letter is a vowel or not.
'''
def isVowel(inputStr):
    vowelTuple = ('a', 'e', 'i', 'o', 'u')
    # compare case-insensitively so that 'A' also counts as a vowel
    return inputStr.lower() in vowelTuple

def main():
    myStr = input("Enter a letter: ")
    isStrVowel = isVowel(myStr)
    if isStrVowel:
        print("You input a vowel")
    else:
        print("You input a consonant")

main()
| 20.25 | 66 | 0.524691 | 57 | 486 | 4.473684 | 0.596491 | 0.047059 | 0.101961 | 0.109804 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.364198 | 486 | 24 | 67 | 20.25 | 0.825243 | 0.135802 | 0 | 0 | 0 | 0 | 0.13835 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.142857 | false | 0 | 0 | 0 | 0.285714 | 0.142857 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
9890311c2c70976d1163fe80f05202d37c39505b | 5,821 | py | Python | scripts/cscap/harvest_soil_bd.py | isudatateam/datateam | eb8e1dad6c05cb1b236689862fe87c56b25ea6fc | [
"MIT"
] | 5 | 2017-05-20T04:51:55.000Z | 2022-03-07T18:55:27.000Z | scripts/cscap/harvest_soil_bd.py | isudatateam/datateam | eb8e1dad6c05cb1b236689862fe87c56b25ea6fc | [
"MIT"
] | 275 | 2017-03-09T20:31:30.000Z | 2022-03-30T22:43:47.000Z | scripts/cscap/harvest_soil_bd.py | isudatateam/datateam | eb8e1dad6c05cb1b236689862fe87c56b25ea6fc | [
"MIT"
] | 3 | 2020-06-01T15:03:06.000Z | 2021-02-01T13:46:58.000Z | """Scrape out the Soil Bulk Density and Texture data from Google Drive"""
from __future__ import print_function
import sys
import psycopg2
import pyiem.cscap_utils as util
YEAR = sys.argv[1]
config = util.get_config()
pgconn = psycopg2.connect(
database="sustainablecorn", host=config["database"]["host"]
)
pcursor = pgconn.cursor()
# Get me a client, stat
spr_client = util.get_spreadsheet_client(config)
drive_client = util.get_driveclient(config)
res = (
drive_client.files()
.list(
q=("title contains '%s'")
% (("Soil Bulk Density and " "Water Retention Data"),)
)
.execute()
)
DOMAIN = [
"SOIL1",
"SOIL2",
"SOIL29",
"SOIL30",
"SOIL31",
"SOIL32",
"SOIL8",
"SOIL33",
"SOIL34",
"SOIL35",
"SOIL39",
"SOIL41",
"SOIL42",
]
# Load up current data, in case we need to do some deleting
current = {}
pcursor.execute(
"""
SELECT uniqueid, plotid, varname, depth, subsample
from soil_data WHERE year = %s and varname in %s
""",
(YEAR, tuple(DOMAIN)),
)
for row in pcursor:
key = "|".join([str(s) for s in row])
current[key] = True
for item in res["items"]:
if item["mimeType"] != "application/vnd.google-apps.spreadsheet":
continue
try:
# print("Processing %s %s" % (item['title'], item['id']))
spreadsheet = util.Spreadsheet(spr_client, item["id"])
except Exception as exp:
print("harvest_soil_bd FAIL: %s\n%s" % (exp, item["title"]))
continue
siteid = item["title"].split()[0]
spreadsheet.get_worksheets()
worksheet = spreadsheet.worksheets.get(YEAR)
if worksheet is None:
# print 'Missing Soil BD+WR %s sheet for %s' % (YEAR, siteid)
continue
worksheet.get_cell_feed()
if siteid == "DPAC":
pass
elif (
worksheet.get_cell_value(1, 1) != "plotid"
or worksheet.get_cell_value(1, 2) != "depth"
or worksheet.get_cell_value(1, 3) != "subsample"
):
print(
("FATAL site: %s(%s) bd & wr has bad header 1:%s 2:%s 3:%s")
% (
siteid,
YEAR,
worksheet.get_cell_value(1, 1),
worksheet.get_cell_value(1, 2),
worksheet.get_cell_value(1, 3),
)
)
continue
for row in range(3, worksheet.rows + 1):
plotid = worksheet.get_cell_value(row, 1)
if siteid == "DPAC":
depth = worksheet.get_cell_value(row, 3)
# Combine the location value into the subsample
subsample = "%s%s" % (
worksheet.get_cell_value(row, 2),
worksheet.get_cell_value(row, 4),
)
else:
depth = worksheet.get_cell_value(row, 2)
subsample = worksheet.get_cell_value(row, 3)
if depth.find(" to ") == -1:
print(
("harvest_soil_bd found invalid depth: %s %s %s")
% (depth, siteid, YEAR)
)
continue
if plotid is None or depth is None:
continue
for col in range(4, worksheet.cols + 1):
if worksheet.get_cell_value(1, col) is None:
# print(("harvest_soil_bd %s(%s) row: %s col: %s is null"
# ) % (siteid, YEAR, row, col))
continue
varname = worksheet.get_cell_value(1, col).strip().split()[0]
if varname[:4] != "SOIL":
# print 'Invalid varname: %s site: %s year: %s' % (
# worksheet.get_cell_value(1,col).strip(),
# siteid, YEAR)
continue
inval = worksheet.get_cell_value(row, col)
val = util.cleanvalue(inval)
if inval is not None and val is None:
print(
(
"harvest_soil_bd found None. site: %s year: %s "
" row: %s col: %s varname: %s"
)
% (siteid, YEAR, row, col, varname)
)
try:
pcursor.execute(
"""
INSERT into soil_data(uniqueid, plotid, varname, year,
depth, value, subsample)
values (%s, %s, %s, %s, %s, %s, %s)
""",
(siteid, plotid, varname, YEAR, depth, val, subsample),
)
except Exception as exp:
print("HARVEST_SOIL_BD TRACEBACK")
print(exp)
print(
("%s %s %s %s %s %s")
% (siteid, plotid, varname, depth, val, subsample)
)
sys.exit()
key = "%s|%s|%s|%s|%s" % (
siteid,
plotid,
varname,
depth,
subsample,
)
if key in current:
del current[key]
for key in current:
(siteid, plotid, varname, depth, subsample) = key.split("|")
if varname in DOMAIN:
print(
("harvest_soil_bd rm %s %s %s %s %s %s")
% (YEAR, siteid, plotid, varname, repr(depth), repr(subsample))
)
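        # NULL never matches "= %s", so the depth/subsample predicates are
        # built by hand; both values were read back from our own database rows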
d1 = "depth is null" if depth == "None" else "depth = '%s'" % (depth,)
d2 = (
"subsample is null"
if subsample == "None"
else "subsample = '%s'" % (subsample,)
)
pcursor.execute(
"""DELETE from soil_data where uniqueid = %s and
plotid = %s and varname = %s and year = %s and """
+ d1
+ """ and
"""
+ d2,
(siteid, plotid, varname, YEAR),
)
pcursor.close()
pgconn.commit()
pgconn.close()
| 30.798942 | 78 | 0.49373 | 646 | 5,821 | 4.349845 | 0.252322 | 0.018505 | 0.096797 | 0.119573 | 0.254093 | 0.196441 | 0.078292 | 0.05694 | 0.029893 | 0 | 0 | 0.017208 | 0.381034 | 5,821 | 188 | 79 | 30.962766 | 0.762698 | 0.093455 | 0 | 0.154839 | 0 | 0.006452 | 0.133044 | 0.00807 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0.006452 | 0.025806 | 0 | 0.025806 | 0.058065 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
9891dd87fdf4f97e52addf857b05cd50067c4326 | 3,335 | py | Python | app/training.py | m-triple-m/project_junky | e55d3eeae6e97f002ab3087245e9dbeb4ed3eafd | [
"MIT"
] | null | null | null | app/training.py | m-triple-m/project_junky | e55d3eeae6e97f002ab3087245e9dbeb4ed3eafd | [
"MIT"
] | null | null | null | app/training.py | m-triple-m/project_junky | e55d3eeae6e97f002ab3087245e9dbeb4ed3eafd | [
"MIT"
] | null | null | null | import pandas as pd
import numpy as np
import pickle
import sklearn.ensemble as ske
from sklearn import tree, linear_model
from sklearn.model_selection import train_test_split
from sklearn.feature_selection import SelectFromModel
import joblib  # sklearn.externals.joblib was removed from modern scikit-learn
from sklearn.naive_bayes import GaussianNB
from sklearn.metrics import confusion_matrix
from sklearn.utils import shuffle
import os
def AI_Trainer(dataset, classifier_file, feature_file):
data = pd.read_csv(dataset, sep='|')
X = data.drop(['Name', 'md5', 'legitimate'], axis=1).values
y = data['legitimate'].values
print('Researching important feature based on %i total features\n' % X.shape[1])
# Feature selection using Trees Classifier
fsel = ske.ExtraTreesClassifier().fit(X, y)
model = SelectFromModel(fsel, prefit=True)
X_new = model.transform(X)
nb_features = X_new.shape[1]
X_new, y = shuffle(X_new, y, random_state=0)
    X_train, X_test, y_train, y_test = train_test_split(X_new, y, test_size=0.2)
features = []
print('%i features identified as important:' % nb_features)
indices = np.argsort(fsel.feature_importances_)[::-1][:nb_features]
for f in range(nb_features):
print("%d. feature %s (%f)" % (f + 1, data.columns[2+indices[f]], fsel.feature_importances_[indices[f]]))
# XXX : take care of the feature order
for f in sorted(np.argsort(fsel.feature_importances_)[::-1][:nb_features]):
features.append(data.columns[2+f])
#Algorithm comparison
algorithms = {
#"DecisionTree": tree.DecisionTreeClassifier(max_depth=10),
"RandomForest": ske.RandomForestClassifier(n_estimators=100,criterion='entropy',max_features="auto"),
#"GradientBoosting": ske.GradientBoostingClassifier(n_estimators=100,max_features="log2"),
#"AdaBoost": ske.AdaBoostClassifier(n_estimators=100),
#"GNB": GaussianNB()
}
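    # Only RandomForest is enabled above; uncomment the other entries (and the
    # max() call below) to actually compare algorithms.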
results = {}
print("\nNow testing algorithms")
for algo in algorithms:
clf = algorithms[algo]
clf.fit(X_train, y_train)
score = clf.score(X_test, y_test)
print("%s : %f %%" % (algo, score*100))
results[algo] = score
winner = "RandomForest" #max(results, key=results.get)
print('\nWinner algorithm is %s with a %f %% success' % (results[winner], results[winner]*100))
# Save the algorithm and the feature list for later predictions
print('Saving algorithm and feature list in classifier directory...')
joblib.dump(algorithms[winner], classifier_file)
    with open(feature_file, 'wb') as f:
        f.write(pickle.dumps(features))
print('Saved')
# Identify false and true positive rates
clf = algorithms[winner]
res = clf.predict(X_test)
mt = confusion_matrix(y_test, res)
print("False positive rate : %f %%" % ((mt[0][1] / float(sum(mt[0])))*100))
print('False negative rate : %f %%' % ( (mt[1][0] / float(sum(mt[1]))*100)))
def train():
    dataset = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'dataset.csv')
    classifier = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'classifier/classifier.pkl')
    features = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'classifier/feature.pkl')
AI_Trainer(dataset, classifier, features)
if __name__ == "__main__":
train() | 38.333333 | 113 | 0.686657 | 445 | 3,335 | 4.986517 | 0.359551 | 0.024335 | 0.00676 | 0.016224 | 0.102298 | 0.102298 | 0.102298 | 0.102298 | 0.064443 | 0.064443 | 0 | 0.015323 | 0.178111 | 3,335 | 87 | 114 | 38.333333 | 0.794236 | 0.133733 | 0 | 0 | 0 | 0 | 0.153526 | 0.016325 | 0 | 0 | 0 | 0 | 0 | 1 | 0.033898 | false | 0 | 0.288136 | 0 | 0.322034 | 0.169492 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
9894780acddb9d5e169d21af5bce8cf904e87b23 | 5,466 | py | Python | examples/Forecasting/models/forecast.py | vophihungvn/h1st | d421995bb0b8de6a5a76788261efef5b26bc7c12 | [
"Apache-2.0"
] | 1 | 2021-12-31T08:51:11.000Z | 2021-12-31T08:51:11.000Z | examples/Forecasting/models/forecast.py | vophihungvn/h1st | d421995bb0b8de6a5a76788261efef5b26bc7c12 | [
"Apache-2.0"
] | null | null | null | examples/Forecasting/models/forecast.py | vophihungvn/h1st | d421995bb0b8de6a5a76788261efef5b26bc7c12 | [
"Apache-2.0"
] | null | null | null | import h1st as h1
import pandas as pd
import os
import sklearn
import sklearn.metrics
import subprocess
import pathlib
from sklearn.ensemble import RandomForestRegressor
from sklearn.pipeline import Pipeline
from sklearn.compose import make_column_transformer
from sklearn.preprocessing import OneHotEncoder
from Forecasting import config
class ForecastModel(h1.MLModel):
def __init__(self):
super().__init__()
self.base_model = None
self.feature_cols = ['Open', 'Promo', 'StateHoliday', 'SchoolHoliday',
'DayOfWeek', 'DayOfMonth', 'Month',
'StoreType', 'Assortment', 'CompetitionDistance',
'CompetitionOpenSinceMonth', 'CompetitionOpenSinceYear',
'Promo2', 'Promo2SinceWeek', 'Promo2SinceYear']
self.data_dir = config.FORECAST_DATA_PATH
def load_data(self):
        # Requires the kaggle CLI tool, user credentials, and acceptance of the competition rules.
pathlib.Path(self.data_dir).mkdir(parents=True, exist_ok=True)
if not os.path.isfile(os.path.join(self.data_dir, "train.csv")):
print("Using `kaggle` command to download data from rossmann-store-sales competition.")
print("You'll need https://pypi.org/project/kaggle/ tool and agrees to the terms of the competition at https://www.kaggle.com/c/rossmann-store-sales/")
subprocess.run("kaggle competitions download -c rossmann-store-sales -p {data}/".format(data=self.data_dir), shell=True, check=True)
subprocess.run("cd {data}; unzip rossmann-store-sales.zip".format(data=self.data_dir), shell=True, check=True)
df = pd.read_csv(os.path.join(self.data_dir, "train.csv"), low_memory=False)
store_info = pd.read_csv(os.path.join(self.data_dir, "store.csv"))
df = df.merge(store_info, on="Store")
return df
def explore(self):
df = self.load_data()
import seaborn
print(df.count()) # count NA
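        # note: distplot is deprecated in recent seaborn releases;
        # seaborn.histplot(df.Sales) is the modern equivalent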
seaborn.distplot(df.Sales) # Sales distribution
def prep(self, loaded_data):
"""
Prepare data for modelling
:param loaded_data: data return from load_data method
:returns: dictionary contains train data and validation data
"""
df = loaded_data
df.fillna(0, inplace=True) # safe to fill, see countNA table below:
# Store 1017209
# DayOfWeek 1017209
# Date 1017209
# Sales 1017209
# Customers 1017209
# Open 1017209
# Promo 1017209
# StateHoliday 1017209
# SchoolHoliday 1017209
# StoreType 1017209
# Assortment 1017209
# CompetitionDistance 1014567
# CompetitionOpenSinceMonth 693861
# CompetitionOpenSinceYear 693861
# Promo2 1017209
# Promo2SinceWeek 509178
# Promo2SinceYear 509178
# PromoInterval 509178
# dtype: int64
df["Date"] = pd.to_datetime(df.Date)
df["DayOfWeek"] = df.Date.dt.dayofweek
df["DayOfMonth"] = df.Date.dt.day
df["Month"] = df.Date.dt.month
train_df = df[df["Date"] < "2015-06-01"]
val_df = df[df["Date"] >= "2015-06-01"]
print(len(train_df), len(val_df))
# sales only should get 949194 68015
# after dropNA on storeinfo: 302061 22265
return {
'train_df': train_df,
'val_df': val_df,
'len_train_val': (len(train_df), len(val_df))
}
def train(self, prepared_data):
train_df = prepared_data['train_df'][self.feature_cols]
sales = prepared_data['train_df']["Sales"]
transformer = make_column_transformer(
(OneHotEncoder(handle_unknown="ignore"), ['StateHoliday', "StoreType", "Assortment"]),
remainder="passthrough")
transformer.fit(train_df[self.feature_cols])
model = Pipeline([('transform', transformer),
('model', RandomForestRegressor(max_depth=10, n_estimators=200))])
model.fit(train_df, sales)
self.base_model = model
def evaluate(self, prepared_data):
val_df = prepared_data['val_df']
y_pred = self.base_model.predict(val_df[self.feature_cols])
y_true = val_df['Sales']
self.metrics = {'mae': sklearn.metrics.mean_absolute_error(y_true, y_pred),
}
def predict(self, input_data):
        # Repeat the prep steps here because input_data may not be "prepared",
        # e.g. when it comes from another test file.
store_info = pd.read_csv(os.path.join(self.data_dir, "store.csv"))
input_data = input_data.merge(store_info, on="Store")
input_data.fillna(0, inplace=True)
input_data["Date"] = pd.to_datetime(input_data.Date)
input_data["DayOfWeek"] = input_data.Date.dt.dayofweek
input_data["DayOfMonth"] = input_data.Date.dt.day
input_data["Month"] = input_data.Date.dt.month
input_data = input_data[self.feature_cols]
result = self.base_model.predict(input_data)
return result
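
# Illustrative end-to-end usage (assumes the h1st framework is configured and
# the Kaggle data is downloadable):
#     model = ForecastModel()
#     data = model.prep(model.load_data())
#     model.train(data)
#     model.evaluate(data)
#     print(model.metrics)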
| 41.725191 | 163 | 0.595134 | 614 | 5,466 | 5.136808 | 0.337134 | 0.045656 | 0.027901 | 0.017755 | 0.125872 | 0.098605 | 0.087191 | 0.075777 | 0.064046 | 0.029803 | 0 | 0.046554 | 0.304427 | 5,466 | 130 | 164 | 42.046154 | 0.783009 | 0.208196 | 0 | 0.024691 | 0 | 0.012346 | 0.178044 | 0.017124 | 0 | 0 | 0 | 0 | 0 | 1 | 0.08642 | false | 0.012346 | 0.160494 | 0 | 0.296296 | 0.049383 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
989688c2724f16a6ea2d06900ed3f29271555bc8 | 13,063 | py | Python | ceviche/fdtd.py | kwadwo00/ceviche | a1ee155304c4679b262e4fdf8c8a28bc4d060ec8 | [
"MIT"
] | 111 | 2019-11-14T13:55:15.000Z | 2022-03-29T12:19:01.000Z | ceviche/fdtd.py | kwadwo00/ceviche | a1ee155304c4679b262e4fdf8c8a28bc4d060ec8 | [
"MIT"
] | 13 | 2019-11-22T05:49:07.000Z | 2022-03-20T17:02:59.000Z | ceviche/fdtd.py | kwadwo00/ceviche | a1ee155304c4679b262e4fdf8c8a28bc4d060ec8 | [
"MIT"
] | 42 | 2019-11-13T19:29:06.000Z | 2022-03-19T11:58:09.000Z | import numpy as np
import autograd.numpy as npa
from copy import copy, deepcopy
from .constants import *
from .utils import reshape_to_ND, grid_center_to_xyz, grid_xyz_to_center
from .derivatives import curl_E, curl_H
class fdtd():
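    # Minimal usage sketch (array shapes and values are illustrative only):
    #     sim = fdtd(eps_r=np.ones((60, 60, 1)), dL=20e-9, npml=[10, 10, 0])
    #     for _ in range(200):
    #         fields = sim.forward(Jz=source_t)   # source_t: user-supplied array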
def __init__(self, eps_r, dL, npml):
""" Makes an FDTD object
eps_r: the relative permittivity (array > 1)
if eps_r.shape = 3, it holds a single permittivity
if eps_r.shape = 4, the last index is the batch index (running several simulations at once)
dL: the grid size(s) (float/int or list of 3 floats/ints for dx, dy, dz)
npml: the number of PML grids in each dimension (list of 3 ints)
"""
# set the grid shape
eps_r = reshape_to_ND(eps_r, N=3)
self.Nx, self.Ny, self.Nz = self.grid_shape = eps_r.shape
# set the attributes
self.dL = dL
self.npml = npml
self.eps_r = eps_r
def __repr__(self):
return "FDTD(eps_r.shape={}, dL={}, NPML={})".format(self.grid_shape, self.dL, self.npml)
def __str__(self):
return "FDTD object:\n\tdomain size = {}\n\tdL = {}\n\tNPML = {}".format(self.grid_shape, self.dL, self.npml)
@property
def dL(self):
""" Returns the grid size """
return self.__dL
@dL.setter
def dL(self, new_dL):
""" Resets the time step when dL is set. """
self.__dL = new_dL
self._set_time_step()
@property
def npml(self):
""" Returns the pml grid size list """
return self.__npml
@npml.setter
def npml(self, new_npml):
""" Defines some attributes when npml is set. """
self.__npml = new_npml
self._compute_sigmas()
@property
def eps_r(self):
""" Returns the relative permittivity grid """
return self.__eps_r
@eps_r.setter
def eps_r(self, new_eps):
""" Defines some attributes when eps_r is set. """
self.__eps_r = new_eps
self.eps_xx, self.eps_yy, self.eps_zz = grid_center_to_xyz(self.__eps_r)
self.eps_arr = self.__eps_r.flatten()
self.N = self.eps_arr.size
self.grid_shape = self.Nx, self.Ny, self.Nz = self.__eps_r.shape
self._compute_update_parameters()
self.initialize_fields()
def forward(self, Jx=None, Jy=None, Jz=None):
""" one time step of FDTD """
self.t_index += 1
# get curls of E
CEx = curl_E(0, self.Ex, self.Ey, self.Ez, self.dL)
CEy = curl_E(1, self.Ex, self.Ey, self.Ez, self.dL)
CEz = curl_E(2, self.Ex, self.Ey, self.Ez, self.dL)
# update the curl E integrals
self.ICEx = self.ICEx + CEx
self.ICEy = self.ICEy + CEy
self.ICEz = self.ICEz + CEz
# update the H field integrals
self.IHx = self.IHx + self.Hx
self.IHy = self.IHy + self.Hy
self.IHz = self.IHz + self.Hz
# update the H fields
self.Hx = self.mHx1 * self.Hx + self.mHx2 * CEx + self.mHx3 * self.ICEx + self.mHx4 * self.IHx
self.Hy = self.mHy1 * self.Hy + self.mHy2 * CEy + self.mHy3 * self.ICEy + self.mHy4 * self.IHy
self.Hz = self.mHz1 * self.Hz + self.mHz2 * CEz + self.mHz3 * self.ICEz + self.mHz4 * self.IHz
# update fields dict
self.fields['Hx'] = self.Hx
self.fields['Hy'] = self.Hy
self.fields['Hz'] = self.Hz
# get curls of H
CHx = curl_H(0, self.Hx, self.Hy, self.Hz, self.dL)
CHy = curl_H(1, self.Hx, self.Hy, self.Hz, self.dL)
CHz = curl_H(2, self.Hx, self.Hy, self.Hz, self.dL)
# update the curl E integrals
self.ICHx = self.ICHx + CHx
self.ICHy = self.ICHy + CHy
self.ICHz = self.ICHz + CHz
# update the D field integrals
self.IDx = self.IDx + self.Dx
self.IDy = self.IDy + self.Dy
self.IDz = self.IDz + self.Dz
# update the D fields
self.Dx = self.mDx1 * self.Dx + self.mDx2 * CHx + self.mDx3 * self.ICHx + self.mDx4 * self.IDx
self.Dy = self.mDy1 * self.Dy + self.mDy2 * CHy + self.mDy3 * self.ICHy + self.mDy4 * self.IDy
self.Dz = self.mDz1 * self.Dz + self.mDz2 * CHz + self.mDz3 * self.ICHz + self.mDz4 * self.IDz
# add sources to the electric fields
self.Dx += 0 if Jx is None else Jx
self.Dy += 0 if Jy is None else Jy
self.Dz += 0 if Jz is None else Jz
# update field dict
self.fields['Dx'] = self.Dx
self.fields['Dy'] = self.Dy
self.fields['Dz'] = self.Dz
# update the E fields
self.Ex = self.mEx1 * self.Dx
self.Ey = self.mEy1 * self.Dy
self.Ez = self.mEz1 * self.Dz
# update field dict
self.fields['Ex'] = self.Ex
self.fields['Ey'] = self.Ey
self.fields['Ez'] = self.Ez
return self.fields
def initialize_fields(self):
""" Initializes:
- the H, D, and E fields for updating
- the integration terms needed to deal with PML
- the curls of the fields
"""
self.t_index = 0
# magnetic fields
self.Hx = npa.zeros(self.grid_shape)
self.Hy = npa.zeros(self.grid_shape)
self.Hz = npa.zeros(self.grid_shape)
# E field curl integrals
self.ICEx = npa.zeros(self.grid_shape)
self.ICEy = npa.zeros(self.grid_shape)
self.ICEz = npa.zeros(self.grid_shape)
# H field integrals
self.IHx = npa.zeros(self.grid_shape)
self.IHy = npa.zeros(self.grid_shape)
self.IHz = npa.zeros(self.grid_shape)
# E field curls
self.CEx = npa.zeros(self.grid_shape)
self.CEy = npa.zeros(self.grid_shape)
self.CEz = npa.zeros(self.grid_shape)
# H field curl integrals
self.ICHx = npa.zeros(self.grid_shape)
self.ICHy = npa.zeros(self.grid_shape)
self.ICHz = npa.zeros(self.grid_shape)
# D field integrals
self.IDx = npa.zeros(self.grid_shape)
self.IDy = npa.zeros(self.grid_shape)
self.IDz = npa.zeros(self.grid_shape)
# H field curls
self.CHx = npa.zeros(self.grid_shape)
self.CHy = npa.zeros(self.grid_shape)
self.CHz = npa.zeros(self.grid_shape)
# electric displacement fields
self.Dx = npa.zeros(self.grid_shape)
self.Dy = npa.zeros(self.grid_shape)
self.Dz = npa.zeros(self.grid_shape)
# electric fields
self.Ex = npa.zeros(self.grid_shape)
self.Ey = npa.zeros(self.grid_shape)
self.Ez = npa.zeros(self.grid_shape)
# field dictionary to return layer
self.fields = {'Ex': npa.zeros(self.grid_shape),
'Ey': npa.zeros(self.grid_shape),
'Ez': npa.zeros(self.grid_shape),
'Dx': npa.zeros(self.grid_shape),
'Dy': npa.zeros(self.grid_shape),
'Dz': npa.zeros(self.grid_shape),
'Hx': npa.zeros(self.grid_shape),
'Hy': npa.zeros(self.grid_shape),
'Hz': npa.zeros(self.grid_shape)
}
def _set_time_step(self, stability_factor=0.5):
""" Set the time step based on the generalized Courant stability condition
Delta T < 1 / C_0 / sqrt(1 / dx^2 + 1/dy^2 + 1/dz^2)
dt = courant_condition * stability_factor, so stability factor should be < 1
"""
dL_sum = 3 / self.dL ** 2
dL_avg = 1 / npa.sqrt(dL_sum)
courant_stability = dL_avg / C_0
self.dt = courant_stability * stability_factor
def _compute_sigmas(self):
""" Computes sigma tensors for PML """
# initialize sigma matrices on the full 2X grid
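        # (the doubled grid lets sigma be sampled at both integer and
        # half-integer Yee positions before being split into H/D tensors)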
grid_shape_2X = (2 * self.Nx, 2 * self.Ny, 2 * self.Nz)
sigx2 = np.zeros(grid_shape_2X)
sigy2 = np.zeros(grid_shape_2X)
sigz2 = np.zeros(grid_shape_2X)
# sigma vector in the X direction
for nx in range(2 * self.npml[0]):
nx1 = 2 * self.npml[0] - nx + 1
nx2 = 2 * self.Nx - 2 * self.npml[0] + nx
sigx2[nx1, :, :] = (0.5 * EPSILON_0 / self.dt) * (nx / 2 / self.npml[0])**3
sigx2[nx2, :, :] = (0.5 * EPSILON_0 / self.dt) * (nx / 2 / self.npml[0])**3
# sigma arrays in the Y direction
for ny in range(2 * self.npml[1]):
ny1 = 2 * self.npml[1] - ny + 1
ny2 = 2 * self.Ny - 2 * self.npml[1] + ny
sigy2[:, ny1, :] = (0.5 * EPSILON_0 / self.dt) * (ny / 2 / self.npml[1])**3
sigy2[:, ny2, :] = (0.5 * EPSILON_0 / self.dt) * (ny / 2 / self.npml[1])**3
# sigma arrays in the Z direction
for nz in range(2 * self.npml[2]):
nz1 = 2 * self.npml[2] - nz + 1
nz2 = 2 * self.Nz - 2 * self.npml[2] + nz
sigz2[:, :, nz1] = (0.5 * EPSILON_0 / self.dt) * (nz / 2 / self.npml[2])**3
sigz2[:, :, nz2] = (0.5 * EPSILON_0 / self.dt) * (nz / 2 / self.npml[2])**3
        # PML tensors for H field
self.sigHx = sigx2[1::2, ::2, ::2]
self.sigHy = sigy2[ ::2, 1::2, ::2]
self.sigHz = sigz2[ ::2, ::2, 1::2]
        # PML tensors for D field
self.sigDx = sigx2[ ::2, 1::2, 1::2]
self.sigDy = sigy2[1::2, ::2, 1::2]
self.sigDz = sigz2[1::2, 1::2, ::2]
def _compute_update_parameters(self, mu_r=1.0):
""" Computes update coefficients based on values computed earlier.
For more details, see http://emlab.utep.edu/ee5390fdtd/Lecture%2014%20--%203D%20Update%20Equations%20with%20PML.pdf
NOTE: relative permeability set = 1 for now
"""
# H field update coefficients
self.mHx0 = (1 / self.dt + (self.sigHy + self.sigHz) / 2 / EPSILON_0 + self.sigHy * self.sigHz * self.dt / 4 / EPSILON_0**2)
self.mHy0 = (1 / self.dt + (self.sigHx + self.sigHz) / 2 / EPSILON_0 + self.sigHx * self.sigHz * self.dt / 4 / EPSILON_0**2)
self.mHz0 = (1 / self.dt + (self.sigHx + self.sigHy) / 2 / EPSILON_0 + self.sigHx * self.sigHy * self.dt / 4 / EPSILON_0**2)
self.mHx1 = (1 / self.mHx0 * (1/self.dt - (self.sigHy + self.sigHz) / 2 / EPSILON_0 - self.sigHy * self.sigHz * self.dt / 4 / EPSILON_0**2))
self.mHy1 = (1 / self.mHy0 * (1/self.dt - (self.sigHx + self.sigHz) / 2 / EPSILON_0 - self.sigHx * self.sigHz * self.dt / 4 / EPSILON_0**2))
self.mHz1 = (1 / self.mHz0 * (1/self.dt - (self.sigHx + self.sigHy) / 2 / EPSILON_0 - self.sigHx * self.sigHy * self.dt / 4 / EPSILON_0**2))
self.mHx2 = (-1 / self.mHx0 * C_0 / mu_r)
self.mHy2 = (-1 / self.mHy0 * C_0 / mu_r)
self.mHz2 = (-1 / self.mHz0 * C_0 / mu_r)
self.mHx3 = (-1 / self.mHx0 * C_0 * self.dt * self.sigHx / EPSILON_0 / mu_r)
self.mHy3 = (-1 / self.mHy0 * C_0 * self.dt * self.sigHy / EPSILON_0 / mu_r)
self.mHz3 = (-1 / self.mHz0 * C_0 * self.dt * self.sigHz / EPSILON_0 / mu_r)
self.mHx4 = (-1 / self.mHx0 * self.dt * self.sigHy * self.sigHz / EPSILON_0**2)
self.mHy4 = (-1 / self.mHy0 * self.dt * self.sigHx * self.sigHz / EPSILON_0**2)
self.mHz4 = (-1 / self.mHz0 * self.dt * self.sigHx * self.sigHy / EPSILON_0**2)
# D field update coefficients
self.mDx0 = (1 / self.dt + (self.sigDy + self.sigDz) / 2 / EPSILON_0 + self.sigDy * self.sigDz * self.dt / 4 / EPSILON_0**2)
self.mDy0 = (1 / self.dt + (self.sigDx + self.sigDz) / 2 / EPSILON_0 + self.sigDx * self.sigDz * self.dt / 4 / EPSILON_0**2)
self.mDz0 = (1 / self.dt + (self.sigDx + self.sigDy) / 2 / EPSILON_0 + self.sigDx * self.sigDy * self.dt / 4 / EPSILON_0**2)
self.mDx1 = (1 / self.mDx0 * (1/self.dt - (self.sigDy + self.sigDz) / 2 / EPSILON_0 - self.sigDy * self.sigDz * self.dt / 4 / EPSILON_0**2))
self.mDy1 = (1 / self.mDy0 * (1/self.dt - (self.sigDx + self.sigDz) / 2 / EPSILON_0 - self.sigDx * self.sigDz * self.dt / 4 / EPSILON_0**2))
self.mDz1 = (1 / self.mDz0 * (1/self.dt - (self.sigDx + self.sigDy) / 2 / EPSILON_0 - self.sigDx * self.sigDy * self.dt / 4 / EPSILON_0**2))
self.mDx2 = (1 / self.mDx0 * C_0)
self.mDy2 = (1 / self.mDy0 * C_0)
self.mDz2 = (1 / self.mDz0 * C_0)
self.mDx3 = (1 / self.mDx0 * C_0 * self.dt * self.sigDx / EPSILON_0)
self.mDy3 = (1 / self.mDy0 * C_0 * self.dt * self.sigDy / EPSILON_0)
self.mDz3 = (1 / self.mDz0 * C_0 * self.dt * self.sigDz / EPSILON_0)
self.mDx4 = (-1 / self.mDx0 * self.dt * self.sigDy * self.sigDz / EPSILON_0**2)
self.mDy4 = (-1 / self.mDy0 * self.dt * self.sigDx * self.sigDz / EPSILON_0**2)
self.mDz4 = (-1 / self.mDz0 * self.dt * self.sigDx * self.sigDy / EPSILON_0**2)
# D -> E update coefficients
self.mEx1 = (1 / self.eps_xx)
self.mEy1 = (1 / self.eps_yy)
self.mEz1 = (1 / self.eps_zz)
| 41.208202 | 148 | 0.559672 | 1,976 | 13,063 | 3.586032 | 0.135628 | 0.057155 | 0.073384 | 0.081287 | 0.451454 | 0.367062 | 0.220294 | 0.195032 | 0.157776 | 0.157776 | 0 | 0.042901 | 0.302304 | 13,063 | 316 | 149 | 41.338608 | 0.734584 | 0.152339 | 0 | 0.015873 | 0 | 0 | 0.011915 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.074074 | false | 0 | 0.031746 | 0.010582 | 0.142857 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
9897920e4b31c1105fd21257030e059f22b924ba | 5,289 | py | Python | examples/probabilistic_keypoint_estimation/processors.py | niqbal996/paz | f27205907367415d5b21f90e1a1d1d1ce598e889 | [
"MIT"
] | 300 | 2020-10-29T08:02:05.000Z | 2022-03-30T21:47:32.000Z | examples/probabilistic_keypoint_estimation/processors.py | albertofernandezvillan/paz | 9fbd50b993f37e1e807297a29c6044c09967c9cc | [
"MIT"
] | 30 | 2020-10-29T12:40:32.000Z | 2022-03-31T14:06:35.000Z | examples/probabilistic_keypoint_estimation/processors.py | albertofernandezvillan/paz | 9fbd50b993f37e1e807297a29c6044c09967c9cc | [
"MIT"
] | 62 | 2020-10-29T12:34:13.000Z | 2022-03-29T05:21:45.000Z | from paz.backend.image import draw_circle
from paz.backend.image.draw import GREEN
from paz.backend.image import resize_image
from paz import processors as pr
from paz.abstract import Processor
import numpy as np
from paz.backend.image import lincolor
import seaborn as sns
from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas
from matplotlib.figure import Figure
class PartitionKeypoints(Processor):
"""Partitions keypoints from shape ''[num_keypoints, 2]'' into a list of
the form ''[(2, 1), (2, 1), ....]'' and length equal to the number of
of_keypoints.
"""
def __init__(self):
super(PartitionKeypoints, self).__init__()
def call(self, keypoints):
keypoints = np.vsplit(keypoints, len(keypoints))
keypoints = [np.squeeze(keypoint) for keypoint in keypoints]
return (*keypoints, )
class ToNumpyArray(Processor):
def __init__(self):
super(ToNumpyArray, self).__init__()
def call(self, predictions):
return np.array(predictions)
class PredictDistributions(Processor):
def __init__(self, model, preprocess=None):
super(PredictDistributions, self).__init__()
self.model = model
self.preprocess = preprocess
def call(self, x):
if self.preprocess is not None:
x = self.preprocess(x)
distributions = self.model(x)
return distributions
class ComputeMeans(Processor):
def __init__(self):
super(ComputeMeans, self).__init__()
def call(self, distributions):
keypoints = np.zeros((len(distributions), 2))
for arg, distribution in enumerate(distributions):
keypoints[arg] = distribution.mean()
return keypoints
class ToProbabilityGrid(Processor):
def __init__(self, grid):
self.grid = grid
def call(self, distribution):
probability = distribution.prob(self.grid).numpy()[::-1, :]
return probability
def build_figure():
figure = Figure()
canvas = FigureCanvas(figure)
axis = figure.gca()
axis.axis('off')
figure.tight_layout(pad=0)
axis.margins(0)
# figure.canvas.draw()
return figure, axis, canvas
def to_pixels(figure):
figure.canvas.draw()
image = np.frombuffer(figure.canvas.tostring_rgb(), dtype=np.uint8)
image = image.reshape(figure.canvas.get_width_height()[::-1] + (3,))
return image
def interpolate_probability(probability, shape):
normalization_constant = np.max(probability)
probability = probability / normalization_constant
probability = probability * 255.0
probability = probability.astype('uint8')
probability = resize_image(probability, shape)
probability = probability / 255.0
probability = probability * normalization_constant
return probability
class DrawProbabilities(Processor):
def __init__(self, num_keypoints, normalized=True):
self.colors = lincolor(num_keypoints, normalized=normalized)
self.figure, self.axis, self.canvas = build_figure()
# self._figure, self._axis, self._canvas = build_figure()
def call(self, image, probabilities):
for color, probability in zip(self.colors, probabilities):
cmap = sns.light_palette(color, input='hsl', as_cmap=True)
probability = interpolate_probability(probability, image.shape[:2])
self.axis.contour(probability, cmap=cmap, levels=np.arange(1, 50, 3))
self.axis.imshow(image)
contour = to_pixels(self.figure)
# contour = resize_image(contour, (image.shape[:2]))
# self._axis.imshow(image)
# self._axis.imshow(contour)
# new_image = to_pixels(self._figure)
return contour
class PredictMeanDistribution(Processor):
def __init__(self, model, preprocess=None):
super(PredictMeanDistribution, self).__init__()
print('Building graph...')
self.num_keypoints = len(model.output_shape)
# self.model = tf.function(model.mean)
self.model = model
self.preprocess = preprocess
def call(self, x):
if self.preprocess is not None:
x = self.preprocess(x)
distributions = self.model(x)
keypoints = np.zeros((self.num_keypoints, 2))
for arg, distribution in enumerate(distributions):
keypoints[arg] = distribution.mean()
return keypoints
def draw_circles(image, points, color=GREEN, radius=3):
for point in points:
draw_circle(image, point, color, radius)
return image
if __name__ == '__main__':
from facial_keypoints import FacialKeypoints
from paz.backend.image import show_image
from paz.abstract import SequentialProcessor
data_manager = FacialKeypoints('dataset/', 'train')
datasets = data_manager.load_data()
augment_keypoints = SequentialProcessor()
augment_keypoints.add(pr.RandomKeypointRotation())
augment_keypoints.add(pr.RandomKeypointTranslation())
for arg in range(100):
original_image = datasets[0]['image'].copy()
kp = datasets[0]['keypoints'].copy()
original_image, kp = augment_keypoints(original_image, kp)
original_image = draw_circles(original_image, kp.astype('int'))
show_image(original_image.astype('uint8'))
| 33.474684 | 81 | 0.682927 | 605 | 5,289 | 5.786777 | 0.257851 | 0.015995 | 0.021994 | 0.034276 | 0.259069 | 0.189089 | 0.161668 | 0.161668 | 0.114253 | 0.114253 | 0 | 0.008389 | 0.211193 | 5,289 | 157 | 82 | 33.687898 | 0.830777 | 0.076952 | 0 | 0.236842 | 0 | 0 | 0.014642 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.157895 | false | 0 | 0.114035 | 0.008772 | 0.429825 | 0.008772 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
989dc6b7e06440eaa1baf455b551c9b102c10503 | 2,993 | py | Python | scripts/blueprint_graph_queries.py | azdolinski/apstra-api-python | 2380bcdbd5be31e552d9259592249b13fa432286 | [
"Apache-2.0"
] | 1 | 2022-03-23T22:16:15.000Z | 2022-03-23T22:16:15.000Z | scripts/blueprint_graph_queries.py | azdolinski/apstra-api-python | 2380bcdbd5be31e552d9259592249b13fa432286 | [
"Apache-2.0"
] | 4 | 2022-03-26T15:12:50.000Z | 2022-03-31T07:31:53.000Z | scripts/blueprint_graph_queries.py | azdolinski/apstra-api-python | 2380bcdbd5be31e552d9259592249b13fa432286 | [
"Apache-2.0"
] | 2 | 2022-03-26T00:04:42.000Z | 2022-03-26T14:23:20.000Z | # Copyright 2020-present, Apstra, Inc. All rights reserved.
#
# This source code is licensed under End User License Agreement found in the
# LICENSE file at http://www.apstra.com/eula
from aos.client import AosClient
from scripts.utils import deserialize_fixture
import urllib3
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
# You will need to update the connection details below with your
# specific AOS instance
AOS_IP = "<aos-IP>"
AOS_PORT = 443
AOS_USER = "admin"
AOS_PW = "aos-aos"
# Login
aos = AosClient(protocol="https", host=AOS_IP, port=AOS_PORT)
aos.auth.login(AOS_USER, AOS_PW)
# Find Blueprint by Name
bp_name = "apstra-pod1"
bp = aos.blueprint.get_id_by_name(label=bp_name)
# QE Queries
# Return all fabric switches (nodes). Notice the use of "is_in" with role
# to filter the query.
switch_query = (
"match(node('system', name='switches', "
"role=is_in(['spine', 'leaf', 'superspine'])))"
)
resp = aos.blueprint.qe_query(bp.id, query=switch_query)
# aos.blueprint.get_all_tor_nodes() uses two queries to return all top of
# rack (ToR) nodes and their properties. It calls two methods to do this.
#
# aos.blueprint.get_bp_system_leaf_nodes()
# Return all nodes of type system with a role of 'leaf'.
leaf_query = "match(node('system', name='leaf', role='leaf'))"
resp = aos.blueprint.qe_query(bp.id, query=leaf_query)
# aos.blueprint.get_bp_system_redundancy_group() returns the
# redundancy_group details a given system is a member of
system_id = 'foo'
rg_query = (
"match(node('redundancy_group', name='rg')"
".out('composed_of_systems')"
".node('system', role='leaf',"
f" id='{system_id}'))"
)
resp = aos.blueprint.qe_query(bp.id, query=rg_query)
# Query the Blueprint for all fabric links between leafs and spines
link_query = (
"match(node('system', role='leaf', name='system')"
".out('hosted_interfaces')"
".node('interface', name='iface').out('link')"
".node('link', role='spine_leaf'))"
),
resp = aos.blueprint.qe_query(bp.id, query=link_query)
# Query the Blueprint for all links in the fabric belonging to a specific
# routing-zone (VRF). We are using routing-zone 'blue' in this example.
link_query = (
"match(node('system', role='spine', deploy_mode='deploy')"
".out('hosted_interfaces')"
".node('interface', name='leaf_intf')"
".out('link')"
".node('link', role='spine_leaf')"
".in_('link')"
".node('interface')"
".in_('hosted_interfaces')"
".node('system', role='leaf'),"
"node(name='leaf_intf')"
".in_('member_interfaces')"
".node('sz_instance')"
".in_('instantiated_by')"
".node('security_zone', vrf_name='blue')"
)
resp = aos.blueprint.qe_query(bp.id, query=link_query)
# QL Queries | 33.629213 | 76 | 0.642833 | 406 | 2,993 | 4.576355 | 0.352217 | 0.058127 | 0.037675 | 0.048439 | 0.275027 | 0.224435 | 0.128095 | 0.100108 | 0.065662 | 0.044133 | 0 | 0.004685 | 0.215503 | 2,993 | 89 | 77 | 33.629213 | 0.786627 | 0.322753 | 0 | 0.117647 | 0 | 0 | 0.402993 | 0.134663 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.058824 | 0 | 0.058824 | 0.117647 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
98a08f9c2482c0d99eb42cf5f396b2658fb698d8 | 490 | py | Python | grade.py | pooja160699/fullstackprograms | 60db5db8e49ba99a106188c6771a8163be3d18be | [
"bzip2-1.0.6"
] | null | null | null | grade.py | pooja160699/fullstackprograms | 60db5db8e49ba99a106188c6771a8163be3d18be | [
"bzip2-1.0.6"
] | null | null | null | grade.py | pooja160699/fullstackprograms | 60db5db8e49ba99a106188c6771a8163be3d18be | [
"bzip2-1.0.6"
] | null | null | null | #grade
def grade(a):
total=0
for i in a:
total=total+i
print(total)
per=float(total/5)
print(per)
if per>80:
print("a")
elif per>70:
print("b")
elif per>60:
print("c")
elif per>50:
print("d")
elif per>40:
print("e")
else:
print("fail")
print("enter marks of 5 subjects")
a=[]
for i in range(0,5):
inp=input()
a.append(inp)
print(a)
grade(a)
| 14 | 35 | 0.461224 | 71 | 490 | 3.183099 | 0.464789 | 0.123894 | 0.053097 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.050336 | 0.391837 | 490 | 34 | 36 | 14.411765 | 0.708054 | 0.010204 | 0 | 0 | 0 | 0 | 0.075556 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.038462 | false | 0 | 0 | 0 | 0.038462 | 0.384615 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
98a54a0f98ef2d7111fbff0beed3ba90e0411134 | 1,233 | py | Python | tasks/config.py | laruja/f-show | 0ef15d25d230ffeb20e70fae5b4769daa9a10828 | [
"MIT"
] | 4 | 2021-07-27T09:14:44.000Z | 2021-09-08T02:14:31.000Z | tasks/config.py | laruja/f-show | 0ef15d25d230ffeb20e70fae5b4769daa9a10828 | [
"MIT"
] | null | null | null | tasks/config.py | laruja/f-show | 0ef15d25d230ffeb20e70fae5b4769daa9a10828 | [
"MIT"
] | null | null | null | import util
# 日志目录
global logfileDir
logfileDir = './log'
# 数据库配置
global db
db = {
'host': '127.0.0.1',
'port': 27017
}
global daliy
daliy = 'daliy'
global daliyxxx
daliyxxx = 'daliy'+util.yesterday().strftime('%Y%m%d') # daliy20210622
global history
history = 'histest'
global fund
fund = 'fund'
# 基金类型字典
global fundType1
global fundType2
global head1
global head2
fundType1 = {'股票型': '6020-6010', '混合型': '6020-6040',
'债券型': '6020-6030', 'QDII型': '6020-6050'}
fundType2 = {'货币型': '6020-6020', '短期理财债券型': '6020-6060'}
# 基金代码 分级代码 基金简称 份额净值 累计净值 基金资产净值 估值日期 备注
head1 = ['code', 'subcode', 'shortName', 'shareNetValue',
'totalNetValue', 'zcNetValue', 'valuationDate']
# 基金代码 分级代码 基金简称 每万份基金已实现收益 7日年化收益率百分比 基金份额净值 基金累计净值 基金资产净值 估值日期 备注
head2 = ['code', 'subcode', 'shortName', 'gainPer', 'yearSevenDayYieldRatePercent',
'shareNetValue', 'totalNetValue', 'zcNetValue', 'valuationDate']
# db中基金类型定义
global fundTypeDB
fundTypeDB = [
{'code': '6020-6010', 'name': '股票型'},
{'code': '6020-6040', 'name': '混合型'},
{'code': '6020-6030', 'name': '债券型'},
{'code': '6020-6050', 'name': 'QDII型'},
{'code': '6020-6020', 'name': '货币型'},
{'code': '6020-6060', 'name': '短期理财债券型'},
]
| 26.804348 | 84 | 0.633414 | 140 | 1,233 | 5.578571 | 0.471429 | 0.06146 | 0.03073 | 0.12548 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.121212 | 0.170316 | 1,233 | 45 | 85 | 27.4 | 0.642229 | 0.119221 | 0 | 0 | 0 | 0 | 0.395176 | 0.025974 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.027778 | 0 | 0.027778 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
98a5c9401ffa5bdac95b62746713c3eae81af041 | 6,172 | py | Python | mindquantum/algorithm/nisq/chem/unitary_cc.py | Takishima/mindquantum | e90dfe474b759023d7ae18281b9a87cb8d223d04 | [
"Apache-2.0"
] | null | null | null | mindquantum/algorithm/nisq/chem/unitary_cc.py | Takishima/mindquantum | e90dfe474b759023d7ae18281b9a87cb8d223d04 | [
"Apache-2.0"
] | null | null | null | mindquantum/algorithm/nisq/chem/unitary_cc.py | Takishima/mindquantum | e90dfe474b759023d7ae18281b9a87cb8d223d04 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Unitary coupled-cluster ansatz."""
from mindquantum.core.circuit import Circuit
from mindquantum.core.circuit.utils import add_prefix
from mindquantum.core.operators import TimeEvolution
from .._ansatz import Ansatz
from .transform import Transform
from .uccsd0 import uccsd0_singlet_generator
def _check_int_list(input_list, name):
if not isinstance(input_list, list):
raise ValueError(
"The input {} should be a list, \
but get {}.".format(
str(name), type(input_list)
)
)
for i in input_list:
if not isinstance(i, int):
raise ValueError(
"The indices of {} should be integer, \
but get {}.".format(
str(name), type(i)
)
)
class UCCAnsatz(Ansatz):
r"""
The unitary coupled-cluster ansatz for molecular simulations.
.. math::
U(\vec{\theta}) = \prod_{j=1}^{N(N\ge1)}{\prod_{i=0}^{N_{j}}{\exp{(\theta_{i}\hat{\tau}_{i})}}}
where :math:`\hat{\tau}` are anti-Hermitian operators.
Note:
Currently, the circuit is construncted using JW transformation.
In addition, the reference state wave function (Hartree-Fock) will NOT be
included.
Args:
n_qubits(int): Number of qubits (spin-orbitals). Default: None.
n_electrons(int): Number of electrons (occupied spin-orbitals). Default: None.
occ_orb(list): Indices of manually assigned occupied spatial
orbitals, for ansatz construction only. Default: None.
vir_orb(list): Indices of manually assigned virtual spatial
orbitals, for ansatz construction only. Default: None.
generalized(bool): Whether to use generalized excitations which
do not distinguish occupied or virtual orbitals (UCCGSD). Default: False.
trotter_step(int): The order of Trotterization step. Default: 1.
Examples:
>>> from mindquantum.algorithm.nisq.chem import UCCAnsatz
>>> ucc = UCCAnsatz(12, 4, occ_orb=[1],
... vir_orb=[2, 3],
... generalized=True,
... trotter_step=2)
>>> circuit = ucc.circuit.remove_barrier()
>>> len(circuit)
3624
>>> params_list = ucc.circuit.params_name
>>> len(params_list)
48
>>> circuit[-10:]
q5: ──●────RX(7π/2)───────H───────●────────────────────────────●───────H──────
│ │ │
q7: ──X───────H────────RX(π/2)────X────RZ(-0.5*t_1_d0_d_17)────X────RX(7π/2)──
"""
def __init__(self, n_qubits=None, n_electrons=None, occ_orb=None, vir_orb=None, generalized=False, trotter_step=1):
"""Initialize a UCCAnsatz object."""
if n_qubits is not None and not isinstance(n_qubits, int):
raise ValueError(
"The number of qubits should be integer, \
but get {}.".format(
type(n_qubits)
)
)
if n_electrons is not None and not isinstance(n_electrons, int):
raise ValueError(
"The number of electrons should be integer, \
but get {}.".format(
type(n_electrons)
)
)
if isinstance(n_electrons, int) and n_electrons > n_qubits:
raise ValueError(
"The number of electrons must be smaller than \
the number of qubits (spin-orbitals) in the ansatz!"
)
if occ_orb is not None:
_check_int_list(occ_orb, "occupied orbitals")
if vir_orb is not None:
_check_int_list(vir_orb, "virtual orbitals")
if not isinstance(generalized, bool):
raise ValueError(
"The parameter generalized should be bool, \
but get {}.".format(
type(generalized)
)
)
if not isinstance(trotter_step, int) or trotter_step < 1:
raise ValueError("Trotter step must be a positive integer!")
super().__init__("Unitary CC", n_qubits, n_qubits, n_electrons, occ_orb, vir_orb, generalized, trotter_step)
def _implement(self, n_qubits, n_electrons, occ_orb=None, vir_orb=None, generalized=False, trotter_step=1):
"""Implement the UCC ansatz using uccsd0."""
ansatz_circuit = Circuit()
for trotter_idx in range(trotter_step):
uccsd0_fermion_op = uccsd0_singlet_generator(n_qubits, n_electrons, True, occ_orb, vir_orb, generalized)
uccsd0_circuit = TimeEvolution(Transform(uccsd0_fermion_op).jordan_wigner().imag, 1).circuit
# Modify parameter names
uccsd0_circuit_modified = add_prefix(uccsd0_circuit, "t_" + str(trotter_idx))
ansatz_circuit += uccsd0_circuit_modified
n_qubits_circuit = 0
if list(ansatz_circuit):
n_qubits_circuit = ansatz_circuit.n_qubits
# If the ansatz's n_qubits is not set by user, use n_qubits_circuit.
if self.n_qubits is None:
self.n_qubits = n_qubits_circuit
if self.n_qubits < n_qubits_circuit:
raise ValueError(
"The number of qubits in the ansatz circuit {} is larger than \
the input n_qubits {}! Please check input parameters such as occ_orb, etc.".format(
n_qubits_circuit, n_qubits
)
)
self._circuit = ansatz_circuit
| 41.422819 | 119 | 0.599968 | 769 | 6,172 | 4.784135 | 0.312094 | 0.043762 | 0.034248 | 0.026094 | 0.227779 | 0.202772 | 0.113074 | 0.071215 | 0.026094 | 0.026094 | 0 | 0.012655 | 0.283053 | 6,172 | 148 | 120 | 41.702703 | 0.795706 | 0.410402 | 0 | 0.153846 | 0 | 0 | 0.024566 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.038462 | false | 0 | 0.076923 | 0 | 0.128205 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
98a5de6ff37a8157fd5acdb6ec1f751faeddd147 | 6,506 | py | Python | Tax-Calculator-3.0.0/taxcalc/policy.py | grantseiter/Tax-Benefits-Of-Parenthood | 5350e832e8b877b46c2a3cab070fc8262b914a52 | [
"MIT"
] | null | null | null | Tax-Calculator-3.0.0/taxcalc/policy.py | grantseiter/Tax-Benefits-Of-Parenthood | 5350e832e8b877b46c2a3cab070fc8262b914a52 | [
"MIT"
] | null | null | null | Tax-Calculator-3.0.0/taxcalc/policy.py | grantseiter/Tax-Benefits-Of-Parenthood | 5350e832e8b877b46c2a3cab070fc8262b914a52 | [
"MIT"
] | null | null | null | """
Tax-Calculator federal tax policy Policy class.
"""
# CODING-STYLE CHECKS:
# pycodestyle policy.py
# pylint --disable=locally-disabled policy.py
import os
import json
import numpy as np
from taxcalc.parameters import Parameters
from taxcalc.growfactors import GrowFactors
class Policy(Parameters):
"""
Policy is a subclass of the abstract Parameters class, and
therefore, inherits its methods (none of which are shown here).
Constructor for the federal tax policy class.
Parameters
----------
gfactors: GrowFactors class instance
containing price inflation rates and wage growth rates
Raises
------
ValueError:
if gfactors is not a GrowFactors class instance or None.
Returns
-------
class instance: Policy
"""
DEFAULTS_FILE_NAME = 'policy_current_law.json'
DEFAULTS_FILE_PATH = os.path.abspath(os.path.dirname(__file__))
JSON_START_YEAR = 2013 # remains the same unless earlier data added
LAST_KNOWN_YEAR = 2019 # last year for which indexed param vals are known
# should increase LAST_KNOWN_YEAR by one every calendar year
LAST_BUDGET_YEAR = 2030 # last extrapolation year
# should increase LAST_BUDGET_YEAR by one every calendar year
DEFAULT_NUM_YEARS = LAST_BUDGET_YEAR - JSON_START_YEAR + 1
# NOTE: the following three data structures use internal parameter names:
# (1) specify which Policy parameters have been removed or renamed
REMOVED_PARAMS = {
# following five parameters removed in PR 2223 merged on 2019-02-06
'DependentCredit_Child_c': 'is a removed parameter name',
'DependentCredit_Nonchild_c': 'is a removed parameter name',
'DependentCredit_before_CTC': 'is a removed parameter name',
'FilerCredit_c': 'is a removed parameter name',
'ALD_InvInc_ec_base_RyanBrady': 'is a removed parameter name',
# TODO: following parameter renamed in PR 2292 merged on 2019-04-15
"cpi_offset": (
"was renamed parameter_indexing_CPI_offset. "
"See documentation for change in usage."
),
"CPI_offset": (
"was renamed parameter_indexing_CPI_offset. "
"See documentation for change in usage."
),
# TODO: following parameters renamed in PR 2345 merged on 2019-06-24
'PT_excl_rt':
'was renamed PT_qbid_rt in release 2.4.0',
'PT_excl_wagelim_thd':
'was renamed PT_qbid_taxinc_thd in release 2.4.0',
'PT_excl_wagelim_prt':
'was renamed PT_qbid_taxinc_gap in release 2.4.0',
'PT_excl_wagelim_rt':
'was renamed PT_qbid_w2_wages_rt in release 2.4.0',
'CTC_c_under5_bonus': 'was renamed CTC_c_under6_bonus.',
'ACTC_rt_bonus_under5family':
'was renamed ACTC_rt_bonus_under6family.',
'CTC_new_c_under5_bonus': 'was renamed CTC_new_c_under6_bonus.'
}
# (2) specify which Policy parameters have been redefined recently
REDEFINED_PARAMS = {}
# (3) specify which Policy parameters are wage (rather than price) indexed
WAGE_INDEXED_PARAMS = ['SS_Earnings_c', 'SS_Earnings_thd']
def __init__(self, gfactors=None, only_reading_defaults=False, **kwargs):
# put JSON contents of DEFAULTS_FILE_NAME into self._vals dictionary
super().__init__()
# handle gfactors argument
if gfactors is None:
self._gfactors = GrowFactors()
elif isinstance(gfactors, GrowFactors):
self._gfactors = gfactors
else:
raise ValueError('gfactors is not None or a GrowFactors instance')
# read default parameters and initialize
syr = Policy.JSON_START_YEAR
lyr = Policy.LAST_BUDGET_YEAR
nyrs = Policy.DEFAULT_NUM_YEARS
self._inflation_rates = None
self._wage_growth_rates = None
self.initialize(syr, nyrs, Policy.LAST_KNOWN_YEAR,
Policy.REMOVED_PARAMS,
Policy.REDEFINED_PARAMS,
Policy.WAGE_INDEXED_PARAMS, **kwargs)
@staticmethod
def read_json_reform(obj):
"""
Return a reform dictionary suitable for use with implement_reform
method generated from the specified JSON object, which can be None or
a string containing a local filename, a URL beginning with 'http'
pointing to a valid JSON file hosted online, or a valid JSON text.
"""
return Parameters._read_json_revision(obj, 'policy')
def implement_reform(self, reform,
print_warnings=True, raise_errors=True):
"""
Implement reform using Tax-Calculator syled reforms/adjustments. Users
may also use the adjust method with ParamTools styled reforms.
"""
# need to do conversion:
return self._update(reform, print_warnings, raise_errors)
@staticmethod
def parameter_list():
"""
Returns list of parameter names in the policy_current_law.json file.
"""
path = os.path.join(
Policy.DEFAULTS_FILE_PATH,
Policy.DEFAULTS_FILE_NAME
)
with open(path) as f:
defaults = json.loads(f.read()) # pylint: disable=protected-access
return [k for k in defaults if k != "schema"]
def set_rates(self):
"""Initialize taxcalc indexing data."""
cpi_vals = [
vo["value"] for
vo in self._data["parameter_indexing_CPI_offset"]["value"]
]
# extend parameter_indexing_CPI_offset values through budget window
# if they have not been extended already.
cpi_vals = cpi_vals + cpi_vals[-1:] * (
self.end_year - self.start_year + 1 - len(cpi_vals)
)
cpi_offset = {
(self.start_year + ix): val
for ix, val in enumerate(cpi_vals)
}
self._gfactors = GrowFactors()
self._inflation_rates = [
np.round(rate + cpi_offset[self.start_year + ix], 4)
for ix, rate in enumerate(
self._gfactors.price_inflation_rates(
self.start_year, self.end_year
)
)
]
self._wage_growth_rates = self._gfactors.wage_growth_rates(
self.start_year, self.end_year
)
| 39.192771 | 80 | 0.632339 | 790 | 6,506 | 4.975949 | 0.331646 | 0.022895 | 0.012719 | 0.024167 | 0.191554 | 0.159501 | 0.092852 | 0.058255 | 0.039176 | 0.039176 | 0 | 0.01614 | 0.295266 | 6,506 | 165 | 81 | 39.430303 | 0.841221 | 0.316631 | 0 | 0.123711 | 0 | 0 | 0.244913 | 0.076734 | 0 | 0 | 0 | 0.006061 | 0 | 1 | 0.051546 | false | 0 | 0.051546 | 0 | 0.237113 | 0.020619 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
98a60c53249410258e27e16f17c6bf4773ac5726 | 2,550 | py | Python | tests/test_ops/test_merge_cells.py | BIGWangYuDong/mmcv | c46deb0576edaff5cd5a7d384c617478c7a73a70 | [
"Apache-2.0"
] | 1 | 2022-03-18T02:41:11.000Z | 2022-03-18T02:41:11.000Z | tests/test_ops/test_merge_cells.py | BIGWangYuDong/mmcv | c46deb0576edaff5cd5a7d384c617478c7a73a70 | [
"Apache-2.0"
] | null | null | null | tests/test_ops/test_merge_cells.py | BIGWangYuDong/mmcv | c46deb0576edaff5cd5a7d384c617478c7a73a70 | [
"Apache-2.0"
] | null | null | null | # Copyright (c) OpenMMLab. All rights reserved.
"""
CommandLine:
pytest tests/test_merge_cells.py
"""
import torch
import torch.nn.functional as F
from mmcv.ops.merge_cells import (BaseMergeCell, ConcatCell, GlobalPoolingCell,
SumCell)
def test_sum_cell():
inputs_x = torch.randn([2, 256, 32, 32])
inputs_y = torch.randn([2, 256, 16, 16])
sum_cell = SumCell(256, 256)
output = sum_cell(inputs_x, inputs_y, out_size=inputs_x.shape[-2:])
assert output.size() == inputs_x.size()
output = sum_cell(inputs_x, inputs_y, out_size=inputs_y.shape[-2:])
assert output.size() == inputs_y.size()
output = sum_cell(inputs_x, inputs_y)
assert output.size() == inputs_x.size()
def test_concat_cell():
inputs_x = torch.randn([2, 256, 32, 32])
inputs_y = torch.randn([2, 256, 16, 16])
concat_cell = ConcatCell(256, 256)
output = concat_cell(inputs_x, inputs_y, out_size=inputs_x.shape[-2:])
assert output.size() == inputs_x.size()
output = concat_cell(inputs_x, inputs_y, out_size=inputs_y.shape[-2:])
assert output.size() == inputs_y.size()
output = concat_cell(inputs_x, inputs_y)
assert output.size() == inputs_x.size()
def test_global_pool_cell():
inputs_x = torch.randn([2, 256, 32, 32])
inputs_y = torch.randn([2, 256, 32, 32])
gp_cell = GlobalPoolingCell(with_out_conv=False)
gp_cell_out = gp_cell(inputs_x, inputs_y, out_size=inputs_x.shape[-2:])
assert (gp_cell_out.size() == inputs_x.size())
gp_cell = GlobalPoolingCell(256, 256)
gp_cell_out = gp_cell(inputs_x, inputs_y, out_size=inputs_x.shape[-2:])
assert (gp_cell_out.size() == inputs_x.size())
def test_resize_methods():
inputs_x = torch.randn([2, 256, 128, 128])
target_resize_sizes = [(128, 128), (256, 256)]
resize_methods_list = ['nearest', 'bilinear']
for method in resize_methods_list:
merge_cell = BaseMergeCell(upsample_mode=method)
for target_size in target_resize_sizes:
merge_cell_out = merge_cell._resize(inputs_x, target_size)
gt_out = F.interpolate(inputs_x, size=target_size, mode=method)
assert merge_cell_out.equal(gt_out)
target_size = (64, 64) # resize to a smaller size
merge_cell = BaseMergeCell()
merge_cell_out = merge_cell._resize(inputs_x, target_size)
kernel_size = inputs_x.shape[-1] // target_size[-1]
gt_out = F.max_pool2d(
inputs_x, kernel_size=kernel_size, stride=kernel_size)
assert (merge_cell_out == gt_out).all()
| 38.059701 | 79 | 0.681569 | 385 | 2,550 | 4.202597 | 0.192208 | 0.116811 | 0.074784 | 0.084054 | 0.533375 | 0.533375 | 0.513597 | 0.513597 | 0.508035 | 0.508035 | 0 | 0.048745 | 0.187451 | 2,550 | 66 | 80 | 38.636364 | 0.732143 | 0.047451 | 0 | 0.34 | 0 | 0 | 0.006198 | 0 | 0 | 0 | 0 | 0 | 0.2 | 1 | 0.08 | false | 0 | 0.06 | 0 | 0.14 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
98a754ea36c6f75e3a944d4f376a736140a55916 | 6,337 | py | Python | bot.py | der-test/3dp_helpbot | db379d7738231d0b1879f1ef8a70f7fdfc6b2d47 | [
"MIT"
] | null | null | null | bot.py | der-test/3dp_helpbot | db379d7738231d0b1879f1ef8a70f7fdfc6b2d47 | [
"MIT"
] | null | null | null | bot.py | der-test/3dp_helpbot | db379d7738231d0b1879f1ef8a70f7fdfc6b2d47 | [
"MIT"
] | null | null | null | """
/r/3DPrinting Help Bot V2
Tries to help users by replying to them with helpful links
:author: Tim Estermann, @der-test
:original author: Connor Henley, @thatging3rkid
"""
import os
import sys
import time
import pickle
import praw
import praw.models
import datetime
import traceback
import subprocess
import config
version = ""
try:
version = subprocess.check_output(["git", "describe", "--tags"]).strip().decode("utf-8")
except Exception:  # git may be unavailable, or the repo may have no tags
version = "unknown"
DEBUG_INFO = "\n\n*****\n I am a bot | /r/3DPrinting Help Bot by " \
"[/u/vurt_feather](https://reddit.com/user/vurt_feather) | version " + version + \
" | [GitHub](https://github.com/der-test/3dp-helpbot)" \
" | [How to summon](https://github.com/der-test/3dp_helpbot#reddit-usage)"
KEYWORDS = ["3d modeling program", "cad program", "cad software", "looking for modeling program",
"3d modeling software", "software for designing 3d models", "software to make 3d models",
"software for 3d modeling", "3d modelling program", "3d editing software", "3d editing program"]
"""
Generates the responses when a user asks for help with different keywords after the mention
"""
def resp_modeling():
return "[Here](https://reddit.com/r/3Dprinting/wiki/index#wiki_what_should_i_do_to_start_modelling_" + \
"things_to_print.3F) is the wiki entry of CAD/3D modeling software.\n\n[Here](https://reddit.com/r/" + \
"3Dprinting/comments/bm6wq2/so_you_want_to_learn_x_program/) is a guide containing resources to" + \
" learn most CAD/3D modeling software." + DEBUG_INFO
def resp_slicers():
return "[Here](https://www.reddit.com/r/3Dprinting/wiki/slicers)" + \
" is the wiki entry for slicer software." + DEBUG_INFO
def resp_trouble():
return "[Here](https://www.reddit.com/r/3Dprinting/wiki/troubleshootingandcalibration)" + \
" is the wiki entry for general troubleshooting & calibration help." + DEBUG_INFO
def resp_services():
return "[Here](http://www.reddit.com/r/3DPrinting/wiki/Services)" + \
" is the wiki entry for print, design and model-host services." + DEBUG_INFO
def resp_general():
return "[Here](https://reddit.com/r/3Dprinting/wiki/index)" + \
" is the general wiki entry." + DEBUG_INFO
# A class isn't necessary, but globals in Python are weird
class Bot:
def __init__(self):
# Login
        # Config lives in config.py; copy the template first (e.g. cp config.txt config.py)
self.__bot = praw.Reddit(username=config.username, password=config.password, client_id=config.client_id,
client_secret=config.client_secret, user_agent="3dp_helpbot " + version)
print("Logged in...")
# Initialize data
try:
df = open("data.dat", "rb")
self.__viewed = pickle.load(df)
df.close()
        except Exception:  # no saved data yet, or the cache is unreadable
self.__viewed = []
# Run the bot
i = 0
while True:
try:
self.__run()
if len(self.__viewed) > 200:
                    # Trim the 15 oldest ids. Use "_" so the outer write-counter
                    # "i" is not clobbered, and pop(0) rather than remove(0)
                    # (remove(0) searches for a value 0 and would raise ValueError).
                    for _ in range(0, 15):
                        self.__viewed.pop(0)
# Write the viewed ids to disk every 5 iterations
if i == 5:
i = 0
df = open("data.dat", "wb")
pickle.dump(self.__viewed, df)
df.close()
else:
i += 1
except Exception:
traceback.print_exc()
pass
def __run(self):
"""
Checks for new posts and replies
"""
# Commented out checking new posts
# Get new posts
#for post in self.__bot.subreddit('3dprinting').new(limit = 20):
# Only check the post once
#if post.id not in self.__viewed:
#self.__viewed.append(post.id)
# See if a post needs a reply
#for word in KEYWORDS:
#if word in post.title.lower() or word in post.selftext.lower():
#post.reply(resp_modeling())
#break
#pass
# Read the inbox for mentions and replies
read = []
for item in self.__bot.inbox.unread(limit = 25):
if isinstance(item, praw.models.Comment):
# Mark as read
read.append(item)
# Check the contents of the inbox item
#
if "/u/3dp_helpbot modeling" in item.body.lower():
# Bot has been summoned with keyword modeling, give out info
item.reply(resp_modeling())
elif "/u/3dp_helpbot slicers" in item.body.lower():
# Bot has been summoned with keyword slicers, give out info
item.reply(resp_slicers())
elif "/u/3dp_helpbot trouble" in item.body.lower():
# Bot has been summoned with keyword trouble, give out info
item.reply(resp_trouble())
elif "/u/3dp_helpbot services" in item.body.lower():
# Bot has been summoned with keyword trouble, give out info
item.reply(resp_services())
elif "/u/3dp_helpbot" in item.body.lower():
# Bot has been summoned, give out info
item.reply(resp_general())
elif "good bot" == item.body.lower().strip():
# Someone called the bot a good bot!
item.reply("Thanks!" + DEBUG_INFO)
elif "bad bot" == item.body.lower().strip():
# Oh no, the bot did something bad. Feedback is welcome!
item.reply("I'm sorry to hear that. You can leave feedback [here](https://reddit.com/r/3dp_helpbot)." + DEBUG_INFO)
self.__bot.inbox.mark_read(read)
time.sleep(4) # Conform to Reddit's API; reduce spam and processing load
pass
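
# Hedged refactor sketch (not part of the original bot): the keyword-to-reply
# chain in __run could be table-driven, keeping the precedence order explicit
# and making new summon keywords one-line additions. Names below are illustrative.
SUMMON_HANDLERS = [
    ("/u/3dp_helpbot modeling", resp_modeling),
    ("/u/3dp_helpbot slicers", resp_slicers),
    ("/u/3dp_helpbot trouble", resp_trouble),
    ("/u/3dp_helpbot services", resp_services),
    ("/u/3dp_helpbot", resp_general),  # bare mention last, as the catch-all
]

def reply_for(body):
    """Return the reply text for a comment body, or None if no keyword matches."""
    lowered = body.lower()
    for keyword, handler in SUMMON_HANDLERS:
        if keyword in lowered:
            return handler()
    return None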
def main():
print("Starting /u/3dp_helpbot...")
Bot()
    # logging commented out during development
# os.chdir("/home/user/3dp-helpbot")
# sys.stdout = open("logs/log-" + datetime.datetime.now().strftime("%m-%d-%Y_%X") + ".txt", "w+")
# sys.stderr = sys.stdout
main()
| 38.406061 | 135 | 0.576456 | 789 | 6,337 | 4.52218 | 0.340938 | 0.03083 | 0.019619 | 0.033632 | 0.222253 | 0.17741 | 0.141536 | 0.124159 | 0.11491 | 0.066704 | 0 | 0.012145 | 0.311346 | 6,337 | 164 | 136 | 38.640244 | 0.805454 | 0.230551 | 0 | 0.11828 | 0 | 0.032258 | 0.325907 | 0.012519 | 0 | 0 | 0 | 0 | 0 | 1 | 0.086022 | false | 0.032258 | 0.107527 | 0.053763 | 0.258065 | 0.107527 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
98a77d126b8fadb75bc96ccc3906c0ff62219f1a | 37,989 | py | Python | pytorch_nlu/pytorch_sequencelabeling/slLayer.py | dumpmemory/Pytorch-NLU | 864fb9acc7751fc51abd3d05d24b5a9a7eab7110 | [
"Apache-2.0"
] | 115 | 2021-08-29T04:28:40.000Z | 2022-03-29T22:57:48.000Z | pytorch_nlu/pytorch_sequencelabeling/slLayer.py | dumpmemory/Pytorch-NLU | 864fb9acc7751fc51abd3d05d24b5a9a7eab7110 | [
"Apache-2.0"
] | 2 | 2022-01-14T01:52:07.000Z | 2022-03-04T11:40:10.000Z | pytorch_nlu/pytorch_sequencelabeling/slLayer.py | dumpmemory/Pytorch-NLU | 864fb9acc7751fc51abd3d05d24b5a9a7eab7110 | [
"Apache-2.0"
] | 18 | 2021-09-23T06:41:10.000Z | 2022-03-22T04:37:05.000Z | # !/usr/bin/python
# -*- coding: utf-8 -*-
# @time : 2021/7/24 21:45
# @author : Mo
# @function: Layer and Loss
from torch import nn
import torch
import numpy as np
__all__ = ["PriorMultiLabelSoftMarginLoss",
"LabelSmoothingCrossEntropyV1",
"LabelSmoothingCrossEntropy",
"MultiLabelCircleLoss",
"FocalLoss",
"DiceLossV1",
"DiceLoss",
"SpanFCLayer",
"FCLayer",
"Mish",
"CRF",
"GridPointer",
]
class PriorMultiLabelSoftMarginLoss(nn.Module):
def __init__(self, prior=None, num_labels=None, reduction="mean", eps=1e-9, tau=1.0):
"""PriorCrossEntropy
categorical-crossentropy-with-prior
urls: [通过互信息思想来缓解类别不平衡问题](https://spaces.ac.cn/archives/7615)
args:
prior: List<float>, prior of label, 先验知识. eg. [0.6, 0.2, 0.1, 0.1]
num_labels: int, num of labels, 类别数. eg. 10
reduction: str, Specifies the reduction to apply to the output, 输出形式.
eg.``'none'`` | ``'mean'`` | ``'sum'``. ``'none'``
eps: float, Minimum of maths, 极小值. eg. 1e-9
tau: float, weight of prior in loss, 先验知识的权重, eg. ``1.0``
returns:
Tensor of loss.
examples:
>>> loss = PriorCrossEntropy(prior)(logits, label)
"""
super(PriorMultiLabelSoftMarginLoss, self).__init__()
self.loss_mlsm = torch.nn.MultiLabelSoftMarginLoss(reduction=reduction)
        if not prior: prior = np.array([1/num_labels for _ in range(num_labels)])  # fall back to a uniform prior if none is given
if type(prior) ==list: prior = np.array(prior)
self.log_prior = torch.tensor(np.log(prior + eps)).unsqueeze(0)
self.eps = eps
self.tau = tau
def forward(self, logits, labels):
        # move the log-prior to the same device as the input labels
logits = logits + self.tau * self.log_prior.to(labels.device)
loss = self.loss_mlsm(logits, labels)
return loss
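
# Worked example of the prior term above (hedged; values illustrative): with
# prior = [0.6, 0.2, 0.1, 0.1] and tau = 1.0, the additive offsets are
# log(prior) ~= [-0.51, -1.61, -2.30, -2.30], so rarer classes have their
# logits shifted further down before the soft-margin loss is computed.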
class LabelSmoothingCrossEntropyV1(nn.Module):
def __init__(self, eps=0.1, reduction="mean", ignore_index=-100):
"""【ERROR,直接smooth输入logits效果不好,原因未知】LabelSmoothingCrossEntropy, no-softmax-input
eps==0-1, 通过控制ce权重、新增后置项来处理来平滑
urls: [pytorch | labelSmooth](https://zhuanlan.zhihu.com/p/265704145)
args:
ignore_index: (int, optional): Specifies a target value that is ignored and does not contribute to the input gradient. Default: -100
reduction: str, Specifies the reduction to apply to the output, 输出形式.
eg.``'none'`` | ``'mean'`` | ``'sum'``. ``'none'``
eps: float, Minimum of maths, 极小值. eg. 0.1
returns:
Tensor of loss.
examples:
>>> loss = LabelSmoothingCrossEntropyV1()(logits, label)
"""
super(LabelSmoothingCrossEntropyV1, self).__init__()
self.ignore_index = ignore_index
self.reduction = reduction
self.eps = eps
def forward(self, logits, labels): # logits --- logistic unit
V = max(logits.size()[-1] - 1, 1)
logits_smooth = (1 - self.eps) * logits + self.eps / V
logits_smooth_logsigmoid = torch.nn.functional.logsigmoid(logits_smooth)
loss = -(labels * logits_smooth_logsigmoid + (1 - labels) * logits_smooth_logsigmoid)
loss = loss.sum(dim=1) # / logits.size(1) # only return N loss values
if "mean" == self.reduction:
loss = loss.mean()
elif "sum" == self.reduction:
loss = loss.sum()
        else:
            pass  # "none": keep per-sample losses (the bare "_" here would raise NameError)
return loss
class LabelSmoothingCrossEntropy(nn.Module):
def __init__(self, eps=0.1, reduction="mean", ignore_index=-100):
"""LabelSmoothingCrossEntropy, no-softmax-input
对logits进行smoothing, 即log_softmax后进行操作
args:
ignore_index: (int, optional): Specifies a target value that is ignored and does not contribute to the input gradient. Default: -100
reduction: str, Specifies the reduction to apply to the output, 输出形式.
eg.``'none'`` | ``'mean'`` | ``'sum'``. ``'none'``
eps: float, Minimum of maths, 极小值. eg. 0.1
returns:
Tensor of loss.
examples:
>>> loss = LabelSmoothingCrossEntropyV1()(logits, label)
"""
super(LabelSmoothingCrossEntropy, self).__init__()
self.ignore_index = ignore_index
self.reduction = reduction
self.eps = eps
def forward(self, logits, labels):
V = max(logits.size()[-1] - 1, 1)
loss = (1 - self.eps) * (-(labels * torch.nn.functional.logsigmoid(logits) +
(1 - labels) * torch.nn.functional.logsigmoid(-logits))) + self.eps / V
loss = loss.sum(dim=1) / logits.size(1) # only return N loss values
if "mean" == self.reduction:
loss = loss.mean()
elif "sum" == self.reduction:
loss = loss.sum()
        else:
            pass  # "none": keep per-sample losses (the bare "_" here would raise NameError)
return loss
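
# In formula form (hedged reading of the code above): with V = num_labels - 1,
#     loss = (1 - eps) * BCEWithLogits(logits, labels) + eps / V
# i.e. the usual multi-label BCE is down-weighted by (1 - eps) and a constant
# smoothing term eps / V is added per element before reduction.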
class MultiLabelCircleLoss(nn.Module):
def __init__(self, reduction="mean", inf=1e12):
"""CircleLoss of MultiLabel, 多个目标类的多标签分类场景,希望“每个目标类得分都不小于每个非目标类的得分”
多标签分类的交叉熵(softmax+crossentropy推广, N选K问题), LSE函数的梯度恰好是softmax函数
让同类相似度与非同类相似度之间拉开一定的margin。
- 使同类相似度比最大的非同类相似度更大。
- 使最小的同类相似度比最大的非同类相似度更大。
- 所有同类相似度都比所有非同类相似度更大。
urls: [将“softmax+交叉熵”推广到多标签分类问题](https://spaces.ac.cn/archives/7359)
args:
reduction: str, Specifies the reduction to apply to the output, 输出形式.
eg.``'none'`` | ``'mean'`` | ``'sum'``. ``'none'``
inf: float, Minimum of maths, 无穷大. eg. 1e12
returns:
Tensor of loss.
examples:
>>> label, logits = [[1, 1, 1, 1], [0, 0, 0, 1]], [[0, 1, 1, 0], [1, 0, 0, 1],]
>>> label, logits = torch.tensor(label).float(), torch.tensor(logits).float()
>>> loss = MultiLabelCircleLoss()(logits, label)
"""
super(MultiLabelCircleLoss, self).__init__()
self.reduction = reduction
        self.inf = inf  # effectively infinity
def forward(self, logits, labels):
logits = (1 - 2 * labels) * logits # <3, 4>
logits_neg = logits - labels * self.inf # <3, 4>
logits_pos = logits - (1 - labels) * self.inf # <3, 4>
zeros = torch.zeros_like(logits[..., :1]) # <3, 1>
logits_neg = torch.cat([logits_neg, zeros], dim=-1) # <3, 5>
logits_pos = torch.cat([logits_pos, zeros], dim=-1) # <3, 5>
neg_loss = torch.logsumexp(logits_neg, dim=-1) # <3, >
pos_loss = torch.logsumexp(logits_pos, dim=-1) # <3, >
loss = neg_loss + pos_loss
if "mean" == self.reduction:
loss = loss.mean()
else:
loss = loss.sum()
return loss
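
# Hedged decomposition of the forward pass above: writing s_i for the logits,
# the per-sample loss is
#     logsumexp([0] + [s_i : i not a target]) + logsumexp([0] + [-s_j : j a target]),
# which is minimized when every target score exceeds 0 and every non-target
# score falls below 0, with a margin between the two groups.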
class FocalLoss(nn.Module):
def __init__(self, alpha=0.5, gamma=2, reduction="mean"):
"""FocalLoss
聚焦损失, 不确定的情况下alpha==0.5效果可能会好一点
Usage is same as nn.BCEWithLogits:
>>> loss = criteria(logits, lbs)
"""
super(FocalLoss, self).__init__()
self.crit = nn.BCEWithLogitsLoss(reduction="none")
self.reduction = reduction
self.alpha = alpha
self.gamma = gamma
def forward(self, logits, labels):
probs = torch.sigmoid(logits)
coeff = torch.abs(labels - probs).pow(self.gamma).neg()
log_0_probs = torch.where(logits >= 0, -logits + nn.functional.softplus(logits, -1, 50), -nn.functional.softplus(logits, 1, 50))
log_1_probs = torch.where(logits >= 0, nn.functional.softplus(logits, -1, 50), logits - nn.functional.softplus(logits, 1, 50))
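        # Note (hedged reading of the two lines above): softplus(x, -1, 50) equals
        # -log(1 + exp(-x)) = log(sigmoid(x)), so log_0_probs / log_1_probs are
        # numerically stable log(1 - p) and log(p) without ever forming p itself.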
loss = labels * self.alpha * log_1_probs + (1. - labels) * (1. - self.alpha) * log_0_probs
loss = loss * coeff
if self.reduction == "mean":
loss = loss.mean()
if self.reduction == "sum":
loss = loss.sum()
return loss
class DiceLossV1(nn.Module):
def __init__(self, reduction="mean", epsilon=1e-9):
"""【ERROR, 不收敛-原因未知】Dice-Loss, 切块损失, 用于不均衡数据, 但是收敛困难
paper: Dice Loss for Data-imbalanced NLP Tasks
url: https://arxiv.org/pdf/1911.02855.pdf
args:
reduction: str, Specifies the reduction to apply to the output, 输出形式.
eg.``'none'`` | ``'mean'`` | ``'sum'``. ``'none'``
epsilon: float, Minimum of maths, 无穷小. eg. 1e-9
returns:
Tensor of loss.
examples:
>>> label, logits = [[1, 1, 1, 1], [0, 0, 0, 1]], [[0, 1, 1, 0], [1, 0, 0, 1],]
>>> label, logits = torch.tensor(label).float(), torch.tensor(logits).float()
>>> loss = DiceLoss()(logits, label)
"""
super(DiceLossV1, self).__init__()
self.reduction = reduction
self.epsilon = epsilon
def forward(self, logits, labels):
prob = torch.sigmoid(logits) # <2, 4>
# logits: [N, C], index: [N, ]
index = labels.unsqueeze(1).view(prob.size(0), -1) # <2, 4>
prob = torch.gather(prob, dim=1, index=index)
dsc_i = 1 - ((1 - prob) * prob + self.epsilon) / ((1 - prob) * prob + 1 + self.epsilon)
if "mean" == self.reduction:
loss = dsc_i.mean()
else:
loss = dsc_i.sum()
return loss
class DiceLoss(nn.Module):
def __init__(self, epsilon=1e-9):
"""Dice-Loss, 切块损失, 用于不均衡数据, 但是收敛困难, 不太稳定
paper: Dice Loss for Data-imbalanced NLP Tasks
url: https://arxiv.org/pdf/1911.02855.pdf
args:
reduction: str, Specifies the reduction to apply to the output, 输出形式.
eg.``'none'`` | ``'mean'`` | ``'sum'``. ``'none'``
epsilon: float, Minimum of maths, 无穷小. eg. 1e-9
returns:
Tensor of loss.
examples:
>>> label, logits = [[1, 1, 1, 1], [0, 0, 0, 1]], [[0, 1, 1, 0], [1, 0, 0, 1],]
>>> label, logits = torch.tensor(label).long(), torch.tensor(logits).float()
>>> loss = DiceLoss()(logits, label)
"""
super(DiceLoss, self).__init__()
self.epsilon = epsilon
    def forward(self, logits, labels):  # the product of predictions and labels serves as a soft intersection
predict = torch.sigmoid(logits)
intersect = predict * labels + self.epsilon
unionset = predict + labels + self.epsilon
loss = 1. - 2 * intersect.sum() / unionset.sum()
return loss
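
# Hedged formula for the code above: with P = sigmoid(logits) and G = labels,
#     loss = 1 - 2 * sum(P * G + eps) / sum(P + G + eps)
# i.e. one minus the soft Dice coefficient, pushing the soft intersection
# toward the (soft) union.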
class SpanFCLayer(nn.Module):
def __init__(self, input_dim, output_dim, dropout_rate=0.1, is_active=True,
is_dropout=True, active_type="mish"):
"""SpanFCLayer
Span-FC-Layer, mostly last output of span of model, 新增LayerNorm(条件层标准化)
args:
input_dim: input dimension, 输入维度, eg. 768
output_dim: output dimension, 输出维度, eg. 32
dropout_rate: dropout rate, 随机失活, eg. 0.1
is_dropout: use dropout or not, 是否使用随机失活dropout, eg. True
is_active: use activation or not, 是否使用激活函数如tanh, eg. True
active_type: type of activate function, 激活函数类型, eg. "tanh", "relu", "mish"
Returns:
Tensor of batch.
"""
super(SpanFCLayer, self).__init__()
self.linear_0 = nn.Linear(input_dim, input_dim)
self.linear_1 = nn.Linear(input_dim, output_dim)
self.layer_norm = nn.LayerNorm(input_dim)
self.dropout = nn.Dropout(dropout_rate) # probability of an element to be zeroed
self.is_dropout = is_dropout
self.active_type = active_type
self.is_active = is_active
self.softmax = nn.Softmax(1)
self.sigmoid = nn.Sigmoid()
        self.relu = nn.ReLU(inplace=True)  # inplace overwrite, to save memory
self.tanh = nn.Tanh()
self.gelu = nn.GELU()
def forward(self, x):
if self.is_dropout:
x = self.dropout(x)
x = self.linear_0(x)
if self.is_active:
if self.active_type.upper() == "MISH":
x = x * torch.tanh(nn.functional.softplus(x))
elif self.active_type.upper() == "SWISH":
x = x * torch.sigmoid(x)
elif self.active_type.upper() == "TANH":
x = self.tanh(x)
elif self.active_type.upper() == "GELU":
x = self.gelu(x)
elif self.active_type.upper() == "RELU":
x = self.relu(x)
else:
x = self.relu(x)
x = self.layer_norm(x)
x = self.linear_1(x)
if self.is_active:
if self.active_type.upper() == "MISH":
x = x * torch.tanh(nn.functional.softplus(x))
elif self.active_type.upper() == "SWISH":
x = x * torch.sigmoid(x)
elif self.active_type.upper() == "TANH":
x = self.tanh(x)
elif self.active_type.upper() == "GELU":
x = self.gelu(x)
elif self.active_type.upper() == "RELU":
x = self.relu(x)
else:
x = self.relu(x)
return x
class FCLayer(nn.Module):
def __init__(self, input_dim, output_dim, dropout_rate=0.1, is_active=True,
is_dropout=True, active_type="mish"):
"""
FC-Layer, mostly last output of model
args:
input_dim: input dimension, 输入维度, eg. 768
output_dim: output dimension, 输出维度, eg. 32
dropout_rate: dropout rate, 随机失活, eg. 0.1
is_dropout: use dropout or not, 是否使用随机失活dropout, eg. True
is_active: use activation or not, 是否使用激活函数如tanh, eg. True
active_type: type of activate function, 激活函数类型, eg. "tanh", "relu"
Returns:
Tensor of batch.
"""
super(FCLayer, self).__init__()
self.linear = nn.Linear(input_dim, output_dim)
self.dropout = nn.Dropout(dropout_rate) # probability of an element to be zeroed
self.is_dropout = is_dropout
self.active_type = active_type
self.is_active = is_active
self.softmax = nn.Softmax(1)
self.sigmoid = nn.Sigmoid()
self.relu = nn.ReLU(inplace=True)
self.tanh = nn.Tanh()
self.gelu = nn.GELU()
def forward(self, x):
if self.is_dropout:
x = self.dropout(x)
x = self.linear(x)
if self.is_active:
if self.active_type.upper() == "MISH":
x = x * torch.tanh(nn.functional.softplus(x))
elif self.active_type.upper() == "SWISH":
x = x * torch.sigmoid(x)
elif self.active_type.upper() == "TANH":
x = self.tanh(x)
elif self.active_type.upper() == "GELU":
x = self.gelu(x)
elif self.active_type.upper() == "RELU":
x = self.relu(x)
else:
x = self.relu(x)
return x
class Swish(nn.Module):
def __init__(self):
""" Swish函数可以看做是介于线性函数与ReLU函数之间的平滑函数.(sigmoid和Relu的拼凑)
Searching for Activation Functions
Applies the swish function element-wise:
f(x)=x⋅sigmoid(βx)
paper: https://arxiv.org/abs/1710.05941(2017)
"""
super(Swish, self).__init__()
def forward(self, x):
return x * torch.sigmoid(x)
class Mish(nn.Module):
def __init__(self):
""" Mish函数可以看做是介于线性函数与ReLU函数之间的平滑函数.(tanh和Relu的拼凑)
Script provides functional interface for Mish activation function.
Applies the mish function element-wise:
mish(x) = x * tanh(softplus(x)) = x * tanh(ln(1 + exp(x)))
See additional documentation for mish class.
"""
super(Mish).__init__()
def forword(self, x):
x = x * torch.tanh(nn.functional.softplus(x))
return x
class CRF(nn.Module):
"""Conditional random field.
This module implements a conditional random field [LMP01]_. The forward computation
of this class computes the log likelihood of the given sequence of tags and
emission score tensor. This class also has `~CRF.decode` method which finds
the best tag sequence given an emission score tensor using `Viterbi algorithm`_.
Args:
num_tags: Number of tags.
batch_first: Whether the first dimension corresponds to the size of a minibatch.
Attributes:
start_transitions (`~torch.nn.Parameter`): Start transition score tensor of size
``(num_tags,)``.
end_transitions (`~torch.nn.Parameter`): End transition score tensor of size
``(num_tags,)``.
transitions (`~torch.nn.Parameter`): Transition score tensor of size
``(num_tags, num_tags)``.
.. [LMP01] Lafferty, J., McCallum, A., Pereira, F. (2001).
"Conditional random fields: Probabilistic models for segmenting and
labeling sequence data". *Proc. 18th International Conf. on Machine
Learning*. Morgan Kaufmann. pp. 282–289.
.. _Viterbi algorithm: https://en.wikipedia.org/wiki/Viterbi_algorithm
"""
def __init__(self, num_tags: int, batch_first: bool = False) -> None:
if num_tags <= 0:
raise ValueError('invalid number of tags: {}'.format(num_tags))
super().__init__()
self.num_tags = num_tags
self.batch_first = batch_first
self.start_transitions = nn.Parameter(torch.empty(num_tags))
self.end_transitions = nn.Parameter(torch.empty(num_tags))
self.transitions = nn.Parameter(torch.empty(num_tags, num_tags))
self.reset_parameters()
def reset_parameters(self) -> None:
"""Initialize the transition parameters.
The parameters will be initialized randomly from a uniform distribution
between -0.1 and 0.1.
"""
nn.init.uniform_(self.start_transitions, -0.1, 0.1)
nn.init.uniform_(self.end_transitions, -0.1, 0.1)
nn.init.uniform_(self.transitions, -0.1, 0.1)
def __repr__(self) -> str:
return '{}(num_tags={})'.format(self.__class__.__name__, self.num_tags)
def forward(self, emissions: torch.Tensor, tags: torch.LongTensor, mask = None, reduction = 'mean'):
"""Compute the conditional log likelihood of a sequence of tags given emission scores.
Args:
emissions (`~torch.Tensor`): Emission score tensor of size
``(seq_length, batch_size, num_tags)`` if ``batch_first`` is ``False``,
``(batch_size, seq_length, num_tags)`` otherwise.
tags (`~torch.LongTensor`): Sequence of tags tensor of size
``(seq_length, batch_size)`` if ``batch_first`` is ``False``,
``(batch_size, seq_length)`` otherwise.
mask (`~torch.ByteTensor`): Mask tensor of size ``(seq_length, batch_size)``
if ``batch_first`` is ``False``, ``(batch_size, seq_length)`` otherwise.
reduction: Specifies the reduction to apply to the output:
``none|sum|mean|token_mean``. ``none``: no reduction will be applied.
``sum``: the output will be summed over batches. ``mean``: the output will be
averaged over batches. ``token_mean``: the output will be averaged over tokens.
Returns:
`~torch.Tensor`: The log likelihood. This will have size ``(batch_size,)`` if
reduction is ``none``, ``()`` otherwise.
"""
if reduction not in ('none', 'sum', 'mean', 'token_mean'):
raise ValueError('invalid reduction: {}'.format(reduction))
if mask is None:
mask = torch.ones_like(tags, dtype=torch.uint8, device=tags.device)
if mask.dtype != torch.uint8:
mask = mask.byte()
self._validate(emissions, tags=tags, mask=mask)
if self.batch_first:
emissions = emissions.transpose(0, 1)
tags = tags.transpose(0, 1)
mask = mask.transpose(0, 1)
# shape: (batch_size,)
numerator = self._compute_score(emissions, tags, mask)
# shape: (batch_size,)
denominator = self._compute_normalizer(emissions, mask)
# shape: (batch_size,)
llh = numerator - denominator
if reduction == 'none':
return llh
if reduction == 'sum':
return llh.sum()
if reduction == 'mean':
return llh.mean()
return llh.sum() / mask.float().sum()
def decode(self, emissions: torch.Tensor, mask = None, nbest = None, pad_tag = None):
"""Find the most likely tag sequence using Viterbi algorithm.
Args:
emissions (`~torch.Tensor`): Emission score tensor of size
``(seq_length, batch_size, num_tags)`` if ``batch_first`` is ``False``,
``(batch_size, seq_length, num_tags)`` otherwise.
mask (`~torch.ByteTensor`): Mask tensor of size ``(seq_length, batch_size)``
if ``batch_first`` is ``False``, ``(batch_size, seq_length)`` otherwise.
nbest (`int`): Number of most probable paths for each sequence
pad_tag (`int`): Tag at padded positions. Often input varies in length and
the length will be padded to the maximum length in the batch. Tags at
the padded positions will be assigned with a padding tag, i.e. `pad_tag`
Returns:
A PyTorch tensor of the best tag sequence for each batch of shape
(nbest, batch_size, seq_length)
"""
if nbest is None:
nbest = 1
if mask is None:
mask = torch.ones(emissions.shape[:2], dtype=torch.uint8,
device=emissions.device)
if mask.dtype != torch.uint8:
mask = mask.byte()
self._validate(emissions, mask=mask)
if self.batch_first:
emissions = emissions.transpose(0, 1)
mask = mask.transpose(0, 1)
if nbest == 1:
return self._viterbi_decode(emissions, mask, pad_tag).unsqueeze(0)
return self._viterbi_decode_nbest(emissions, mask, nbest, pad_tag)
def _validate(self, emissions: torch.Tensor, tags = None, mask = None):
if emissions.dim() != 3:
raise ValueError('emissions must have dimension of 3, got {}'.format(emissions.dim()))
if emissions.size(2) != self.num_tags:
raise ValueError(
'expected last dimension of emissions is {}, '.format(self.num_tags) + 'got {}'.format(emissions.size(2)))
if tags is not None:
if emissions.shape[:2] != tags.shape:
raise ValueError(
'the first two dimensions of emissions and tags must match, '
'got {} and {}'.format(tuple(emissions.shape[:2]), tuple(tags.shape)))
if mask is not None:
if emissions.shape[:2] != mask.shape:
raise ValueError(
'the first two dimensions of emissions and mask must match, '
'got {} and {}'.format(tuple(emissions.shape[:2]), tuple(mask.shape)))
no_empty_seq = not self.batch_first and mask[0].all()
no_empty_seq_bf = self.batch_first and mask[:, 0].all()
if not no_empty_seq and not no_empty_seq_bf:
raise ValueError('mask of the first timestep must all be on')
def _compute_score(self, emissions: torch.Tensor, tags: torch.LongTensor, mask: torch.ByteTensor):
# emissions: (seq_length, batch_size, num_tags)
# tags: (seq_length, batch_size)
# mask: (seq_length, batch_size)
seq_length, batch_size = tags.shape
mask = mask.float()
# Start transition score and first emission
# shape: (batch_size,)
score = self.start_transitions[tags[0]]
score += emissions[0, torch.arange(batch_size), tags[0]]
for i in range(1, seq_length):
# Transition score to next tag, only added if next timestep is valid (mask == 1)
# shape: (batch_size,)
score += self.transitions[tags[i - 1], tags[i]] * mask[i]
# Emission score for next tag, only added if next timestep is valid (mask == 1)
# shape: (batch_size,)
score += emissions[i, torch.arange(batch_size), tags[i]] * mask[i]
# End transition score
# shape: (batch_size,)
seq_ends = mask.long().sum(dim=0) - 1
# shape: (batch_size,)
last_tags = tags[seq_ends, torch.arange(batch_size)]
# shape: (batch_size,)
score += self.end_transitions[last_tags]
return score
def _compute_normalizer(self, emissions: torch.Tensor, mask: torch.ByteTensor):
# emissions: (seq_length, batch_size, num_tags)
# mask: (seq_length, batch_size)
seq_length = emissions.size(0)
# Start transition score and first emission; score has size of
# (batch_size, num_tags) where for each batch, the j-th column stores
# the score that the first timestep has tag j
# shape: (batch_size, num_tags)
score = self.start_transitions + emissions[0]
for i in range(1, seq_length):
# Broadcast score for every possible next tag
# shape: (batch_size, num_tags, 1)
broadcast_score = score.unsqueeze(2)
# Broadcast emission score for every possible current tag
# shape: (batch_size, 1, num_tags)
broadcast_emissions = emissions[i].unsqueeze(1)
# Compute the score tensor of size (batch_size, num_tags, num_tags) where
# for each sample, entry at row i and column j stores the sum of scores of all
# possible tag sequences so far that end with transitioning from tag i to tag j
# and emitting
# shape: (batch_size, num_tags, num_tags)
next_score = broadcast_score + self.transitions + broadcast_emissions
# Sum over all possible current tags, but we're in score space, so a sum
# becomes a log-sum-exp: for each sample, entry i stores the sum of scores of
# all possible tag sequences so far, that end in tag i
# shape: (batch_size, num_tags)
next_score = torch.logsumexp(next_score, dim=1)
# Set score to the next score if this timestep is valid (mask == 1)
# shape: (batch_size, num_tags)
score = torch.where(mask[i].unsqueeze(1), next_score, score)
# End transition score
# shape: (batch_size, num_tags)
score += self.end_transitions
# Sum (log-sum-exp) over all possible tags
# shape: (batch_size,)
return torch.logsumexp(score, dim=1)
def _viterbi_decode(self, emissions: torch.FloatTensor, mask: torch.ByteTensor, pad_tag = None):
# emissions: (seq_length, batch_size, num_tags)
# mask: (seq_length, batch_size)
# return: (batch_size, seq_length)
if pad_tag is None:
pad_tag = 0
device = emissions.device
seq_length, batch_size = mask.shape
# Start transition and first emission
# shape: (batch_size, num_tags)
score = self.start_transitions + emissions[0]
history_idx = torch.zeros((seq_length, batch_size, self.num_tags),
dtype=torch.long, device=device)
oor_idx = torch.zeros((batch_size, self.num_tags),
dtype=torch.long, device=device)
oor_tag = torch.full((seq_length, batch_size), pad_tag,
dtype=torch.long, device=device)
# - score is a tensor of size (batch_size, num_tags) where for every batch,
# value at column j stores the score of the best tag sequence so far that ends
# with tag j
# - history_idx saves where the best tags candidate transitioned from; this is used
# when we trace back the best tag sequence
# - oor_idx saves the best tags candidate transitioned from at the positions
# where mask is 0, i.e. out of range (oor)
# Viterbi algorithm recursive case: we compute the score of the best tag sequence
# for every possible next tag
for i in range(1, seq_length):
# Broadcast viterbi score for every possible next tag
# shape: (batch_size, num_tags, 1)
broadcast_score = score.unsqueeze(2)
# Broadcast emission score for every possible current tag
# shape: (batch_size, 1, num_tags)
broadcast_emission = emissions[i].unsqueeze(1)
# Compute the score tensor of size (batch_size, num_tags, num_tags) where
# for each sample, entry at row i and column j stores the score of the best
# tag sequence so far that ends with transitioning from tag i to tag j and emitting
# shape: (batch_size, num_tags, num_tags)
next_score = broadcast_score + self.transitions + broadcast_emission
# Find the maximum score over all possible current tag
# shape: (batch_size, num_tags)
next_score, indices = next_score.max(dim=1)
# Set score to the next score if this timestep is valid (mask == 1)
# and save the index that produces the next score
# shape: (batch_size, num_tags)
score = torch.where(mask[i].unsqueeze(-1), next_score, score)
indices = torch.where(mask[i].unsqueeze(-1), indices, oor_idx)
history_idx[i - 1] = indices
# End transition score
# shape: (batch_size, num_tags)
end_score = score + self.end_transitions
_, end_tag = end_score.max(dim=1)
# shape: (batch_size,)
seq_ends = mask.long().sum(dim=0) - 1
# insert the best tag at each sequence end (last position with mask == 1)
history_idx = history_idx.transpose(1, 0).contiguous()
history_idx.scatter_(1, seq_ends.view(-1, 1, 1).expand(-1, 1, self.num_tags),
end_tag.view(-1, 1, 1).expand(-1, 1, self.num_tags))
history_idx = history_idx.transpose(1, 0).contiguous()
# The most probable path for each sequence
best_tags_arr = torch.zeros((seq_length, batch_size),
dtype=torch.long, device=device)
best_tags = torch.zeros(batch_size, 1, dtype=torch.long, device=device)
for idx in range(seq_length - 1, -1, -1):
best_tags = torch.gather(history_idx[idx], 1, best_tags)
best_tags_arr[idx] = best_tags.data.view(batch_size)
return torch.where(mask, best_tags_arr, oor_tag).transpose(0, 1)
def _viterbi_decode_nbest(self, emissions: torch.FloatTensor, mask: torch.ByteTensor, nbest: int, pad_tag = None):
# emissions: (seq_length, batch_size, num_tags)
# mask: (seq_length, batch_size)
# return: (nbest, batch_size, seq_length)
if pad_tag is None:
pad_tag = 0
device = emissions.device
seq_length, batch_size = mask.shape
# Start transition and first emission
# shape: (batch_size, num_tags)
score = self.start_transitions + emissions[0]
history_idx = torch.zeros((seq_length, batch_size, self.num_tags, nbest),
dtype=torch.long, device=device)
oor_idx = torch.zeros((batch_size, self.num_tags, nbest),
dtype=torch.long, device=device)
oor_tag = torch.full((seq_length, batch_size, nbest), pad_tag,
dtype=torch.long, device=device)
# + score is a tensor of size (batch_size, num_tags) where for every batch,
# value at column j stores the score of the best tag sequence so far that ends
# with tag j
# + history_idx saves where the best tags candidate transitioned from; this is used
# when we trace back the best tag sequence
# - oor_idx saves the best tags candidate transitioned from at the positions
# where mask is 0, i.e. out of range (oor)
# Viterbi algorithm recursive case: we compute the score of the best tag sequence
# for every possible next tag
for i in range(1, seq_length):
if i == 1:
broadcast_score = score.unsqueeze(-1)
broadcast_emission = emissions[i].unsqueeze(1)
# shape: (batch_size, num_tags, num_tags)
next_score = broadcast_score + self.transitions + broadcast_emission
else:
broadcast_score = score.unsqueeze(-1)
broadcast_emission = emissions[i].unsqueeze(1).unsqueeze(2)
# shape: (batch_size, num_tags, nbest, num_tags)
next_score = broadcast_score + self.transitions.unsqueeze(1) + broadcast_emission
# Find the top `nbest` maximum score over all possible current tag
# shape: (batch_size, nbest, num_tags)
next_score, indices = next_score.view(batch_size, -1, self.num_tags).topk(nbest, dim=1)
if i == 1:
score = score.unsqueeze(-1).expand(-1, -1, nbest)
indices = indices * nbest
# convert to shape: (batch_size, num_tags, nbest)
next_score = next_score.transpose(2, 1)
indices = indices.transpose(2, 1)
# Set score to the next score if this timestep is valid (mask == 1)
# and save the index that produces the next score
# shape: (batch_size, num_tags, nbest)
score = torch.where(mask[i].unsqueeze(-1).unsqueeze(-1), next_score, score)
indices = torch.where(mask[i].unsqueeze(-1).unsqueeze(-1), indices, oor_idx)
history_idx[i - 1] = indices
# End transition score shape: (batch_size, num_tags, nbest)
end_score = score + self.end_transitions.unsqueeze(-1)
_, end_tag = end_score.view(batch_size, -1).topk(nbest, dim=1)
# shape: (batch_size,)
seq_ends = mask.long().sum(dim=0) - 1
# insert the best tag at each sequence end (last position with mask == 1)
history_idx = history_idx.transpose(1, 0).contiguous()
history_idx.scatter_(1, seq_ends.view(-1, 1, 1, 1).expand(-1, 1, self.num_tags, nbest),
end_tag.view(-1, 1, 1, nbest).expand(-1, 1, self.num_tags, nbest))
history_idx = history_idx.transpose(1, 0).contiguous()
# The most probable path for each sequence
best_tags_arr = torch.zeros((seq_length, batch_size, nbest),
dtype=torch.long, device=device)
best_tags = torch.arange(nbest, dtype=torch.long, device=device) \
.view(1, -1).expand(batch_size, -1)
for idx in range(seq_length - 1, -1, -1):
best_tags = torch.gather(history_idx[idx].view(batch_size, -1), 1, best_tags)
best_tags_arr[idx] = best_tags.data.view(batch_size, -1) // nbest
return torch.where(mask.unsqueeze(-1), best_tags_arr, oor_tag).permute(2, 1, 0)
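
# Hedged usage sketch for the CRF above (sizes illustrative, batch_first layout):
#     crf = CRF(num_tags=5, batch_first=True)
#     emissions = torch.randn(2, 7, 5)      # (batch, seq_len, num_tags)
#     tags = torch.randint(0, 5, (2, 7))    # gold tag ids
#     nll = -crf(emissions, tags)           # forward() returns the log likelihood
#     best_paths = crf.decode(emissions)    # (nbest, batch, seq_len); nbest defaults to 1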
class GridPointer(nn.Module):
def __init__(self, head_nums, head_size, is_RoPE=True):
"""GridPointer, 分类-网格(全局)指针模块
将序列的每个(start, end)作为整体来进行判断
代码来源:
网址url: [GlobalPointer:用统一的方式处理嵌套和非嵌套NER](https://kexue.fm/archives/8373)
ptorch版gaohongkui: https://github.com/gaohongkui/GlobalPointer_pytorch
"""
super(GridPointer, self).__init__()
self.head_nums = head_nums
self.head_size = head_size
self.is_RoPE = is_RoPE
def forward(self, x, attention_mask, token_type_ids):
batch_size = x.size(0)
max_len = x.size(1)
outputs = torch.split(x, self.head_size * 2, dim=-1) # <batch, len, label, head*2>
outputs = torch.stack(outputs, dim=-2)
qw, kw = outputs[..., :self.head_size], outputs[..., self.head_size:] # <batch, len, label, head>
if self.is_RoPE:
def SinusoidalPositionEmbedding(output_size, batch_size, max_len, device):
"""embedding of Sinusoidal-Position
"""
position_ids = torch.arange(0, max_len, dtype=torch.float).unsqueeze(-1)
indices = torch.arange(0, output_size // 2, dtype=torch.float)
indices = torch.pow(10000, -2 * indices / output_size)
embeddings = position_ids * indices
embeddings = torch.stack([torch.sin(embeddings), torch.cos(embeddings)], dim=-1)
embeddings = embeddings.repeat((batch_size, *([1] * len(embeddings.shape))))
embeddings = torch.reshape(embeddings, (batch_size, max_len, output_size))
embeddings = embeddings.to(device)
return embeddings
pos_emb = SinusoidalPositionEmbedding(self.head_size, batch_size, max_len, device=x.device) # <batch, len, head>
cos_pos = pos_emb[..., None, 1::2].repeat_interleave(2, dim=-1) # <batch, len, 1, head>
sin_pos = pos_emb[..., None, ::2].repeat_interleave(2, dim=-1) # <batch, len, 1, head>
qw2 = torch.stack([-qw[..., 1::2], qw[..., ::2]], -1)
qw2 = qw2.reshape(qw.shape)
qw = qw * cos_pos + qw2 * sin_pos
kw2 = torch.stack([-kw[..., 1::2], kw[..., ::2]], -1)
kw2 = kw2.reshape(kw.shape)
kw = kw * cos_pos + kw2 * sin_pos
logits = torch.einsum("bmhd, bnhd->bhmn", qw, kw) # <batch, label, len, len>
pad_mask = attention_mask.unsqueeze(1).unsqueeze(1).expand(batch_size, self.head_nums, max_len, max_len)
logits = logits*pad_mask - (1-pad_mask)*1e12
        # mask out the lower triangle (spans where end < start)
mask = torch.tril(torch.ones_like(logits), diagonal=-1)
logits = (logits - mask * 1e12)
logits = logits / self.head_size**0.5 # scale
return logits
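
# Hedged usage sketch for GridPointer (sizes illustrative): x is expected to be
# a projection of the encoder states to head_nums * head_size * 2 channels:
#     gp = GridPointer(head_nums=4, head_size=64)
#     x = torch.randn(2, 32, 4 * 64 * 2)                   # (batch, len, head_nums*head_size*2)
#     logits = gp(x, attention_mask=torch.ones(2, 32), token_type_ids=None)
#     # logits: (batch, head_nums, len, len), one score per (start, end) span;
#     # note token_type_ids is accepted but unused in the forward pass above.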
| 45.117577 | 144 | 0.587565 | 4,744 | 37,989 | 4.564713 | 0.113828 | 0.036574 | 0.021335 | 0.021427 | 0.605449 | 0.562641 | 0.525791 | 0.484784 | 0.455599 | 0.448303 | 0 | 0.019169 | 0.295533 | 37,989 | 841 | 145 | 45.171225 | 0.789926 | 0.358867 | 0 | 0.416476 | 0 | 0 | 0.030048 | 0.003641 | 0 | 0 | 0 | 0 | 0 | 1 | 0.080092 | false | 0 | 0.006865 | 0.004577 | 0.171625 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
98a9ddb77716f5ad2a2d263d27421845bd0aff98 | 1,528 | py | Python | models/lossfuns.py | zekunhao1995/DualSDF | 177a102b315949bfa59a6ae1c47de52ddbea6eaa | [
"MIT"
] | 107 | 2020-04-07T01:15:14.000Z | 2022-03-17T09:32:46.000Z | models/lossfuns.py | zekunhao1995/DualSDF | 177a102b315949bfa59a6ae1c47de52ddbea6eaa | [
"MIT"
] | 6 | 2020-05-16T00:41:28.000Z | 2021-04-27T16:04:21.000Z | models/lossfuns.py | zekunhao1995/DualSDF | 177a102b315949bfa59a6ae1c47de52ddbea6eaa | [
"MIT"
] | 17 | 2020-04-14T10:50:24.000Z | 2022-01-20T09:43:08.000Z | import numpy as np
# PyTorch
import torch
import torch.nn as nn
import torch.nn.functional as F
# Original DeepSDF loss
def clamped_l1(pred_dist, gt_dist, trunc=0.1):
pred_dist_trunc = torch.clamp(pred_dist, -trunc, trunc)
gt_dist_trunc = torch.clamp(gt_dist, -trunc, trunc)
loss = torch.abs(pred_dist_trunc - gt_dist_trunc)
return loss
# [B N]
def clamped_l1_correct(pred_dist, gt_dist, trunc=0.1):
pred_dist_lower = torch.clamp(pred_dist, None, trunc)
pred_dist_upper = torch.clamp(pred_dist, -trunc, None)
pos_trunced_mask = (gt_dist >= trunc)
neg_trunced_mask = (gt_dist <= -trunc)
valid_mask = ~(pos_trunced_mask|neg_trunced_mask)
loss_valid = torch.sum(torch.abs(pred_dist - gt_dist) * valid_mask.float(), dim=-1)
loss_lower = torch.sum((trunc - pred_dist_lower) * pos_trunced_mask.float(), dim=-1)
loss_upper = torch.sum((pred_dist_upper + trunc) * neg_trunced_mask.float(), dim=-1)
loss = (loss_lower + loss_upper + loss_valid) / pred_dist.size(1)
return loss
# L2 loss on the outside, encourage inside to < 0.0
def onesided_l2(pred_dist, gt_dist):
valid_mask = (gt_dist >= 0.0).float()
num_valid = torch.sum(valid_mask, dim=-1)
num_inside = valid_mask[0].numel() - num_valid
loss_valid = torch.sum((gt_dist-pred_dist)**2 * valid_mask, dim=-1) / (num_valid+1e-8)
loss_inside = torch.sum(torch.clamp(pred_dist, 0.0, None) * (1.0-valid_mask), dim=-1) / (num_inside+1e-8)
loss = loss_valid + loss_inside
return loss
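
# Hedged usage sketch (shapes illustrative): pred_dist and gt_dist are
# per-point signed distances of shape [B, N].
#     pred = torch.randn(2, 5000)
#     gt = torch.randn(2, 5000)
#     l1 = clamped_l1(pred, gt, trunc=0.1)            # elementwise, [2, 5000]
#     l1c = clamped_l1_correct(pred, gt, trunc=0.1)   # per shape, [2]
#     l2 = onesided_l2(pred, gt)                      # per shape, [2]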
| 40.210526 | 109 | 0.697644 | 252 | 1,528 | 3.940476 | 0.206349 | 0.128902 | 0.077543 | 0.056395 | 0.317221 | 0.197382 | 0.058409 | 0.058409 | 0.058409 | 0 | 0 | 0.022979 | 0.174084 | 1,528 | 37 | 110 | 41.297297 | 0.763867 | 0.055628 | 0 | 0.107143 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.107143 | false | 0 | 0.142857 | 0 | 0.357143 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
98ab4c8c271cda0de6cc40da611003519c199dfe | 5,184 | py | Python | rmot/generate_stim/run.py | DrugowitschLab/motion-structure-used-in-perception | d4f0115e154d5e529094383963c8cdaa1386720b | [
"MIT"
] | 4 | 2020-04-03T09:34:29.000Z | 2020-10-22T20:36:40.000Z | rmot/generate_stim/run.py | DrugowitschLab/motion-structure-used-in-perception | d4f0115e154d5e529094383963c8cdaa1386720b | [
"MIT"
] | null | null | null | rmot/generate_stim/run.py | DrugowitschLab/motion-structure-used-in-perception | d4f0115e154d5e529094383963c8cdaa1386720b | [
"MIT"
] | 1 | 2020-09-17T09:48:27.000Z | 2020-09-17T09:48:27.000Z | import numpy as np
import pylab as pl
from time import time
import logging
from motionstruct.functions import init_logging, asciiL, recursive_dict_update
from motionstruct.classes import PhiWorld
import scipy.io as sio
import os
# Help string and argument parsing
from argparse import ArgumentParser, RawTextHelpFormatter
parser = ArgumentParser(formatter_class=RawTextHelpFormatter,
description="Stimulus generator for rotational MOT",
epilog="If using ipython3, indicate end of ipython arg parser via '--':\n $ ipython3 run.py -- <args>")
parser.add_argument(dest="cfgfile", metavar="filename", default="config.py", nargs='?', type=str,
help="config.py file holding dictionary 'cfg' (same directory, default: config.py)")
parser.add_argument("-u", "--update", dest="updatefile", metavar="filename", default=[], nargs='*', type=str,
help="file holding dictionary 'cfg' for recursively updating the main cfg (same directory, default: None)")
args = parser.parse_args()
# Import config from specified config file
cfgmodulename = args.cfgfile.split(".py")[0]
cmd = "from " + cfgmodulename + " import cfg"
exec(cmd)
msg = []
for udf in args.updatefile:
updatemodulename = udf.split(".py")[0]
cmd = "from " + updatemodulename + " import cfg as ucfg"
exec(cmd)
_,msg = recursive_dict_update(cfg, ucfg, msg=msg, s="[%s] cfg." % updatemodulename)
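
# Hedged note: the exec()-based imports above could equivalently be written with
# importlib, avoiding string-built code (module names as parsed from the CLI):
#     import importlib
#     cfg = importlib.import_module(cfgmodulename).cfg
#     for udf in args.updatefile:
#         ucfg = importlib.import_module(udf.split(".py")[0]).cfg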
# Dryrun?
DRYRUN = cfg["global"]["DRYRUN"]
# # # # # # # # # # # # # # # # # # # # # #
# # # M A I N R O U T I N E # # #
# # # # # # # # # # # # # # # # # # # # # #
# Create the output directory
outdir = cfg["global"]["outdir"]
if not DRYRUN:
if outdir[-1] != "/":
outdir += "/"
if not os.path.exists(outdir):
os.makedirs(outdir)
# Create the logger
logger = init_logging(cfg, outdir)
logger.info("Stimulus generator started.")
logger.info("Loading config from file: %s.py" % cfgmodulename)
if len(args.updatefile) > 0:
logger.info("Updating config from files: %s" % str(args.updatefile))
logger.debug("Number of entry updates: %d. Details follow." % len(msg))
for m in msg:
logger.debug("Updated key: %s" % m)
logger.info("DSL: '%s'" % cfg["global"]["dsl"])
if not DRYRUN:
logger.debug("Output directory: %s" % outdir)
# copy the config file to outdir
from shutil import copyfile
logger.debug("Copy file '%s.py' to '%s'." % (cfgmodulename, outdir))
copyfile("%s.py" % cfgmodulename, outdir+"config.py")
if len(args.updatefile) > 0:
for i,udf in enumerate(args.updatefile):
logger.debug("Copy file '%s' to '%s'." % (udf, outdir))
copyfile(udf, outdir+"uconfig_%d.py" % (i+1))
if os.path.isfile("speed_and_seed.py"):
logger.debug("Copy file 'speed_and_seed.py' to '%s'." % (outdir,))
copyfile("speed_and_seed.py", outdir+"speed_and_seed.py")
L = cfg["world"]["L"]
N, M = L.shape
T = cfg["global"]["T"]
targets = cfg["global"]["targets"]
logger.info("The data's motion structure matrix L looks as follows:\n" + asciiL(L, indent=5))
# Generate World (can be reused in repetitions)
kwargs = cfg["world"]
wld = PhiWorld(**kwargs)
# Generate Observation Generator (can be reused in repetitions)
kwargs = cfg["observe"]
obscls = kwargs.pop("cls")
obs = obscls(**kwargs)
# Data storage
archive = dict(
wld_t = [], # world (ground truth) simulation times
wld_S = [], # world (ground truth) states
obs_t = [], # observation (visible data) time points
obs_X = [], # observation (visible data) values
)
# Take start time
t_start = time()
reps = cfg["global"]["reps"]
# HERE COMES THE OUTER MAIN LOOP
logger.info("Enter simulation main loop.")
for rep in range(reps):
logger.info("*** Trial %d of %d ***" % (rep+1, reps))
# Draw the data using the Observation Generator which calls World Generator
obs.run_sim_and_generate_observations(T, wld)
# Store data
logger.debug("Store data to archive.")
archive["wld_t"].append(wld.get_times())
archive["wld_S"].append(wld.S)
archive["obs_t"].append(obs.get_times())
archive["obs_X"].append(obs.X)
# Write matlab file
if not DRYRUN:
logger.debug("Write matlab file.")
fname = cfg["global"]["f_outfname"](rep+1) # make index matlab friendly
if not os.path.exists(os.path.dirname(fname)):
os.makedirs(os.path.dirname(fname))
mdict = {'X':obs.X, 'T':obs.get_times(), 'targets':[t+1 for t in targets], 'dsl' : cfg["global"]["dsl"]}
sio.savemat( fname, mdict )
t_end = time()
logger.info("Stimulus generation main loop completed. Main loop runtime: %5.3fs." % (t_end - t_start))
if not DRYRUN:
fname = outdir + "simdata.npz"
logger.info("Save results to file '%s'." % fname)
np.savez_compressed(fname, **archive)
logger.info("Generation completed successfully.")
# TEST WITH
#fig = figure(figsize=(16,9)); ax = fig.add_axes((0,0,1,1), aspect='auto', xlim=(0,1920), ylim=(0,1080)); tn=0
#while tn < obs.X.shape[0]: ax.plot(obs.X[tn,:,0], obs.X[tn,:,1], 'o'); tn+=1
| 35.751724 | 129 | 0.637924 | 715 | 5,184 | 4.566434 | 0.316084 | 0.030628 | 0.013476 | 0.017152 | 0.077795 | 0.020214 | 0.020214 | 0 | 0 | 0 | 0 | 0.008706 | 0.202353 | 5,184 | 144 | 130 | 36 | 0.780895 | 0.167824 | 0 | 0.113402 | 0 | 0.010309 | 0.279075 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.14433 | 0 | 0.14433 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
98acf47b5de02fab44c2cbbf5152e7f879ad95fc | 598 | py | Python | Interview_Questions/Tencent_20190310/3_card.py | sintocos/CS_Roaming | c85d2c95337a00079160f7f5a8a1b0e31eb5fcb3 | [
"BSD-3-Clause"
] | null | null | null | Interview_Questions/Tencent_20190310/3_card.py | sintocos/CS_Roaming | c85d2c95337a00079160f7f5a8a1b0e31eb5fcb3 | [
"BSD-3-Clause"
] | null | null | null | Interview_Questions/Tencent_20190310/3_card.py | sintocos/CS_Roaming | c85d2c95337a00079160f7f5a8a1b0e31eb5fcb3 | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: UTF-8 -*-
# description: Little Q has a stack of n cards, numbered 1 to n from top to bottom.
#              He repeatedly performs the following operation: discard the card
#              currently on top, then move the new top card to the bottom of the stack.
#              He keeps going until only one card remains, and wants to know the
#              number of each discarded card, in order.
# example: input: 7  (one line containing a single integer n, 1 <= n <= 1e6)
#
#          output: 1 3 5 7 4 2 6  (n space-separated integers: the card numbers in discard order)
"""
@param string line: one test case
@return string: the processed result
"""
def solution(line):
cards = [str(x + 1) for x in range(int(line))]
res = []
while len(cards) > 2:
res += [cards.pop(0)]
cards.append(cards.pop(0))
return " ".join(res + cards)
print(solution(8))
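
# Equivalent sketch with collections.deque (hedged): popleft/rotate are O(1),
# whereas list.pop(0) in solution() above is O(n) per operation.
from collections import deque

def solution_deque(line):
    cards = deque(str(x + 1) for x in range(int(line)))
    res = []
    while len(cards) > 1:
        res.append(cards.popleft())  # discard the top card
        cards.rotate(-1)             # move the new top card to the bottom
    return " ".join(res + list(cards))

print(solution_deque(8))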
| 22.148148 | 66 | 0.583612 | 71 | 598 | 4.915493 | 0.71831 | 0.045845 | 0.051576 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.041096 | 0.267559 | 598 | 26 | 67 | 23 | 0.755708 | 0.571906 | 0 | 0 | 0 | 0 | 0.004132 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.125 | false | 0 | 0 | 0 | 0.25 | 0.125 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
98ae3f1be8211f99c7d8b22fd98d93670a9d293c | 10,626 | py | Python | scripts/gis_append.py | mwb249/connect-dispatch | c6f17c088bce639daaa9fd1ab04f3edd15dc1887 | [
"MIT"
] | null | null | null | scripts/gis_append.py | mwb249/connect-dispatch | c6f17c088bce639daaa9fd1ab04f3edd15dc1887 | [
"MIT"
] | null | null | null | scripts/gis_append.py | mwb249/connect-dispatch | c6f17c088bce639daaa9fd1ab04f3edd15dc1887 | [
"MIT"
] | null | null | null | """
Connect|DISPATCH: Connecting Computer-Aided Dispatch (CAD) Systems to ArcGIS.
The gis_append script is activated when incident_push.p is modified in the watch directory.
"""
from connectdispatch import fire_ops
import logging
import os
import yaml
import pickle
import csv
import mgrs
from datetime import datetime
from arcgis.gis import GIS
from arcgis.geocoding import Geocoder, geocode
from arcgis.geometry import filters, Point
from pyproj import Proj, transform
from copy import deepcopy
# Logging
logging.basicConfig(level=os.environ.get("LOGLEVEL", "INFO"))
# Directories
cwd = os.getcwd()
watch_dir = cwd + '/watch'
config_dir = cwd + '/config'
# Open config file
with open(config_dir + '/config.yml', 'r') as yamlfile:
    cfg = yaml.safe_load(yamlfile)  # yaml.load without an explicit Loader is deprecated/removed in newer PyYAML
# Open incident push file
file_incident_push = open(watch_dir + '/incident_push/incident_push.p', 'rb')
incident_push = pickle.load(file_incident_push)
# Open agency codes table
file_agency_codes = csv.DictReader(open(config_dir + '/agency_codes.csv'))
agency_codes = {rows['agency_code']: rows['city_desc'] for rows in file_agency_codes}
# Define projections
mi_south = Proj(init='epsg:3593', preserve_units=True) # NAD 1983 StatePlane Michigan South, FIPS 2113 IntlFeet
web_mercator = Proj(init='epsg:3857') # WGS 1984 Web Mercator Auxiliary Sphere
wgs84 = Proj(init='epsg:4326') # WGS84
for i in incident_push:
for agency in cfg['agencies'].keys():
if i['agency_code'] == cfg['agencies'][agency]['agency_code']:
# Assign variable to web GIS
ago_portal = cfg['agencies'][agency]['ago_portal']
ago_user = cfg['agencies'][agency]['ago_user']
ago_pass = cfg['agencies'][agency]['ago_pass']
gis = GIS(ago_portal, ago_user, ago_pass)
# Assign variable to feature layers
fl_fireincidents = gis.content.get(cfg['agencies'][agency]['flc_fireincidents']).layers[0]
fl_serviceareas = gis.content.get(cfg['agencies'][agency]['flc_serviceareas']).layers[0]
fl_firedistricts = gis.content.get(cfg['agencies'][agency]['flc_firedistricts']).layers[0]
fl_boxalarmareas = gis.content.get(cfg['agencies'][agency]['flc_boxalarmareas']).layers[0]
fl_taxparcels = gis.content.get(cfg['agencies'][agency]['flc_taxparcels']).layers[0]
            # If the incident is mutual-aid, find the agency_code whose city matches.
            # agency_codes maps agency_code -> city_desc, so reverse-look-up the code
            # (indexing agency_codes['city_desc'] as before would raise a KeyError).
            mutaid_agency_code = i['agency_code']
            if i['inc_type_code'] == 'MUTAID':
                for code, city in agency_codes.items():
                    if i['city_desc'] == city:
                        mutaid_agency_code = code
                        break
# Query feature layer and create search extent dictionary
            if i['inc_type_code'] == 'MUTAID':
                # NOTE: assumes the service-area layer's code field is literally named
                # 'agency_code'; the original concatenated the incident's value in
                # place of the field name, producing a clause like "HFD LIKE 'HFD'".
                where_statement = "agency_code LIKE '" + mutaid_agency_code + "'"
            else:
                where_statement = "agency_code LIKE '" + i['agency_code'] + "'"
f_servicearea = fl_serviceareas.query(where=where_statement).features[0]
sa_xmax = f_servicearea.attributes['xmax_fips2113_ftintl']
sa_xmin = f_servicearea.attributes['xmin_fips2113_ftintl']
sa_ymax = f_servicearea.attributes['ymax_fips2113_ftintl']
sa_ymin = f_servicearea.attributes['ymin_fips2113_ftintl']
search_area = {'xmax': sa_xmax, 'xmin': sa_xmin, 'ymax': sa_ymax, 'ymin': sa_ymin}
# Determine geocoder, default is the 'World Geocoder for ArcGIS'
if cfg['agencies'][agency]['use_oc_geocoder']:
geocoder = Geocoder(cfg['geocoders']['oc_geocoder'])
else:
geocoder = None
# Geocode address
geocode_result = geocode(i['address'], search_extent=search_area, geocoder=geocoder)
if not geocode_result:
# If geocode fails use the agency default location
geocode_result = geocode(cfg['agencies'][agency]['default_location'], geocoder=geocoder)
geocode_success = 'N'
else:
geocode_success = 'Y'
pass
# Transform coordinates
x, y = transform(mi_south, web_mercator, geocode_result[0]['location']['x'],
geocode_result[0]['location']['y'])
long, lat = transform(mi_south, wgs84, geocode_result[0]['location']['x'],
geocode_result[0]['location']['y'])
# Round Lat/Long
lat = round(lat, 6)
long = round(long, 6)
# Convert Lat/Long to USNG
m = mgrs.MGRS()
usng_raw = m.toMGRS(lat, long)
u = str(usng_raw.decode('utf-8'))
usng = u[0:3] + ' ' + u[3:5] + ' ' + u[5:10] + ' ' + u[10:15]
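            # Hedged example of the regrouping above: an MGRS string such as
            # '17TLG2577287867' becomes the USNG string '17T LG 25772 87867'
            # (grid zone designator, 100 km square ID, 5-digit easting, 5-digit northing).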
# Construct point feature
geocode_xy = Point({'x': x, 'y': y})
# Feature layer query to find box alarm areas
fset_boxalarmareas = fl_boxalarmareas.query(geometry_filter=filters.intersects(geocode_xy))
# Assign box alarm variables
boxalarm_fire = None
boxalarm_medical = None
boxalarm_wildland = None
# Loop to populate Box Alarm Variables
for boxalarmarea in fset_boxalarmareas:
if boxalarmarea.attributes['BoxAlarmType'] == 'FIRE':
boxalarm_fire = boxalarmarea.attributes['BoxAlarmNumber']
elif boxalarmarea.attributes['BoxAlarmType'] == 'MEDICAL':
boxalarm_medical = boxalarmarea.attributes['BoxAlarmNumber']
elif boxalarmarea.attributes['BoxAlarmType'] == 'WILDLAND':
boxalarm_wildland = boxalarmarea.attributes['BoxAlarmNumber']
# Determine agency district
fset_firedistricts = fl_firedistricts.query(geometry_filter=filters.intersects(geocode_xy),
return_geometry=False)
agency_district = None
if fset_firedistricts:
agency_district = fset_firedistricts.features[0].attributes['primarystation']
else:
pass
# Determine shift on duty at time of call
pattern_start = datetime.strptime(cfg['agencies'][agency]['shift_start_date'], '%m-%d-%Y')
shift_start = cfg['agencies'][agency]['shift_start_time']
agency_shift = fire_ops.kelly_shift(i['datetime_call'], pattern_start, shift_start)
# Query tax parcel layer to get structure data
fset_taxparcels = fl_taxparcels.query(where="SITEADDRESS LIKE '" + i['address'] + "'",
return_geometry=False, result_record_count=1)
parcel_id = None
map_index = None
structure_desc = None
structure_livingarea = None
structure_numbbeds = None
structure_assessvalue = None
structure_taxvalue = None
if fset_taxparcels and geocode_success == 'Y':
parcel_id = fset_taxparcels.features[0].attributes['PIN']
map_index = fset_taxparcels.features[0].attributes['PIN'][2:4]
structure_desc = fset_taxparcels.features[0].attributes['STRUCTURE_DESC']
structure_livingarea = fset_taxparcels.features[0].attributes['LIVING_AREA_SQFT']
structure_numbbeds = fset_taxparcels.features[0].attributes['NUM_BEDS']
structure_assessvalue = fset_taxparcels.features[0].attributes['ASSESSEDVALUE']
structure_taxvalue = fset_taxparcels.features[0].attributes['TAXABLEVALUE']
else:
pass
# Create new feature based on template
fset_fireincidents = fl_fireincidents.query(result_record_count=1)
f = deepcopy(fset_fireincidents.features[0])
# Assign geometry & attributes to new feature
f.geometry = geocode_xy
f.attributes['incident_number'] = i['incident_number']
f.attributes['incident_type_code'] = i['incident_type_code']
f.attributes['incident_type_desc'] = i['incident_type_desc']
f.attributes['incident_temp_url'] = i['incident_temp_url']
f.attributes['agency_code'] = i['agency_code']
f.attributes['agency_district'] = agency_district
f.attributes['agency_shift'] = agency_shift
f.attributes['parcel_id'] = parcel_id
f.attributes['address'] = i['address']
f.attributes['location'] = i['location']
f.attributes['apt_number'] = i['apt_number']
f.attributes['city_code'] = i['city_code']
f.attributes['city_desc'] = i['city_desc']
            f.attributes['state'] = i['state']  # was the literal list ['state']
f.attributes['map_index'] = map_index
f.attributes['latitude'] = lat
f.attributes['longitude'] = long
f.attributes['usng'] = usng
f.attributes['low_street'] = i['low_street']
f.attributes['high_street'] = i['high_street']
f.attributes['geocode_success'] = geocode_success
f.attributes['datetime_call'] = i['datetime_call']
f.attributes['datetime_dispatched'] = None
f.attributes['datetime_enroute'] = None
f.attributes['datetime_arrival'] = None
f.attributes['datetime_clear'] = None
f.attributes['units_assigned'] = i['incident_units']
f.attributes['chief_complaint'] = i['chief_complaint']
f.attributes['proqa_code'] = i['proqa_code']
f.attributes['proqa_suffix_code'] = i['proqa_code_suf']
f.attributes['proqa_desc'] = i['proqa_desc']
f.attributes['proqa_suffix_desc'] = i['proqa_desc_suf']
f.attributes['boxalarm_fire'] = boxalarm_fire
f.attributes['boxalarm_medical'] = boxalarm_medical
f.attributes['boxalarm_wildland'] = boxalarm_wildland
f.attributes['structure_desc'] = structure_desc
f.attributes['structure_livingarea'] = structure_livingarea
f.attributes['structure_numbbeds'] = structure_numbbeds
f.attributes['structure_assessvalue'] = structure_assessvalue
f.attributes['structure_taxvalue'] = structure_taxvalue
# Create empty list for new GIS features
feature_list = [f]
# Add features to feature layer
fl_fireincidents.edit_features(adds=feature_list)
| 48.081448 | 112 | 0.616413 | 1,175 | 10,626 | 5.348936 | 0.233191 | 0.070008 | 0.035163 | 0.025617 | 0.147335 | 0.105807 | 0.078759 | 0.014638 | 0.014638 | 0.014638 | 0 | 0.010733 | 0.272257 | 10,626 | 220 | 113 | 48.3 | 0.802017 | 0.105308 | 0 | 0.080745 | 0 | 0 | 0.182577 | 0.005385 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0.031056 | 0.080745 | 0 | 0.080745 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
98b2b467fe732e9f25c6541b5a2df8198a18a396 | 1,959 | py | Python | api_reflector/api.py | backwardspy/api-reflector | 5feacf7d7d549418b31acbf9407602a7659e431d | [
"MIT"
] | 4 | 2021-09-24T13:58:37.000Z | 2022-03-01T22:19:41.000Z | api_reflector/api.py | backwardspy/api-reflector | 5feacf7d7d549418b31acbf9407602a7659e431d | [
"MIT"
] | 10 | 2021-09-21T16:28:06.000Z | 2022-03-25T16:48:36.000Z | api_reflector/api.py | backwardspy/api-reflector | 5feacf7d7d549418b31acbf9407602a7659e431d | [
"MIT"
] | 2 | 2021-09-13T13:00:33.000Z | 2022-02-24T14:55:46.000Z | """
Provides the top level flask application configuration.
"""
import sentry_sdk
from flask import Flask
from flask_dance.contrib.azure import make_azure_blueprint
from sentry_sdk.integrations.flask import FlaskIntegration
from sentry_sdk.integrations.sqlalchemy import SqlalchemyIntegration
from werkzeug.middleware.proxy_fix import ProxyFix
from api_reflector import db
from api_reflector.admin import admin
from api_reflector.migrations import run_migrations
from api_reflector.reporting import get_logger
from api_reflector.views import api
from settings import settings
log = get_logger(__name__)
def create_app() -> Flask:
"""
Creates a flask application and registers the api blueprint.
"""
if settings.sentry_dsn:
log.debug("Initialising Sentry SDK.")
sentry_sdk.init( # pylint: disable=abstract-class-instantiated
dsn=settings.sentry_dsn,
integrations=[FlaskIntegration(), SqlalchemyIntegration()],
)
log.debug("Initializing app.")
app = Flask(__name__)
app.wsgi_app = ProxyFix( # type: ignore
app.wsgi_app,
x_proto=int(settings.use_x_forwarded_proto),
x_host=int(settings.use_x_forwarded_host),
)
app.config.update(
SECRET_KEY=settings.secret_key,
SQLALCHEMY_DATABASE_URI=settings.postgres_dsn,
SQLALCHEMY_TRACK_MODIFICATIONS=False,
FLASK_ADMIN_SWATCH="darkly",
)
if settings.azure_auth_enabled:
azure_blueprint = make_azure_blueprint(
client_id=settings.azure_client_id,
client_secret=settings.azure_client_secret,
tenant=settings.azure_tenant,
redirect_url="/",
)
app.register_blueprint(azure_blueprint)
db.sqla.init_app(app)
admin.init_app(app)
app.register_blueprint(api)
log.debug("Migrating database.")
run_migrations.main()
log.debug("App initialisation complete.")
return app
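# Usage sketch (an illustrative assumption: serving with Flask's built-in
# development server; a production deployment would sit behind a WSGI server):
if __name__ == "__main__":
    create_app().run(host="127.0.0.1", port=5000)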
| 28.391304 | 71 | 0.721797 | 234 | 1,959 | 5.764957 | 0.388889 | 0.033358 | 0.059303 | 0.037064 | 0.035582 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.200613 | 1,959 | 68 | 72 | 28.808824 | 0.86143 | 0.088821 | 0 | 0 | 0 | 0 | 0.053947 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.020833 | false | 0 | 0.25 | 0 | 0.291667 | 0.083333 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
98b51bf927ecb8711b46f44c5fe353e2b5cb0870 | 16,590 | py | Python | app_common/apptools/io/deserializer.py | KBIbiopharma/app_common | bd913e24741fb070aad058a0f90cbb2c64d8b106 | [
"MIT"
] | 2 | 2020-02-12T17:51:13.000Z | 2021-05-03T05:36:15.000Z | app_common/apptools/io/deserializer.py | KBIbiopharma/app_common | bd913e24741fb070aad058a0f90cbb2c64d8b106 | [
"MIT"
] | 30 | 2020-02-04T21:38:58.000Z | 2021-05-25T20:55:01.000Z | app_common/apptools/io/deserializer.py | KBIbiopharma/app_common | bd913e24741fb070aad058a0f90cbb2c64d8b106 | [
"MIT"
] | null | null | null | """ Module containing the deserializer api function and supporting classes.
The deserializers are versioned and the ones in this module correspond to the
latest protocol. Old versions of deserializers that were updated are stored in
the legacy_deserializers dict.
"""
import logging
from six import string_types
from traits.api import HasStrictTraits, Type
from .serialization_utils import sanitize_class_name
logger = logging.getLogger(__name__)
class NoDeserializerFoundError(ValueError):
pass
class deSerializer(HasStrictTraits):
""" Base class for all other deserialization related classes.
Contains the generic dynamic methods for deserializing some data back into
a class instance.
"""
#: Default version of the deserializer
protocol_version = 0
#: Class attr with all ndarrays mapped to their id in the serial data.
array_collection = {}
#: Dict mapping ids (set at serialization) to instances already
#: deserialized, in case the same object needs to be pointed to.
instance_collection = {}
#: Class level attribute to lookup available deserializers (same protocol)
deserializer_map = {}
#: Map between (object type, old_protocol) and deserializer classes
legacy_deserializers = {}
#: Whether at least 1 legacy deserializer has been used
legacy_file = False
@classmethod
def set_array_collection(cls, array_collection):
# Modify the dict in place to make sure all subclasses share the same
# container:
cls.array_collection.clear()
cls.array_collection.update(array_collection)
@classmethod
def instance_lookup(cls, obj_id):
""" Lookup if an object with provided id has been deserialized already.
Parameters
----------
obj_id : any immutable type
Id, assigned at serialization, of the object to deserialize. This
id is expected to be found as one of the keys of the
instance_collection class attribute.
Returns
-------
The instance of the object if the id has been encountered, and stored
in the instance_collection class attribute. Returns None otherwise.
"""
obj = None
if cls.instance_collection is None:
cls.instance_collection = {}
if obj_id in cls.instance_collection:
obj = cls.instance_collection[obj_id]
return obj
def deserialize(self, serial_data):
""" Actual deserialization of the serialized data.
Parameters
----------
serial_data : dictionary
serial_data is a dictionary with the following items.
'class_metadata' : Information about the class to be instantiated
'data' : Contains arguments required for instantiating the class
Returns
-------
A new class instance for the serial_data.
"""
deserializer = self.select_deserializer(serial_data)
obj = deserializer.build_object(serial_data)
return obj
def select_deserializer(self, serial_data):
""" Returns the deserializer class appropriate for the provided serial
data. Raises a ValueError if none is found.
Parameters
----------
serial_data : dictionary or basic type
serial_data either a basic object that can be instantiated
automatically or a dictionary with the following items:
'class_metadata' : Information about the class to be instantiated
'data' : Contains arguments required for instantiating the class
"""
if isinstance(serial_data, dict) and 'class_metadata' in serial_data:
klass = sanitize_class_name(serial_data['class_metadata']['type'])
written_version = serial_data["class_metadata"]["version"]
else:
# Basic type: no metadata, they are stored as themselves
written_version = None
klass = sanitize_class_name(serial_data.__class__.__name__)
try:
default_deserializer = self.deserializer_map[klass+'DeSerializer']
except (KeyError, AttributeError) as e:
msg = "No active deserializer was found for class {}. Now " \
"searching in the legacy deserializers... (Error was {}.)"
logger.warning(msg.format(klass, e))
default_deserializer = None
use_default = False
current_version = None
else:
current_version = default_deserializer.protocol_version
use_default = (written_version is None or
current_version == written_version)
if use_default:
deserializer = default_deserializer
elif current_version is not None and current_version < written_version:
msg = ("Trying to load a {} version {} but the most recent "
"deserializer available is version {}. This file was "
"created with a newer version of the software. It will need"
"to be updated to be able to load this file.")
msg = msg.format(klass, written_version, current_version)
logger.exception(msg)
raise NoDeserializerFoundError(msg)
else:
# Search for a deserializer that supports the version 'written_version'
old_deserializer_dict = self.legacy_deserializers.get(klass, None)
if old_deserializer_dict is None:
msg = "Unable to find a legacy deserializer for a {}.".format(
klass)
logger.exception(msg)
raise NoDeserializerFoundError(msg)
deSerializer.legacy_file = True
deserializer = old_deserializer_dict.get(written_version, None)
if deserializer is None:
versions = sorted(old_deserializer_dict.keys())
msg = "Unable to find a deserializer for a {} version {}. " \
"Available versions are {}."
msg = msg.format(klass, written_version, versions)
logger.exception(msg)
raise NoDeserializerFoundError(msg)
return deserializer()
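# Version negotiation in brief (a restatement of the method above): an exact
# protocol match uses the active deserializer; an older written version falls
# back to legacy_deserializers[klass][written_version]; a newer written
# version raises NoDeserializerFoundError.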
def build_object(self, serial_data):
""" Recreate class objects from the serialized data.
Deserialize all arguments to the class constructor for the target data
type, and build the instance.
Parameters
----------
serial_data : dictionary
serial_data is a dictionary with the following items.
'class_metadata' : Information about the class to be instantiated
'data' : Contains arguments required for instantiating the class
Returns
-------
A class instance for the serial_data, whether from the instance_lookup
or a newly created one if the id has never been encountered.
"""
data = serial_data.pop('data', None)
metadata = serial_data.pop('class_metadata', None)
obj_id = metadata['id']
constructor_data = {'metadata': metadata}
metadata_name = None
if data is not None:
data = self.deserialize(data)
constructor_data['args'] = data
if serial_data:
instance_data = self.deserialize(serial_data)
if instance_data.get('metadata'):
metadata_name = instance_data['metadata'].get('name')
constructor_data['kwargs'] = instance_data
if obj_id is None:
instance = self.get_instance(constructor_data)
else:
# For objects that were saved with a unique "id", lookup
# if that object has already been built
instance = self.instance_lookup(obj_id)
if instance is None:
instance = self.get_instance(constructor_data)
self.instance_collection[obj_id] = instance
# FIXME: Instance metadata name is overwritten with the instance.name
# in the ChromatographyData __init__ so reassign name here. Better way
# to do it?
if metadata_name:
instance.metadata['name'] = metadata_name
return instance
class simpleObjDeSerializer(deSerializer):
klass = Type
def get_instance(self, constructor_data):
instance = self.klass(**constructor_data['kwargs'])
return instance
class dataElementDeSerializer(simpleObjDeSerializer):
def _klass_default(self):
from app_common.model_tools.data_element import DataElement
return DataElement
class basicTypeDeSerializer(deSerializer):
def build_object(self, serial_data):
# Convert unicode to string ... required for existing code
if isinstance(serial_data, string_types):
serial_data = str(serial_data)
instance = type(serial_data)(serial_data)
return instance
class boolDeSerializer(basicTypeDeSerializer):
pass
class floatDeSerializer(basicTypeDeSerializer):
pass
class float64DeSerializer(basicTypeDeSerializer):
""" Deserialization for numpy.float64
"""
def build_object(self, serial_data):
instance = float(serial_data)
return instance
class timestampDeSerializer(basicTypeDeSerializer):
""" Deserialization for pandas.tslib.Timestamp.
"""
def build_object(self, serial_data):
from pandas import Timestamp
instance = Timestamp(serial_data['data'])
return instance
class dateDeSerializer(basicTypeDeSerializer):
""" Deserialization for datetime.date.
"""
def build_object(self, serial_data):
from datetime import date
instance = date(*serial_data['data'])
return instance
class intDeSerializer(basicTypeDeSerializer):
pass
class longDeSerializer(basicTypeDeSerializer):
pass
class strDeSerializer(basicTypeDeSerializer):
pass
class unicodeDeSerializer(basicTypeDeSerializer):
pass
class noneTypeDeSerializer(basicTypeDeSerializer):
def build_object(self, serial_data):
return serial_data
class dictDeSerializer(deSerializer):
def build_object(self, serial_data):
deserialized_dict = {}
if 'class_metadata' in serial_data and \
'dict' in serial_data['class_metadata']['type']:
serial_data.pop('class_metadata', None)
for key, val in serial_data.items():
deserialized_dict.update({key: self.deserialize(val)})
return deserialized_dict
class seriesDeSerializer(deSerializer):
def get_instance(self, constructor_data):
from pandas.core.series import Series
instance = Series(constructor_data['args'],
index=constructor_data['kwargs']['index'])
return instance
class dataFrameDeSerializer(deSerializer):
protocol_version = 1
def build_object(self, serial_data):
filename = serial_data['class_metadata']['filename']
df_id = serial_data['class_metadata']['id']
return self.array_collection[(filename, df_id)]
class traitDictObjectDeSerializer(deSerializer):
def build_object(self, serial_data):
# TraitsDict object is deserialized as a regular dict because the
# constructors of HasTraits objects expect dictionaries not
# TraitDictObjects
deserialized_dict = {}
serial_data.pop('class_metadata', None)
for key, val in serial_data.items():
deserialized_dict.update({key: self.deserialize(val)})
return deserialized_dict
class listDeSerializer(deSerializer):
def build_object(self, serial_data):
_list = []
for item in serial_data:
_list.append(self.deserialize(item))
return _list
class setDeSerializer(deSerializer):
def build_object(self, serial_data):
_set = set()
for item in serial_data["data"]:
_set.add(self.deserialize(item))
return _set
class ndarrayDeSerializer(deSerializer):
protocol_version = 1
def build_object(self, serial_data):
filename = serial_data['class_metadata']['filename']
arr_uuid = serial_data['class_metadata']['id']
return self.array_collection[(filename, arr_uuid)]
class smartUnitDeSerializer(deSerializer):
def get_instance(self, constructor_data):
from scimath.units.smart_unit import SmartUnit
instance = SmartUnit(*constructor_data['args'])
return instance
class traitListObjectDeSerializer(deSerializer):
def build_object(self, serial_data):
_list = []
for item in serial_data['data']:
deserializer = self.select_deserializer(item)
_list.append(deserializer.build_object(item))
return _list
class tupleDeSerializer(deSerializer):
def build_object(self, serial_data):
elements = []
for item in serial_data['data']:
deserializer = self.select_deserializer(item)
elements.append(deserializer.build_object(item))
return tuple(elements)
class unitDeSerializer(deSerializer):
def get_instance(self, constructor_data):
import scimath.units.unit
instance = getattr(scimath.units.unit,
constructor_data['metadata']['type'])(
*constructor_data['args'])
# Set the unit.label because the Unit Class assigns
# the label attribute equal to None when the Unit Class is constructed
instance.label = constructor_data['kwargs']['label']
return instance
class unitScalarDeSerializer(deSerializer):
def get_instance(self, constructor_data):
import scimath.units.unit_scalar
instance = getattr(scimath.units.unit_scalar,
constructor_data['metadata']['type'])(
constructor_data['args'],
**constructor_data['kwargs'])
return instance
class unitArrayDeSerializer(unitScalarDeSerializer):
""" Version 1 of the unitArray deserializer.
"""
protocol_version = 1
def build_object(self, serial_data):
from scimath.units.unit_array import UnitArray
filename = serial_data['class_metadata']['filename']
array_id = serial_data['class_metadata']['id']
data = self.array_collection.get((filename, array_id), None)
units = self.deserialize(serial_data["units"])
return UnitArray(data, units=units)
class uUIDDeSerializer(deSerializer):
def get_instance(self, constructor_data):
from uuid import UUID
instance = UUID(constructor_data['args'])
return instance
def deserialize(serial_data, array_collection=None, klass=None,
additional_deserializers=None):
""" Functional entry point to deserialize any serial data.
Note that this function resets the instance_collection class attribute, and
should therefore not be called more than once for each file loading.
Parameters
----------
serial_data : dict
All non-array data to rebuild the object.
array_collection : dict
Dictionary mapping all numpy arrays stored to an id in the serial data.
klass : deSerializer [OPTIONAL]
Implementation of the deserialization class. Can be passed for example
to set the legacy_deserializers dict.
additional_deserializers : dict
Map between object class names and corresponding deserializer object to
use to recreate the instance.
Returns
-------
any, bool
Object being deserialized and whether or not legacy deserializers were
needed.
"""
if klass is None:
klass = deSerializer
if array_collection is None:
array_collection = {}
if additional_deserializers is None:
additional_deserializers = {}
klass.instance_collection.clear()
klass.legacy_file = False
deserializer_map = {}
# Additional serializers added afterwards, to allow projects to override
# the way to serialize basic types:
app_common_content = {key: val for key, val in globals().items()
if key.endswith("DeSerializer")}
deserializer_map.update(app_common_content)
deserializer_map.update(additional_deserializers)
deserializer = klass()
deserializer.deserializer_map.update(deserializer_map)
deserializer.set_array_collection(array_collection)
obj = deserializer.deserialize(serial_data['data'])
return obj, klass.legacy_file
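# Usage sketch (an assumption for illustration: sanitize_class_name('float')
# yields 'float', so the basic-type path resolves to floatDeSerializer):
#     obj, used_legacy = deserialize({'data': 3.14})
#     # obj == 3.14 and used_legacy is False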
| 34.347826 | 79 | 0.662929 | 1,810 | 16,590 | 5.91326 | 0.190608 | 0.064468 | 0.022237 | 0.025227 | 0.317481 | 0.279174 | 0.20555 | 0.161263 | 0.139307 | 0.139307 | 0 | 0.000819 | 0.264436 | 16,590 | 482 | 80 | 34.419087 | 0.87626 | 0.267752 | 0 | 0.326923 | 0 | 0 | 0.07387 | 0 | 0 | 0 | 0 | 0.002075 | 0 | 1 | 0.103846 | false | 0.026923 | 0.05 | 0.003846 | 0.403846 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
98b56a5b7675ee876b08c64bf15a49e2ad1f944e | 1,162 | py | Python | index.py | garlfin/RPC-Client-Server | d13dc9988e5582ce8ccb396a5aa1db21fc1041c5 | [
"MIT"
] | null | null | null | index.py | garlfin/RPC-Client-Server | d13dc9988e5582ce8ccb396a5aa1db21fc1041c5 | [
"MIT"
] | null | null | null | index.py | garlfin/RPC-Client-Server | d13dc9988e5582ce8ccb396a5aa1db21fc1041c5 | [
"MIT"
] | null | null | null | import res.server.server as server
import res.client as client
import threading
import os
import res.server.keyboardlistener
address = ("localhost", 8000)
dataPath = os.getcwd()+"/res/database/clients/db.json"
def startServer():
Server = server.RpcServer(dataPath)
Server.initialize(address)
KeyboardListener = res.server.keyboardlistener.KeyboardListener(Server)
threading.Thread(target=KeyboardListener.status, args=[KeyboardListener.DoIListen.listen]).start()
Server.register([Server.retrieveStats, Server.close])
def StartClient(client_name):
try:
client_thread = client.RPCClient(client_name)
client_thread.listen(client.getDomainFromAddress(address, "RPC"))
client_thread.receiveStats()
except ConnectionRefusedError:
print(f"[{str(client_name)}] Machine refused connection.")
currentWorkingThreads = [threading.Thread(target=startServer), threading.Thread(target=StartClient, args=["0442246"]),
threading.Thread(target=StartClient, args=["5561111"])]
for thread in currentWorkingThreads:
thread.start()
for thread in currentWorkingThreads:
thread.join()
| 33.2 | 118 | 0.744406 | 121 | 1,162 | 7.099174 | 0.429752 | 0.069849 | 0.097788 | 0.074505 | 0.172293 | 0 | 0 | 0 | 0 | 0 | 0 | 0.01809 | 0.143718 | 1,162 | 34 | 119 | 34.176471 | 0.845226 | 0 | 0 | 0.076923 | 0 | 0 | 0.08864 | 0.024957 | 0 | 0 | 0 | 0 | 0 | 1 | 0.076923 | false | 0 | 0.192308 | 0 | 0.269231 | 0.038462 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
98b9e22db3260d4dd56031107290c4fa484ebf3e | 5,016 | py | Python | tools/uavcan_ip_interface.py | greck2908/robot-software | 2e1e8177148a089e8883967375dde7f8ed3d878b | [
"MIT"
] | 40 | 2016-10-04T19:59:22.000Z | 2020-12-25T18:11:35.000Z | tools/uavcan_ip_interface.py | greck2908/robot-software | 2e1e8177148a089e8883967375dde7f8ed3d878b | [
"MIT"
] | 209 | 2016-09-21T21:54:28.000Z | 2022-01-26T07:42:37.000Z | tools/uavcan_ip_interface.py | greck2908/robot-software | 2e1e8177148a089e8883967375dde7f8ed3d878b | [
"MIT"
] | 21 | 2016-11-07T14:40:16.000Z | 2021-11-02T09:53:37.000Z | #!/usr/bin/env python3
"""
UAVCAN to TUN network adapter.
"""
import argparse
import os
import struct
import sys
import fcntl
import uavcan
import subprocess
import time
import logging
from queue import Queue, Empty
import threading
DSDL_DIR = os.path.join(os.path.dirname(__file__), "../uavcan_data_types/cvra")
def parse_args():
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument(
"--interface",
"-i",
help="CAN Interface to use (e.g. can0 or /dev/ttyUSB0",
required=True,
)
parser.add_argument(
"--ip-address",
"-a",
default="10.0.0.1/24",
help="IP address of this interface (default 10.0.0.1/24)",
)
parser.add_argument(
"--packets-per-second",
type=int,
default=1000,
help="Max number of packet per second to transmit (protects the CAN bus).",
)
parser.add_argument("--dsdl", help="Path to DSDL directory", default=DSDL_DIR)
parser.add_argument(
"--verbose", "-v", action="store_true", help="Enable debug output."
)
return parser.parse_args()
def open_tun_interface(ip_addr):
if sys.platform == "linux":
fd = os.open("/dev/net/tun", os.O_RDWR)
# Values obtained with a test C program
IFF_TAP = 0x2
IFF_NO_PI = 4096
TUNSETIFF = 0x400454CA
# See man netdevice for struct ifreq
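# "16sh15x" packs the 16-byte interface name, a short for the flags word,
# and 15 pad bytes, matching the start of struct ifreq.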
val = struct.pack("16sh15x", "uavcan0".encode(), IFF_TAP | IFF_NO_PI)
fcntl.ioctl(fd, TUNSETIFF, val)
subprocess.check_call("ip link set dev uavcan0 up".split())
subprocess.check_call("ip addr add dev uavcan0 {}".format(ip_addr).split())
return fd
elif sys.platform == "darwin": # macOS
tap = "tap0"
fd = os.open("/dev/" + tap, os.O_RDWR)
subprocess.call("ifconfig {} {}".format(tap, ip_addr).split())
return fd
else:
raise RuntimeError("supports mac and linux only")
class RateLimiter:
"""Simple rate limiter.
See https://stackoverflow.com/questions/667508/whats-a-good-rate-limiting-algorithm
"""
def __init__(self, max_rate):
self.max_rate = max_rate
self.quota = max_rate
self.last_time = time.time()
def check(self) -> bool:
"""Checks if we are allowed to proceed based on max rate."""
t = time.time()
dt, self.last_time = t - self.last_time, t
self.quota += self.max_rate * dt
self.quota = min(self.quota, self.max_rate)
# If we don't have quota left, forbid the transaction
if self.quota <= 1.0:
return False
# If we still have quota, take one from it and allow the transaction
self.quota -= 1.0
return True
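# Usage sketch for RateLimiter (the demo below is an illustrative assumption,
# not part of the original module): a tight burst of calls is throttled to
# roughly max_rate allowed events per second.
def _rate_limiter_demo(max_rate=100, events=1000):
    limiter = RateLimiter(max_rate)
    # Count how many of `events` back-to-back checks are allowed through.
    return sum(1 for _ in range(events) if limiter.check())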
def rx_thread(tun_fd, queue, max_packet_per_second):
limiter = RateLimiter(max_packet_per_second)
while True:
packet = os.read(tun_fd, 1500)
if limiter.check():
queue.put(packet)
else:
logging.debug("Dropped packet")
def node_thread(tun_fd, node, can_to_tap, tap_to_can):
def msg_callback(event):
msg = event.message
can_to_tap.put(msg.data)
node.add_handler(uavcan.thirdparty.cvra.uwb_beacon.DataPacket, msg_callback)
while True:
# A timeout of 0 means only process frames that are immediately
# available
try:
node.spin(timeout=0)
except uavcan.transport.TransferError:
logging.warning("uavcan exception, ignoring...")
pass
try:
packet = tap_to_can.get(block=False)
except Empty:
continue
# Checks that the packet fits in a UWB frame
assert len(packet) < 1024
# Finally send it over CAN
msg = uavcan.thirdparty.cvra.uwb_beacon.DataPacket()
msg.dst_addr = 0xFFFF # broadcast
msg.data = list(packet)
node.broadcast(msg)
def tx_thread(tun_fd, queue):
while True:
packet = queue.get()
os.write(tun_fd, bytes(packet))
def main():
args = parse_args()
level = logging.INFO
if args.verbose:
level = logging.DEBUG
logging.basicConfig(level=level)
if os.getuid() != 0:
logging.error("must run as root.")
sys.exit(1)
uavcan.load_dsdl(args.dsdl)
tun_fd = open_tun_interface(args.ip_address)
node = uavcan.make_node(args.interface, node_id=42)
tap_to_can = Queue()
can_to_tap = Queue()
logging.info("waiting for packets, press 3x Ctrl-C to stop...")
rx_thd = threading.Thread(
target=rx_thread, args=(tun_fd, tap_to_can, args.packets_per_second)
)
tx_thd = threading.Thread(target=tx_thread, args=(tun_fd, can_to_tap))
node_thd = threading.Thread(
target=node_thread, args=(tun_fd, node, can_to_tap, tap_to_can)
)
rx_thd.start()
tx_thd.start()
node_thd.start()
node_thd.join()
rx_thd.join()
tx_thd.join()
if __name__ == "__main__":
main()
| 26.125 | 87 | 0.621212 | 681 | 5,016 | 4.397944 | 0.372981 | 0.015025 | 0.028381 | 0.02404 | 0.100167 | 0.05409 | 0.044741 | 0.016694 | 0.016694 | 0 | 0 | 0.018443 | 0.264952 | 5,016 | 191 | 88 | 26.26178 | 0.79387 | 0.111842 | 0 | 0.099237 | 0 | 0 | 0.129018 | 0.005659 | 0 | 0 | 0.004301 | 0 | 0.007634 | 1 | 0.068702 | false | 0.007634 | 0.083969 | 0 | 0.198473 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
98ba753b081a3ea187c35cbd90efdcf17178d65c | 840 | py | Python | gpx_builder.py | jvicu2001/GGeoTrace | 7796daaa505586cf7b867e321aa528f5dd64a8c6 | [
"MIT"
] | null | null | null | gpx_builder.py | jvicu2001/GGeoTrace | 7796daaa505586cf7b867e321aa528f5dd64a8c6 | [
"MIT"
] | null | null | null | gpx_builder.py | jvicu2001/GGeoTrace | 7796daaa505586cf7b867e321aa528f5dd64a8c6 | [
"MIT"
] | null | null | null | from xml.etree import ElementTree
def gpx_builder(data):
gpx = ElementTree.Element('gpx')
tree = ElementTree.ElementTree(gpx)
gpx.set('version', '1.0')
gpxname = ElementTree.SubElement(gpx, 'name')
gpxname.text = 'PLACEHOLDER'
rte = ElementTree.SubElement(gpx, 'rte')
for routepoint, current_jump in enumerate(data):
rtept = ElementTree.SubElement(rte, 'rtept')
rtept.set('lat', current_jump['latitude'])
rtept.set('lon', current_jump['longitude'])
rtept_name = ElementTree.SubElement(rtept, 'name')
rtept_name.text = 'Jump n° {}'.format(routepoint)
rtept_desc = ElementTree.SubElement(rtept, 'desc')
rtept_desc.text = '{}\nip: {}\nTime: PLACEHOLDER'.format(current_jump['ptr'], current_jump['ip'])
return tree
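# Usage sketch (illustrative; `hops` is a hypothetical list of traceroute
# hops, each a dict with 'latitude', 'longitude', 'ptr' and 'ip' keys):
#     tree = gpx_builder(hops)
#     tree.write('trace.gpx', encoding='utf-8', xml_declaration=True)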
| 33.6 | 113 | 0.653571 | 97 | 840 | 5.587629 | 0.43299 | 0.193727 | 0.088561 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.002954 | 0.194048 | 840 | 24 | 114 | 35 | 0.79616 | 0 | 0 | 0 | 0 | 0 | 0.132143 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.055556 | false | 0 | 0.055556 | 0 | 0.166667 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7f268c8fd92f2096679b4eb0bada219d945e28f0 | 1,428 | py | Python | python/recognize.py | qcumba0/voicekit-examples | 43eaf1800674ac82d7211dfea1ca0fb85affd6d1 | [
"Apache-2.0"
] | null | null | null | python/recognize.py | qcumba0/voicekit-examples | 43eaf1800674ac82d7211dfea1ca0fb85affd6d1 | [
"Apache-2.0"
] | null | null | null | python/recognize.py | qcumba0/voicekit-examples | 43eaf1800674ac82d7211dfea1ca0fb85affd6d1 | [
"Apache-2.0"
] | null | null | null | #! /usr/bin/env python3
from tinkoff.cloud.stt.v1 import stt_pb2_grpc
from auth import authorization_metadata
from audio import audio_open_read
from common import build_recognition_request, make_channel, print_recognition_response, BaseRecognitionParser
from google.protobuf.json_format import MessageToDict
from tinkoff.cloud.stt.v1 import stt_pb2
def main():
args = BaseRecognitionParser().parse_args()
total = ''
if args.encoding == stt_pb2.RAW_OPUS:
raise ValueError("RAW_OPUS encoding is not supported by this script")
with audio_open_read(args.audio_file, args.encoding, args.rate, args.num_channels, args.chunk_size,
args.pyaudio_max_seconds) as reader:
stub = stt_pb2_grpc.SpeechToTextStub(make_channel(args))
metadata = authorization_metadata(args.api_key, args.secret_key, "tinkoff.cloud.stt")
response = stub.Recognize(build_recognition_request(args, reader), metadata=metadata)
if not isinstance(response, dict):
# https://developers.google.com/protocol-buffers/docs/proto3#json
response = MessageToDict(response, including_default_value_fields=True, preserving_proto_field_name=True)
for result in response["results"]:
for alternative in result["alternatives"]:
total = total + alternative["transcript"]
print(total)
if __name__ == "__main__":
main()
| 42 | 117 | 0.72619 | 176 | 1,428 | 5.636364 | 0.522727 | 0.024194 | 0.045363 | 0.038306 | 0.066532 | 0.066532 | 0.066532 | 0.066532 | 0 | 0 | 0 | 0.006891 | 0.186975 | 1,428 | 33 | 118 | 43.272727 | 0.847545 | 0.059524 | 0 | 0 | 0 | 0 | 0.076923 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.041667 | false | 0 | 0.25 | 0 | 0.291667 | 0.083333 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7f281eff9319b1d52f1e904c4e69206d883b8adc | 3,872 | py | Python | coral/reglib.py | kdberends/coral | 637b1a4756f09841b71da7520754fc15d7fac72c | [
"MIT"
] | null | null | null | coral/reglib.py | kdberends/coral | 637b1a4756f09841b71da7520754fc15d7fac72c | [
"MIT"
] | 1 | 2019-04-15T09:52:59.000Z | 2019-04-15T09:52:59.000Z | coral/reglib.py | kdberends/coral | 637b1a4756f09841b71da7520754fc15d7fac72c | [
"MIT"
] | 1 | 2019-08-17T13:36:02.000Z | 2019-08-17T13:36:02.000Z | """ Regression library """
# =============================================================================
# Imports
# =============================================================================
import theano
import numpy as np
import pymc3 as pm
import random
from coral.statsfunc import get_empirical_cdf, empirical_ppf
import matplotlib.pyplot as plt
# =============================================================================
# Regression functions
# =============================================================================
def general_linear(predictor_sample, response_sample, predictor, params):
"""
General linear model of the form y = a + b*x + e (a: intercept, b: slope, e: noise)
"""
X = predictor_sample
y = response_sample
X_new = predictor
with pm.Model():
sigma = pm.HalfCauchy('sigma', beta=10, testval=1.)
intercept = pm.Normal('Intercept', 0, sd=20)
slope_coeff = pm.Normal('Slope', 0, sd=20)
# Define likelihood
likelihood = pm.Normal('y', mu=intercept + slope_coeff * X,
sd=sigma, observed=y)
# Draw samples using NUTS sampler
trace = pm.sample(draws=params.draws, chains=params.chains, cores=params.cores, tune=params.burn_in)
# After burn-in MCMC should sample from the poster predictive
a = trace.get_values('Intercept', burn=params.burn_in, combine=True) # trace['Intercept'][params.burn_in:]
b = trace.get_values('Slope', burn=params.burn_in, combine=True) # trace['Slope'][params.burn_in:]
sigma = trace.get_values('sigma', burn=params.burn_in, combine=True) # trace['sigma'][params.burn_in:]
a, b, sigma = map(np.array, [a, b, sigma])
response_modelled = list()
for i in range(len(a)):
response_modelled.append(a[i] + b[i] * X_new + np.random.normal(loc=0, scale=sigma[i], size=len(X_new)))
# summarize trace for 95, 90, 80, 50, 20 and 10 % ci
prob_x = np.array([2.5, 5, 10, 25, 40, 45, 55, 60, 75, 90, 95, 97.5])/100
def sumtrace(data):
p, val = get_empirical_cdf(data)
return {'p':prob_x.tolist(), 'val': np.interp(prob_x, p, val).tolist()}
trace_summary = {'intercept':a[::10].tolist(), 'slope':b[::10].tolist(), 'sigma':sigma[::10].tolist()}
return np.array(response_modelled), trace_summary
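# Usage sketch for general_linear (synthetic data; `params` is assumed to be
# the same ParameterContainer object described in gaussian_process below,
# exposing draws, chains, cores and burn_in):
def _general_linear_demo(params):
    x = np.linspace(0.0, 1.0, 50)
    y = 1.0 + 2.0 * x + np.random.normal(scale=0.1, size=50)
    return general_linear(x, y, x, params)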
def gaussian_process(predictor_sample, response_sample, predictor, params):
"""
Args:
predictor_sample : subsample of predictor
response_sample : subsample of response
predictor: full sample of predictor
params: ParameterContainer object
Returns:
modelled response,
inference trace (if mcmc)
inference estimates (if map)
"""
X = predictor_sample[:, None]
y = response_sample
X_new = predictor[:, None]
with pm.Model() as model:
# length scale factor
L = pm.Gamma("L", alpha=2, beta=1)
# Covariance scale factor
eta = pm.HalfCauchy("eta", beta=5)
# todo: set by parameters?
kernel = 'radialbasis'
if kernel == 'matern':
# Matern kernel
cov = eta**2 * pm.gp.cov.Matern52(1, L)
elif kernel == 'radialbasis':
# Radial basis kernel
cov = eta**2 * pm.gp.cov.ExpQuad(1, L)
gp = pm.gp.Marginal(cov_func=cov)
sigma = pm.HalfCauchy("sigma", beta=15)
y_ = gp.marginal_likelihood("y", X=X, y=y, noise=sigma)
if params.inference == 'map':
mp = pm.find_MAP()
elif params.inference == 'mcmc':
mp = pm.sample(10000)
with model:
y_pred = gp.conditional("y_pred", X_new, pred_noise=True)
response_modelled = pm.sample_ppc([mp],
vars=[y_pred],
samples=params.ppc_draws)
return response_modelled['y_pred'], mp
| 33.669565 | 112 | 0.557593 | 473 | 3,872 | 4.448203 | 0.340381 | 0.022814 | 0.039924 | 0.022814 | 0.157795 | 0.13308 | 0.064639 | 0 | 0 | 0 | 0 | 0.024573 | 0.243285 | 3,872 | 114 | 113 | 33.964912 | 0.693515 | 0.26188 | 0 | 0.036364 | 0 | 0 | 0.042929 | 0 | 0 | 0 | 0 | 0.008772 | 0 | 1 | 0.054545 | false | 0 | 0.109091 | 0 | 0.218182 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7f2837056134a8675c72391b0e3336ba0c0c4211 | 26,367 | py | Python | pysit/optimization/optimization.py | zfang-slim/PysitForPython3 | dc60537b26018e28d92b7a956a2cf96775f0bdf9 | [
"BSD-3-Clause"
] | null | null | null | pysit/optimization/optimization.py | zfang-slim/PysitForPython3 | dc60537b26018e28d92b7a956a2cf96775f0bdf9 | [
"BSD-3-Clause"
] | null | null | null | pysit/optimization/optimization.py | zfang-slim/PysitForPython3 | dc60537b26018e28d92b7a956a2cf96775f0bdf9 | [
"BSD-3-Clause"
] | 1 | 2020-06-13T07:13:07.000Z | 2020-06-13T07:13:07.000Z |
import sys
import time
import copy
import numpy as np
import scipy.io as sio
__all__=['OptimizationBase']
__docformat__ = "restructuredtext en"
class OptimizationBase(object):
""" Base class for descent-like optimization routines.
These are stateful algorithms. The current step, as well as the step index
are stored so that (in the future) further steps can be taken without
repeating computational effort.
The basic structure of a pysit descent algorithm is focused on three
computational phases: computation of the residual, the gradient of the
objective function, and selection of a step direction based on this
information. The rest of this class specifies the layout of these methods,
which will be useful in nearly all descent algorithms. A subclass,
GradientDescent, will implement basic versions of these routines. Other
algorithms should inherit from there to prevent excess code rewriting.
A separate function method is provided for each of the three basic phases to
allow for overriding of the behavior of each. The residual and gradient
computation are least likely to be changed, but the step selection may be
changed frequently. For example, a gradient descent algorithm might
implement an adjustment that performs a line search along the gradient. An
implementation of Newton's method would simply override the adjust method to
solve the Hessian equation. Algorithms like CG or BFGS can also be
implemented in this manner.
Attributes
----------
solver : pysit wave solver object
A wave solver that inherits from pysit.solvers.WaveSolverBase
inversion_methods : class
A class containing all of the methods required to compute the inversion steps.
verbose : bool
Verbosity flag.
xi : solver.WaveSolverParameters
Current state of the unknowns.
i : float
Current iteration index.
<blank>_history : list of tuples
History of property (e.g., step length) with entries like the tuple (i, step_length_i) for index i.
<blank>_frequency : int
Iteration frequency at which to store a particular property.
"""
def __init__(self, objective):
"""Constructor for the BasicDescentAlgorithm class.
Parameters
----------
solver : pysit wave solver object
A wave solver that inherits from pysit.solvers.WaveSolverBase
inversion_methods : class
A class containing all of the methods required to compute the inversion steps.
Notes
-----
* InversionMethodsType is a data type that takes a wave solver object
as a construction argument. The collection of inversion methods will
depend on the solver.
* InversionMethodsType must have member functions that implement the
basic wave imaging procedures, e.g., forward modeling,
adjoint modeling, demigration, etc.
"""
self.objective_function = objective
self.solver = objective.solver
self.verbose = False
self.use_parallel = objective.use_parallel()
self.max_linesearch_iterations = 10
self.logfile = sys.stdout
self.proj_op = None
self.write = False
def reset(self,
append_mode,
value_frequency=0,
gradient_frequency=0,
gradient_length_frequency=0,
step_frequency=0,
step_length_frequency=0,
residual_length_frequency=0,
objective_frequency=0,
run_time_frequency=0,
alpha_frequency=1,
*args, **kwargs):
"""Resets the state of the optimization algorithm.
Parameters
----------
value_frequency : int
Iteration frequency that the value of the solution should be stored.
gradient_frequency : int
Iteration frequency that the gradient vector should be stored.
step_frequency : int
Iteration frequency that the step vector and step length should be stored.
objective_frequency : int
Iteration frequency that the value of the objective function should be stored.
"""
# if we are not appending reset things
# if things have not been set yet, reset things
if not append_mode or not hasattr(self, 'iteration'):
self.base_model = self.solver.ModelParameters(self.solver.mesh)
self.iteration = 0
# Reset the history lists
self.init_history("value", value_frequency)
self.init_history("gradient", gradient_frequency)
self.init_history("gradient_length", gradient_length_frequency)
self.init_history("step", step_frequency)
self.init_history("step_length", step_length_frequency)
self.init_history("residual_length", residual_length_frequency) # Requires self.residual_norm to be implemented by residual class if L2 is not appropriate
self.init_history("objective", objective_frequency)
self.init_history("run_time", run_time_frequency)
# All methods perform some form of line search
self.init_history("alpha", alpha_frequency)
def init_history(self, arg, freq):
"""Initializes a history variable.
Creates or overwrites an object attribute named arg_history and
arg_frequency dynamically. This allows for storing properties of some
descent algorithms that may not be relevant or exist in others.
Parameters
----------
arg : string
String prefix for naming the history and frequency variables.
freq : int
Frequency for storing the associated arg.
"""
setattr(self, arg + "_history", {})
setattr(self, arg + "_frequency", freq)
def query_store_history(self, arg, i):
f = getattr(self, arg + "_frequency")
# Only store the history if this index matches the frequency.
return f and (not np.mod(i,f))
def store_history(self, arg, i, val, force=False):
"""Stores a data point for a history variable.
To prevent repeated checks to see if a current iteration requires
history storage, this function both checks to see if data should be
stored and actually stores it.
Parameters
----------
arg : string
String prefix for naming the history and frequency variables.
i : int
Index of the current iteration.
val : arbitrary
Value to be stored.
force : boolean
Force storage anyway.
"""
# only processor 0 should store anything
if self.use_parallel and (self.objective_function.parallel_wrap_shot.rank != 0):
return
f = getattr(self, arg + "_frequency")
# Only store the history if this index matches the frequency.
if f and (force or not np.mod(i,f)):
loc = getattr(self, arg + "_history")
# if not loc.has_key(i):
# loc[i] = []
# # Always make a copy of things that are stored
# loc[i].append(copy.deepcopy(val))
if i not in loc:
loc[i] = None
# Always make a copy of things that are stored
loc[i] = copy.deepcopy(val)
def retrieve_history(self, arg):
"""Convenience routine for extracting a given history.
Parameters
----------
arg : string
String prefix for naming the history and frequency variables.
Returns
-------
iters, data : list of int, list of type(data)
If the history has been stored.
None, None
Otherwise
"""
f = getattr(self, arg + "_frequency")
# Only store the history if this index matches the frequency.
if f:
hist = getattr(self, arg + "_history")
return list(zip(*sorted(hist.items())))
else:
return None, None
def _print(self, *args):
# only processor 0 should store anything
if self.use_parallel and (self.objective_function.parallel_wrap_shot.rank != 0):
return
if self.verbose:
print(*args, file=self.logfile, flush=True)
# #### Actual optimization stuff below...
def initialize(self, initial_value, **kwargs):
"""Handle any optimization loop initialization and verify any preconditions.
Parameters
----------
initial_value : solver.ModelData
Starting guess.
"""
# Generally, there will be an initial value, but just in case...
self.base_model = copy.deepcopy(initial_value)
self.solver.model_parameters = self.base_model
def set_linesearch_configuration(self,
geom_fac=0.5,
geom_fac_up=0.7,
Wolfe_c1=0.1, # 1e-4
Wolfe_c2=0.9,
Wolfe_fac_up=1.5,
goldstein_c=1e-4,
fp_comp=1e-6):
"""Set up configurations for linesearch
Parameters:
geom_fac: factor to reduce the search step size
geom_fac_up: factor to increase the search step size
goldstein_c: the c parameter for the goldstein condition
Wolfe_c1: the c1 parameter for the Wolfe condition
Wolfe_c2: the c2 parameter for the Wolfe condition
Wolfe_fac_up: the factor to increase the search step size for the Wolfe condition
fp_comp: reasonable floating point cutoff
"""
setattr(self, "geom_fac", geom_fac)
setattr(self, "geom_fac_up", geom_fac_up)
setattr(self, "goldstein_c", goldstein_c)
setattr(self, "Wolfe_c1", Wolfe_c1)
setattr(self, "Wolfe_c2", Wolfe_c2)
setattr(self, "Wolfe_fac_up", Wolfe_fac_up)
setattr(self, "fp_comp", fp_comp)
def __call__(self,
shots,
initial_value,
iteration_parameters,
line_search='backtrack',
tolerance=1e-9,
verbose=False,
append=False,
status_configuration={},
linesearch_configuration={},
write=False,
history_iter=0,
**kwargs):
"""The main function for executing a number of steps of the descent
algorithm.
Most things can be done without directly overriding this function.
Parameters
----------
shots : list of pysit.Shot
List of Shots for which to compute on.
initial_value : solver.WaveParameters
Initial guess for the iteration.
iteration_parameters : int, iterable
Loop iteration parameters, like number of steps or frequency sets.
<blank>_frequency : int, optional kwarg
Frequency with which to store histories. Detailed in reset method.
verbose : bool
Verbosity flag.
linesearch_configuration : dictionary
Possible parameters for linesearch, for more details, please check the introduction of the function set_linesearch_configuration
"""
self.reset(append, **status_configuration)
self.set_linesearch_configuration(**linesearch_configuration)
self.tolerance = tolerance
self.verbose=verbose
self.write = write
self.history_iter = history_iter
self.line_search = line_search
if type(line_search) is str:
self.ls_method = line_search
self.ls_config = None
else: #assume line_search is tuple('method', config1, config2, ...)
self.ls_method = line_search[0]
self.ls_config = line_search[1:]
self.initialize(initial_value, **kwargs)
# valid ieration parameters:
# int, e.g., iteration_parameters=4
# iterable(int), e.g., iteration_parameters=[50,50,50] will run the loop 3 times with 50 iterations each
# iterable( list(int, arguments)), e.g., iteration_parameters=[(50,[1,2,3,4,5]), (50,[6,7,8,9])] will run the loop twice, 50 times each, for the frequencies listed in arguments
if np.iterable(iteration_parameters):
for ip in iteration_parameters:
if type(ip) in [tuple, list]:
steps, arguments = ip
elif type(ip) is int:
steps = ip
arguments = {}
else:
raise ValueError('Invalid iteration parameter {0} detected.'.format(ip))
# Call the inner loop
self.inner_loop(shots, steps, objective_arguments=arguments, **kwargs)
else:
if type(iteration_parameters) is int:
# Call the inner loop
steps=iteration_parameters
self.inner_loop(shots, steps, **kwargs)
else:
raise ValueError('Singular iteration parameters of type {0} are not permitted at this time.'.format(type(iteration_parameters))) #Floats as a convergence epsilon may happen, but nothing runs to convergence.
# Return the current state at the end of the run
return self.base_model
def inner_loop(self, shots, steps, objective_arguments={}, **kwargs):
"""Inner loop the optimization iteration
This is a separate method so that the workings of the inner loop can be
overridden without duplicating the wrapper code in the call function.
Parameters
----------
shots : list of pysit.Shot
List of Shots for which to compute the residual.
steps : int
Number of iterations to run.
"""
stop = False
iteration = 0
while not stop:
# for step in range(steps):
# Zeroth step is always the initial condition.
tt = time.time()
i = self.iteration
self.store_history('value', i, self.base_model)
self._print('Iteration {0}'.format(i))
self.solver.model_parameters = self.base_model
# extra data to try to extract from gradient call
aux_info = {'objective_value': (True, None),
'residual_norm': (True, None)}
# pass information for the solver type
objective_arguments.update(kwargs)
# Compute the gradient
gradient = self.objective_function.compute_gradient(shots, self.base_model, aux_info=aux_info, **objective_arguments)
objective_value = aux_info['objective_value'][1]
# tmp_data_write = {'data': self.base_model.data}
# fname = 'x_' + str(i) + '_2.mat'
# sio.savemat(fname, tmp_data_write)
# Process and store meta data about the gradient
self.store_history('gradient', i, gradient)
gradient_norm = gradient.norm()
self._print(' gradnorm {0}'.format(gradient_norm))
self.store_history('gradient_length', i, gradient_norm)
if aux_info['objective_value'][1] is not None:
self.store_history('objective', i, aux_info['objective_value'][1])
self._print(' objective {0}'.format(aux_info['objective_value'][1]))
if aux_info['residual_norm'][1] is not None:
self.store_history('residual_length', i, aux_info['residual_norm'][1])
self._print(' residual {0}'.format(aux_info['residual_norm'][1]))
# Compute step modifier
step = self._select_step(shots, objective_value, gradient, i, objective_arguments, **kwargs)
# Process and store meta data about the step
step_len = step.norm()
self.store_history('step_length', i, step_len)
self.store_history('step', i, step)
if self.write is True:
if self.use_parallel and (self.objective_function.parallel_wrap_shot.rank != 0):
pass
else:
if i == 0:
tmp_data_write = {'data': self.base_model.data}
fname = 'x_' + str(i+self.history_iter) + '.mat'
sio.savemat(fname, tmp_data_write)
if self.use_parallel is True:
self.objective_function.parallel_wrap_shot.comm.Barrier()
# Apply new step
self.base_model += step
if self.write is True:
if self.use_parallel and (self.objective_function.parallel_wrap_shot.rank != 0):
pass
else:
tmp_data_write = {'data': self.base_model.data}
fname = 'x_' + str(i+1+self.history_iter) + '.mat'
sio.savemat(fname, tmp_data_write)
if self.use_parallel is True:
self.objective_function.parallel_wrap_shot.comm.Barrier()
ttt = time.time()-tt
self.store_history('run_time', i, ttt)
self.iteration += 1
self._print(' run time {0}s'.format(ttt))
if (iteration >= steps) or (objective_value < self.tolerance):
stop = True
else:
iteration += 1
def _select_step(self, shots, current_objective_value, gradient, iteration, objective_arguments, **kwargs):
raise NotImplementedError("_select_step must be implemented by a subclass.")
def select_alpha(self, shots, gradient, direction, objective_arguments, **kwargs):
"""Resets the state of the optimization algorithm.
Parameters
----------
shots : list of pysit.Shot
List of Shots for which to compute on.
gradient : Solver.ModelData
The gradient in model space.
direction : Solver.ModelData
The search direction in model space.
method : {'constant', 'linear', 'backtrack', 'Wolfe'}, optional
The technique used to select alpha.
alpha : float, optional
The returned value for 'constant'.
Returns
-------
alpha : float
Line search parameter.
"""
if self.ls_method == 'constant':
return self._constant_line_search()
elif self.ls_method == 'linear':
return self._linear_line_search(shots, gradient, direction, objective_arguments, **kwargs)
elif self.ls_method == 'backtrack':
return self._backtrack_line_search(shots, gradient, direction, objective_arguments, **kwargs)
elif self.ls_method == 'Wolfe':
return self._Wolfe_line_search(shots, gradient, direction, objective_arguments, **kwargs)
else:
raise ValueError('Alpha selection method {0} invalid'.format(self.ls_method))
def _constant_line_search(self):
alpha = self.ls_config[0]
return alpha
def _linear_line_search(self, shots, gradient, direction, objective_arguments, **kwargs):
raise NotImplementedError('Linear selection of alpha is an objective function dependent operation.')
# # \int{gradient*s}dx = -\int{gradient^2} = -\int{s^2}
# d_norm = -1*np.linalg.norm(direction) * np.prod(self.solver.mesh.deltas)
#
#
# # The commented out bit is probably the correct way to do things,
# # but it does not generalize between time and frequency due to
# # differences in the way the data are stored (eg, array, dict of
# # arrays, etc). Also, the "linear" test is
## res = map(lambda x: self.objective_function.modeling_tools.linear_forward_model(x, self.base_model, direction, return_parameters=['pseudodata'], **kwargs), shots)
## pds = [np.linalg.norm(r['pseudodata'])**2 for r in res]
## denominator = np.sum(pds) * self.solver.dt
#
# res = self.objective_function.apply_hessian(shots, self.base_model, direction, hessian_mode='approximate', **objective_arguments)
## res = self.objective_function.apply_hessian(shots, direction, hessian_mode='full', **objective_arguments)
# denominator = np.dot(direction.T, res).squeeze() * np.prod(self.solver.mesh.deltas)
#
# numerator = d_norm**2
#
# return numerator / denominator
def _backtrack_line_search(self, shots, gradient, direction, objective_arguments,
current_objective_value=None,
alpha0_kwargs={}, **kwargs):
geom_fac = self.geom_fac
geom_fac_up = self.geom_fac_up
goldstein_c = self.goldstein_c #1e-4
fp_comp = self.fp_comp
if current_objective_value is None:
fk = self.objective_function.evaluate(shots, self.base_model, **objective_arguments)
else:
fk = current_objective_value
myalpha0_kwargs = dict()
myalpha0_kwargs.update(alpha0_kwargs)
myalpha0_kwargs.update({'upscale_factor' : geom_fac_up})
alpha = self._compute_alpha0(current_objective_value, gradient, **myalpha0_kwargs)
stop = False
itercnt = 1
self._print(" Starting: ".format(itercnt), alpha, fk)
Alphas = []
Objs = []
while not stop:
# Cut the initial alpha until it is as large as can be and still satisfy the valid conditions for an updated model.
valid=False
alpha *= 2
cnt = 0
while not valid:
alpha/=2
tdir = alpha*direction
model = self.base_model + tdir
# if self.proj_op is not None:
# model = self.proj_op(model)
cnt +=1
valid = model.validate()
self.solver.model_parameters = model
fkp1 = self.objective_function.evaluate(shots, model, **objective_arguments)
Alphas.append(alpha)
Objs.append(fkp1)
cmpval = fk + alpha * goldstein_c * gradient.inner_product(tdir)
self._print(" Pass {0}: a:{1}; {2} ?<= {3}".format(itercnt, alpha, fkp1, cmpval))
if (fkp1 <= cmpval) or ((abs(fkp1-cmpval)/abs(fkp1)) <= fp_comp): # reasonable floating point cutoff
stop = True
elif itercnt > self.max_linesearch_iterations:
stop = True
alpha_idx = np.argmin(Objs)
alpha = Alphas[alpha_idx]
self._print('Too many passes ({0}), attempting to use current alpha ({1}).'.format(alpha_idx, alpha))
else:
itercnt += 1
alpha = alpha * geom_fac
self.prev_alpha = alpha
return alpha
def _Wolfe_line_search(self, shots, gradient, direction, objective_arguments,
current_objective_value=None,
alpha0_kwargs={}, **kwargs):
geom_fac = self.geom_fac
geom_fac_up = self.geom_fac_up
c1 = self.Wolfe_c1 #1e-4
c2 = self.Wolfe_c2
Wolfe_fac_up = self.Wolfe_fac_up
fp_comp = self.fp_comp
if current_objective_value is None:
fk = self.objective_function.evaluate(shots, self.base_model, **objective_arguments)
else:
fk = current_objective_value
myalpha0_kwargs = dict()
myalpha0_kwargs.update(alpha0_kwargs)
myalpha0_kwargs.update({'upscale_factor' : geom_fac_up})
alpha = self._compute_alpha0(current_objective_value, gradient, **myalpha0_kwargs)
stop = False
itercnt = 1
self._print(" Starting: ".format(itercnt), alpha, fk)
aux_info = {'objective_value': (True, None),
'residual_norm': (True, None)}
Alphas = []
Objs = []
while not stop:
# Cut the initial alpha until it is as large as can be and still satisfy the valid conditions for an updated model.
valid=False
alpha *= 2
cnt = 0
while not valid:
alpha/=2
tdir = alpha*direction
model = self.base_model + tdir
# if self.proj_op is not None:
# model = self.proj_op(model)
cnt +=1
valid = model.validate()
self.solver.model_parameters = model
gradient_kp1 = self.objective_function.compute_gradient(shots, model, aux_info=aux_info, **objective_arguments)
fkp1 = aux_info['objective_value'][1]
Alphas.append(alpha)
Objs.append(fkp1)
cmpval = fk + alpha * c1 * gradient.inner_product(tdir)
cmpval2 = c2 * gradient.inner_product(tdir)
f2kp1 = gradient_kp1.inner_product(tdir)
self._print(" Pass {0}: a:{1}; {2} ?<= {3}; |{4}| ?<= |{5}|".format(itercnt, alpha, fkp1, cmpval, f2kp1, cmpval2))
if (fkp1 <= cmpval) or ((abs(fkp1-cmpval)/abs(fkp1)) <= fp_comp): # reasonable floating point cutoff
if (abs(f2kp1) <= abs(cmpval2)) or ((abs(f2kp1-cmpval2)/abs(cmpval2)) <= fp_comp):
stop = True
else:
alpha_org = alpha
alpha *= Wolfe_fac_up
itercnt += 1
else:
itercnt += 1
alpha_org = alpha
alpha = alpha * geom_fac
if itercnt > self.max_linesearch_iterations:
stop = True
alpha_idx = np.argmin(Objs)
alpha = Alphas[alpha_idx]
self._print('Too many passes ({0}), attempting to use current alpha ({1}).'.format(alpha_idx, alpha))
self.prev_alpha = alpha
return alpha
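# Minimal subclass sketch (illustrative only -- this is not the library's
# GradientDescent; it assumes a steepest-descent direction and supplies the
# _compute_alpha0 hook that the line searches above require):
class _SteepestDescentSketch(OptimizationBase):
    def _compute_alpha0(self, phi0, grad0, upscale_factor=None, **kwargs):
        # Reuse the previous step length when one exists; otherwise start at 1.
        return getattr(self, 'prev_alpha', 1.0)

    def _select_step(self, shots, current_objective_value, gradient,
                     iteration, objective_arguments, **kwargs):
        direction = -1 * gradient
        alpha = self.select_alpha(shots, gradient, direction,
                                  objective_arguments,
                                  current_objective_value=current_objective_value)
        self.store_history('alpha', iteration, alpha)
        return alpha * direction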
| 39.120178 | 222 | 0.592938 | 3,046 | 26,367 | 4.980302 | 0.172685 | 0.009229 | 0.014568 | 0.00969 | 0.403032 | 0.350956 | 0.326632 | 0.294199 | 0.284047 | 0.265326 | 0 | 0.010399 | 0.325255 | 26,367 | 673 | 223 | 39.178306 | 0.84228 | 0.360906 | 0 | 0.402556 | 0 | 0.003195 | 0.073472 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.054313 | false | 0.01278 | 0.015974 | 0 | 0.115016 | 0.041534 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7f289f79fa5cf6d1164d782245286d6f16f90388 | 7,091 | py | Python | dashboard.py | fschw/dashoard | 6583d8bdfd1f14d40607df121832c5ceb6cbaa81 | [
"Apache-2.0"
] | null | null | null | dashboard.py | fschw/dashoard | 6583d8bdfd1f14d40607df121832c5ceb6cbaa81 | [
"Apache-2.0"
] | null | null | null | dashboard.py | fschw/dashoard | 6583d8bdfd1f14d40607df121832c5ceb6cbaa81 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/python
# -*- coding:utf-8 -*-
import os
import sys
import logging
import ccs811LIBRARY
logging.basicConfig(format='%(asctime)s %(levelname)-8s %(message)s', level=logging.DEBUG, datefmt='%Y-%m-%d %H:%M:%S')
picdir = os.path.join(os.path.join(os.path.join(os.path.join(os.path.dirname(os.path.realpath(__file__)), 'e-Paper'),'RaspberryPi_JetsonNano'),'python'),'pic')
logging.info("Add pic dir: "+ picdir)
# initialize mockups on the dev env, and real libs on the Raspberry Pi
if os.path.exists('/sys/bus/platform/drivers/gpiomem-bcm2835'):
logging.info("Start in productive mode...")
libdir = os.path.join(os.path.join(os.path.join(os.path.join(os.path.dirname(os.path.realpath(__file__)), 'e-Paper'),'RaspberryPi_JetsonNano'),'python'),'lib')
if os.path.exists(libdir):
sys.path.append(libdir)
logging.info("Add lib dir: "+ libdir)
libdir = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'Adafruit_Python_DHT')
if os.path.exists(libdir):
sys.path.append(libdir)
logging.info("Add lib dir: "+ libdir)
import Adafruit_DHT
from waveshare_epd import epd4in2
else:
logging.info("Start in mockup mode...")
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), 'mockups'))
import Adafruit_DHT_mock
from waveshare_epd import epd4in2_mock
import time
from PIL import Image, ImageDraw, ImageFont
import subprocess
import traceback
import requests
from flask import Flask
from flask import request
import threading
access_token = ""
app = Flask(__name__)
@app.route("/")
def receive_code():
logging.info("HTTP req")
code = request.args.get('code', '')
if code is not "":
print("Code received:" + code)
url = "https://iam.viessmann.com/idp/v2/token"
header = {"Content-Type": "application/x-www-form-urlencoded"}
data = "grant_type=authorization_code&client_id=9ceff2a5f57d345a580142626e3b4a7f&redirect_uri=http://192.168.178.201:4200/&code_verifier=2e21faa1-db2c-4d0b-a10f-575fd372bc8c-575fd372bc8c&code="+code
response = requests.post(url=url, headers=header, data=data)
if response.ok:
global access_token
access_token = response.json()['access_token']
logging.info("New access token: " + access_token)
return "Authorisation OK"
else:
return "Authorisation NOK"
return "No code received"
if __name__ == "__main__":
args = {'host': '0.0.0.0', 'port' : 4200}
threading.Thread(target=app.run, kwargs=args).start()
font24 = ImageFont.truetype(os.path.join(picdir, 'Font.ttc'), 24)
sensor = ccs811LIBRARY.CCS811()
def setup(mode=1):
print('Starting CCS811 Read')
sensor.configure_ccs811()
sensor.set_drive_mode(mode)
if sensor.check_for_error():
sensor.print_error()
raise ValueError('Error at setDriveMode.')
result = sensor.get_base_line()
sys.stdout.write("baseline for this sensor: 0x")
if result < 0x100:
sys.stdout.write('0')
if result < 0x10:
sys.stdout.write('0')
sys.stdout.write('{:x}'.format(result) + "\n")  # write hex digits to match the 0x prefix
'''
try:
res = subprocess.run("sudo pigpiod", shell=True, check=True, text=True)
logging.info(res.stdout)
setup(1)
res = subprocess.run("sudo killall pigpiod", shell=True, check=True, text=True)
logging.info(res.stdout)
except IOError as e:
logging.info(e)
'''
try:
epd = epd4in2.EPD()
logging.info("Init and Clear display")
epd.init()
epd.Clear()
loop = True
cnt = 1
while loop:
image = Image.new('1', (epd.width, epd.height), 255)
draw = ImageDraw.Draw(image)
logging.info("Updating for Iteration " + str(cnt))
cnt = cnt + 1
#read outside temp
'''logging.info("Read outside temp...")
logging.info("Token:" + access_token)
header = {"Authorization": "Bearer " + access_token}
req1 = "https://api.viessmann.com/iot/v1/equipment/installations/952499/gateways/7637415022052208/devices/0/features/heating.sensors.temperature.outside"
logging.info("reading temperature.outside")
response = requests.get(url=req1, headers=header)
outsideTemp = ""
if response.status_code == 200:
outsideTemp = response.json()["data"]["properties"]["value"]["value"]
logging.info('Outside temp: {:.1f}°'.format(outsideTemp))
draw.text((10, 0), 'Außen: {:.1f}°'.format(outsideTemp), font=font24, fill=0)
# read humidity and inside temp
logging.info("Read inside temperature and humidity...")
insideHumidity, insideTemp = Adafruit_DHT.read_retry(Adafruit_DHT.AM2302, 4)
if insideHumidity is not None and insideTemp is not None:
logging.info( 'Inside temp: {:.1f}°'.format(insideTemp))
logging.info( 'Rel. Humidity: {:.1f}%'.format(insideHumidity))
draw.text((10, 50), 'Innen: {:.1f}°'.format(insideTemp), font = font24, fill = 0)
draw.text((10, 100), 'Rel: {:.1f}%'.format(insideHumidity), font = font24, fill = 0)
else:
logging.info( "Could not read from Inside temp/Humidity")
'''
#res = subprocess.run("sudo pigpiod", shell=True, check=True, text=True)
#logging.info(res.stdout)
'''
logging.info("Read CO2 and TVOC...")
if sensor.data_available():
sensor.read_logorithm_results()
logging.info( "CO2: {0:.1f} TVOC: {1:.1f}".format(sensor.CO2, sensor.tVOC))
draw.text((10, 150), "CO2: {0:.1f} TVOC: {1:.1f}".format(sensor.CO2, sensor.tVOC), font = font24, fill = 0)
elif sensor.check_for_error():
logging.info( "Could not read from CO2/TVOC Sensor")
'''
#res = subprocess.run("sudo killall pigpiod", shell=True, check=True, text=True)
#logging.info(res.stdout)
logging.info("Adding visuals to image...")
upper = 80
lower = 299
left = 70
right = 330
#house frame
draw.line((left, lower, right, lower), fill = 0, width = 3)
draw.line((left, lower, left, upper), fill = 0, width = 3)
draw.line((left, upper, right, upper), fill = 0, width = 3)
draw.line((right, upper, right, lower), fill = 0, width = 3)
#roof
mid = 200
y = mid*upper/(mid-left)
draw.line((mid, 1, 0, y), fill = 0, width = 3)
draw.line((mid, 1, 400, y), fill = 0, width = 3)
'''draw.line((70, 50, 20, 100), fill = 0)
draw.rectangle((20, 50, 70, 100), outline = 0)
draw.line((165, 50, 165, 100), fill = 0)
draw.line((140, 75, 190, 75), fill = 0)
draw.arc((140, 50, 190, 100), 0, 360, fill = 0)
draw.rectangle((80, 50, 130, 100), fill = 0)'''
#draw.chord((200, 50, 250, 100), 0, 360, fill = 0)
epd.display(epd.getbuffer(image))
time.sleep(30)
except IOError as e:
logging.info(e)
except KeyboardInterrupt:
logging.info("ctrl + c:")
epd.Clear()
epd.sleep()
epd4in2.epdconfig.module_exit()
exit()
| 37.718085 | 206 | 0.626287 | 940 | 7,091 | 4.655319 | 0.311702 | 0.070384 | 0.025137 | 0.027422 | 0.280165 | 0.249543 | 0.227377 | 0.1883 | 0.1883 | 0.1883 | 0 | 0.05788 | 0.217882 | 7,091 | 187 | 207 | 37.919786 | 0.730436 | 0.052038 | 0 | 0.113208 | 0 | 0.009434 | 0.198966 | 0.027719 | 0 | 0 | 0.002114 | 0 | 0 | 1 | 0.018868 | false | 0 | 0.150943 | 0 | 0.198113 | 0.028302 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7f28a7ab38e9b7eef62b3b54becbde2c0fcfdf3c | 7,025 | py | Python | polyply/src/polyply_parser.py | jan-stevens/polyply_1.0 | 17578a0ea546584164722129f0d718a5c9533a1a | [
"Apache-2.0"
] | 34 | 2020-07-23T14:50:22.000Z | 2022-03-17T02:03:41.000Z | polyply/src/polyply_parser.py | jan-stevens/polyply_1.0 | 17578a0ea546584164722129f0d718a5c9533a1a | [
"Apache-2.0"
] | 136 | 2020-06-12T15:06:18.000Z | 2022-03-31T11:31:09.000Z | polyply/src/polyply_parser.py | jan-stevens/polyply_1.0 | 17578a0ea546584164722129f0d718a5c9533a1a | [
"Apache-2.0"
] | 7 | 2020-07-30T10:53:47.000Z | 2022-03-11T19:27:57.000Z | # Copyright 2020 University of Groningen
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections import Counter, defaultdict
import numpy as np
import networkx as nx
import vermouth
import vermouth.gmx
from vermouth.parser_utils import SectionLineParser
from vermouth.gmx.itp_read import ITPDirector
class PolyplyParser(ITPDirector):
'''
Parser for polyply input format.
'''
def __init__(self, force_field):
super().__init__(force_field)
self.citations = set()
@SectionLineParser.section_parser('moleculetype', 'citation')
def _parse_citation(self, line, lineno=0):
cite_keys = line.split()
self.current_block.citations.update(cite_keys)
@SectionLineParser.section_parser('citations')
def _parse_ff_citations(self, line, lineno=0):
# parses force-field wide citations
cite_keys = line.split()
self.citations.update(cite_keys)
# overwritten to allow for dangling bonds
def _treat_block_interaction_atoms(self, atoms, context, section):
all_references = []
for atom in atoms:
reference = atom[0]
if reference.isdigit():
if int(reference) < 1:
msg = ('Section {} contains an atom reference smaller than 1, which is not allowed.')
raise IOError(msg.format(section.name))
# The indices in the file are 1-based
reference = int(reference) - 1
atom[0] = reference
else:
msg = ('Atom names in blocks cannot be prefixed with + or -. '
'The name "{}", used in section "{}" of the block "{}" '
'is not valid in a block.')
raise IOError(msg.format(reference, section, context.name))
all_references.append(reference)
return all_references
def treat_link_multiple(self):
"""
Iterates over a :class:`vermouth.force_field.ForceField` and
adds version tags for all interactions within a
:class:`vermouth.molecule.Link` that are applied to the same atoms.
"""
for link in self.force_field.links:
for key in link.interactions:
terms = link.interactions[key]
count_terms = Counter(tuple(term.atoms) for term in terms)
for term in terms:
tag = count_terms[tuple(term.atoms)]
if tag >= 1:
term.meta.update({"version":tag})
count_terms[tuple(term.atoms)] = tag -1
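# --- Illustrative sketch (not part of the original module) ---
# The method above numbers repeated interactions on the same atoms by counting
# the atom tuples and handing out decreasing version tags; in isolation:
#
# from collections import Counter
# atoms_per_term = [('C1', 'C2'), ('C1', 'C2'), ('C2', 'C3')]
# counts = Counter(atoms_per_term)
# for atoms in atoms_per_term:
#     print(atoms, 'version', counts[atoms])
#     counts[atoms] -= 1
# # ('C1', 'C2') is tagged version 2 then 1; ('C2', 'C3') is tagged version 1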
def _treat_link_atoms(self, block, link, inter_type):
# we need to convert the atom index to an atom-name
n_atoms = len(block.nodes)
# the commented-out statement below does not work because node and
# atom name are coupled for blocks, which is of debatable usefulness
#atom_names = list(nx.get_node_attributes(block, 'atomname'))
atom_names = [block.nodes[node]["atomname"] for node in block.nodes]
for inter_type in link.interactions:
for interaction in link.interactions[inter_type]:
new_atoms = []
for atom in interaction.atoms:
prefix = ""
while atom/n_atoms >= 1:
atom = atom - n_atoms
prefix = prefix + "+"
new_name = prefix + atom_names[atom]
new_atoms.append(new_name)
attrs = block.nodes[atom]
link.add_node(new_name, **attrs)
order = prefix.count("+")
nx.set_node_attributes(link, {new_name:order}, "order")
interaction.atoms[:] = new_atoms
return new_atoms
def _split_links_and_blocks(self, block):
# Make sure to add the atomtype, residue number etc. to
# the proper nodes.
n_atoms = len(block.nodes)
res_name = block.name
prev_atoms = []
links = []
for key in block.interactions:
block_interactions = []
for interaction in block.interactions[key]:
if any(isinstance(atom, str) for atom in interaction.atoms):
return
if np.sum(np.array(interaction.atoms) > n_atoms - 1) > 0:
if interaction.atoms != prev_atoms:
prev_atoms[:] = interaction.atoms
new_link = vermouth.molecule.Link()
new_link.interactions = defaultdict(list)
new_link.citations = block.citations
new_link.name = res_name
links.append(new_link)
links[-1].interactions[key].append(interaction)
else:
block_interactions.append(interaction)
block.interactions[key] = block_interactions
for link in links:
self._treat_link_atoms(block, link, key)
self.force_field.links.append(link)
def _make_edges(self):
for block in self.force_field.blocks.values():
inter_types = list(block.interactions.keys())
for inter_type in inter_types:
block.make_edges_from_interaction_type(type_=inter_type)
for link in self.force_field.links:
inter_types = list(link.interactions.keys())
for inter_type in inter_types:
link.make_edges_from_interaction_type(type_=inter_type)
# overwrites the finalize method to deal with dangling bonds
# and to deal with multiple interactions in the way needed
# for polyply to work
def finalize(self, lineno=0):
if self.current_meta is not None:
raise IOError("Your #ifdef/#ifndef section is orderd incorrectly."
"There is no #endif for the last pragma..")
prev_section = self.section
self.section = []
self.finalize_section(prev_section, prev_section)
self.macros = {}
self.section = None
for block in self.force_field.blocks.values():
block.citations.update(self.citations)
if len(block.nodes) > 0:
n_atoms = len(block.nodes)
self._split_links_and_blocks(block)
self.treat_link_multiple()
self._make_edges()
def read_polyply(lines, force_field):
director = PolyplyParser(force_field)
return list(director.parse(iter(lines)))
| 39.24581 | 95 | 0.601423 | 831 | 7,025 | 4.932611 | 0.277978 | 0.026836 | 0.020493 | 0.015614 | 0.120273 | 0.083923 | 0.070749 | 0.057087 | 0 | 0 | 0 | 0.004791 | 0.316584 | 7,025 | 178 | 96 | 39.466292 | 0.84899 | 0.186904 | 0 | 0.110169 | 0 | 0 | 0.059826 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.084746 | false | 0 | 0.059322 | 0 | 0.186441 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7f28c2e82480941f199ea350974100507d9d1569 | 8,138 | py | Python | timeflux/nodes/query.py | jonathanjfshaw/timeflux | a17100bce754bd6d85d2c9c65a95bfcc0e7fe219 | [
"MIT"
] | null | null | null | timeflux/nodes/query.py | jonathanjfshaw/timeflux | a17100bce754bd6d85d2c9c65a95bfcc0e7fe219 | [
"MIT"
] | null | null | null | timeflux/nodes/query.py | jonathanjfshaw/timeflux | a17100bce754bd6d85d2c9c65a95bfcc0e7fe219 | [
"MIT"
] | null | null | null |
import numpy as np
from timeflux.core.exceptions import WorkerInterrupt
from timeflux.core.node import Node
class SelectRange(Node):
"""Select a subset of the given data along vertical (index) or horizontal (columns) axis.
Attributes:
i (Port): default data input, expects a DataFrame, possibly with a MultiIndex.
o (Port): default output, provides a DataFrame, possibly with a MultiIndex.
Args:
ranges (dict): Dict with keys are level names and values are selection ranges.
axis (int): If 0, the level concerns row index, if 1, columns index (`0` or `1`). Default: `0`.
inclusive (bool) : Whether the boundaries are strict or included. Default: `False`.
Example:
In this example, we have an input DataFrame with multi level columns and we want to select data with index from level of name `second` in range `[1,1.5]`.
We set:
* ``ranges`` = `{"second": [1, 1.5]}`
* ``axis`` = `1`
* ``inclusive`` = `True`
If the data received on port ``i`` is: ::
first A ... B
second 1.3 1.6 1.9 1.3 1.6 1.9
2017-12-31 23:59:59.998745401 0.185133 0.541901 0.806561 ... 0.732225 0.806561 0.658783
2018-01-01 00:00:00.104507143 0.692277 0.849196 0.987668 ... 0.489425 0.221209 0.987668
2018-01-01 00:00:00.202319939 0.944059 0.039427 0.567945 ... 0.925248 0.180575 0.567945
The data provided on port ``o`` will be: ::
first A B
second 1.3 1.3
2017-12-31 23:59:59.998745401 0.185133 0.732225
2018-01-01 00:00:00.104507143 0.692277 0.489425
2018-01-01 00:00:00.202319939 0.944059 0.925248
"""
def __init__(self, ranges, axis=0, inclusive=False):
self._ranges = ranges # list of ranges per level
self._inclusive = inclusive # include boundaries.
self._axis = axis
def update(self):
if not self.i.ready():
return
self.o.meta = self.i.meta
if self._axis == 1:
self.i.data = self.i.data.T
mask = self._mask()
self.o.data = self.i.data[np.logical_and.reduce(mask)]
if self._axis == 1:
self.o.data = self.o.data.T
def _mask(self):
if self._inclusive:
mask = [(self.i.data.index.get_level_values(l) >= r[0]) &
(self.i.data.index.get_level_values(l) <= r[1])
for l, r in (self._ranges).items() if r is not None]
else:
mask = [(self.i.data.index.get_level_values(l) > r[0]) &
(self.i.data.index.get_level_values(l) < r[1])
for l, r in (self._ranges).items() if r is not None]
return mask
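# --- Illustrative sketch (not part of the original module) ---
# _mask() builds one boolean array per level with index.get_level_values; the
# plain-pandas equivalent for a single column level looks like this:
#
# import pandas as pd
# idx = pd.MultiIndex.from_product([['A', 'B'], [1.3, 1.6, 1.9]],
#                                  names=['first', 'second'])
# df = pd.DataFrame(1.0, index=pd.date_range('2018', periods=3), columns=idx)
# seconds = df.columns.get_level_values('second')
# df.loc[:, (seconds >= 1) & (seconds <= 1.5)]  # keeps only the 1.3 columns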
class XsQuery(Node):
"""Returns a cross-section (row(s) or column(s)) from the data.
Attributes:
i (Port): default input, expects a DataFrame, possibly with a MultiIndex.
o (Port): default output, provides a DataFrame, possibly with a MultiIndex.
Args:
key (str|tuple): Some label contained in the index, or partially in a MultiIndex index.
axis (int): Axis to retrieve cross-section on (`0` or `1`). Default: `0`.
level (str|int|tuple) : In case of a key partially contained in a MultiIndex, indicates which levels are used. Levels can be referred by label or position.
drop_level (bool) : If False, returns DataFrame with same level. Default: `False`.
Example:
In this example, we have an input DataFrame with multi level columns and we want to select cross section between `B` from level of name `first` and `1` from level of name `second`.
We set:
* ``key`` = `("B", 1)`
* ``axis`` = `1`
* ``level`` = `["first", "second"]`
* ``drop_level`` = `False`
If the data received on port ``i`` is: ::
first A ... B
second 1 2 ... 1 2
2017-12-31 23:59:59.998745401 0.185133 0.541901 ... 0.297349 0.806561
2018-01-01 00:00:00.104507143 0.692277 0.849196 ... 0.844549 0.221209
2018-01-01 00:00:00.202319939 0.944059 0.039427 ... 0.120567 0.180575
The data provided on port ``o`` will be: ::
first B
second 1
2018-01-01 00:00:00.300986584 0.297349
2018-01-01 00:00:00.396560186 0.844549
2018-01-01 00:00:00.496559945 0.120567
References:
See the documentation of `pandas.DataFrame.xs <https://pandas.pydata.org/pandas-docs/stable/generated/pandas.DataFrame.xs.html>`_ .
"""
def __init__(self, key, **kwargs):
"""
Args:
key (str|tuple): Some label contained in the index, or partially in a MultiIndex index.
kwargs: Keyword arguments to call pandas xs method: axis, level, drop_level
"""
self._key = key
self._kwargs = kwargs
self._ready = False
def update(self):
if not self.i.ready():
return
self.o.meta = self.i.meta
if not self._ready:
try:
self._query()
self._ready = True
except KeyError as e:
raise WorkerInterrupt(e)
else:
self._query()
def _query(self):
self.o.data = self.i.data.xs(key=self._key, **self._kwargs)
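# --- Illustrative sketch (not part of the original module) ---
# XsQuery simply delegates to pandas' DataFrame.xs; the docstring example above
# corresponds to the standalone call:
#
# df.xs(key=('B', 1), axis=1, level=['first', 'second'], drop_level=False)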
class LocQuery(Node):
"""Slices DataFrame on group of rows and columns by label(s)
Attributes:
i (Port): default data input, expects DataFrame.
o (Port): default output, provides DataFrame.
Args:
key (str|list|tuple): Label selection specification.
axis (int): Axis to query the label from (`0` or `1`). Default: `1`.
Example:
In this example, we have an input DataFrame with 5 columns `[A, B, C, D, E]` and we want to select columns A and E.
We set:
* ``key`` = `["A", "E"]`
* ``axis`` = `1`
If the data received on port ``i`` is: ::
A B ... E F
2017-12-31 23:59:59.998745401 0.185133 0.541901 ... 0.806561 0.658783
2018-01-01 00:00:00.104507143 0.692277 0.849196 ... 0.221209 0.987668
2018-01-01 00:00:00.202319939 0.944059 0.039427 ... 0.180575 0.567945
The data provided on port ``o`` will be: ::
A E
2017-12-31 23:59:59.998745401 0.185133 0.806561
2018-01-01 00:00:00.104507143 0.692277 0.221209
2018-01-01 00:00:00.202319939 0.944059 0.180575
References:
See the documentation of `pandas.DataFrame.loc <https://pandas.pydata.org/pandas-docs/stable/generated/pandas.DataFrame.loc.html>`_ .
"""
def __init__(self, key, axis=1):
self._axis = axis
if not isinstance(key, (list, tuple)):
self._key = [key]
else:
self._key = key
self._ready = False
def update(self):
if not self.i.ready():
return
self.o.meta = self.i.meta
if not self._ready:
# Validate the key on the first call; an invalid key interrupts the worker.
try:
self._query()
self._ready = True
except KeyError as e:
raise WorkerInterrupt(e)
else:
self._query()
def _query(self):
if self._axis == 0:
self.o.data = self.i.data.loc[self._key, :]
else: # self._axis == 1:
self.o.data = self.i.data.loc[:, self._key]
| 35.229437 | 188 | 0.52986 | 1,087 | 8,138 | 3.912604 | 0.185833 | 0.024453 | 0.024453 | 0.030567 | 0.628968 | 0.597461 | 0.568775 | 0.536798 | 0.51352 | 0.485775 | 0 | 0.166347 | 0.359548 | 8,138 | 230 | 189 | 35.382609 | 0.649655 | 0.635291 | 0 | 0.613333 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.12 | false | 0 | 0.04 | 0 | 0.266667 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7f2a45cc54e29cb7e353983ef17c8b140cf637fe | 2,205 | py | Python | tools/report-converter/codechecker_report_converter/analyzers/tslint/analyzer_result.py | hyker/codechecker | 8ddb9bd1aa037d4499a2be8d35e9c1d470163baf | [
"Apache-2.0"
] | 1 | 2021-04-08T16:51:45.000Z | 2021-04-08T16:51:45.000Z | tools/report-converter/codechecker_report_converter/analyzers/tslint/analyzer_result.py | hyker/codechecker | 8ddb9bd1aa037d4499a2be8d35e9c1d470163baf | [
"Apache-2.0"
] | 1 | 2021-11-30T10:43:49.000Z | 2021-11-30T10:43:49.000Z | tools/report-converter/codechecker_report_converter/analyzers/tslint/analyzer_result.py | hyker/codechecker | 8ddb9bd1aa037d4499a2be8d35e9c1d470163baf | [
"Apache-2.0"
] | null | null | null | # -------------------------------------------------------------------------
#
# Part of the CodeChecker project, under the Apache License v2.0 with
# LLVM Exceptions. See LICENSE for license information.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
#
# -------------------------------------------------------------------------
import logging
import os
import json
from typing import Dict, List
from codechecker_report_converter.report import File, get_or_create_file, \
Report
from ..analyzer_result import AnalyzerResultBase
LOG = logging.getLogger('report-converter')
class AnalyzerResult(AnalyzerResultBase):
""" Transform analyzer result of the TSLint analyzer. """
TOOL_NAME = 'tslint'
NAME = 'TSLint'
URL = 'https://palantir.github.io/tslint'
def get_reports(self, result_file_path: str) -> List[Report]:
""" Parse the given analyzer result. """
reports: List[Report] = []
if not os.path.exists(result_file_path):
LOG.error("Report file does not exist: %s", result_file_path)
return reports
try:
with open(result_file_path, 'r',
encoding="utf-8", errors="ignore") as report_f:
bugs = json.load(report_f)
except (IOError, json.decoder.JSONDecodeError):
LOG.error("Failed to parse the given analyzer result '%s'. Please "
"give a valid json file generated by TSLint.",
result_file_path)
return reports
file_cache: Dict[str, File] = {}
for bug in bugs:
file_path = os.path.join(
os.path.dirname(result_file_path), bug.get('name'))
if not os.path.exists(file_path):
LOG.warning("Source file does not exists: %s", file_path)
continue
# Convert the 0-based start position to 1-based line/column numbers.
start_pos = bug['startPosition']
line = int(start_pos['line'] + 1)
col = int(start_pos['character'] + 1)
reports.append(Report(
get_or_create_file(os.path.abspath(file_path), file_cache),
line, col, bug['failure'], bug['ruleName']
))
return reports
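# --- Illustrative input (not part of the original module; values are made up) ---
# get_reports() consumes TSLint's JSON output; a single entry with exactly the
# fields read above ('name', 'startPosition', 'failure', 'ruleName') would be:
SAMPLE_TSLINT_OUTPUT = [{
    "name": "src/app.ts",
    "startPosition": {"line": 9, "character": 4},
    "failure": "missing semicolon",
    "ruleName": "semicolon",
}]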
| 32.426471 | 79 | 0.565079 | 249 | 2,205 | 4.86747 | 0.437751 | 0.066007 | 0.069307 | 0.024752 | 0.117162 | 0 | 0 | 0 | 0 | 0 | 0 | 0.004367 | 0.273016 | 2,205 | 67 | 80 | 32.910448 | 0.751716 | 0.187755 | 0 | 0.073171 | 0 | 0 | 0.156321 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.02439 | false | 0 | 0.146341 | 0 | 0.341463 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7f2b5175127736ab4e0d2ce18a1b4d34dfabbaf8 | 5,040 | py | Python | FetchPatents.py | tymrail/PatentFPO | 530f73148a8249cdc146ca2c3cedef1995df998e | [
"MIT"
] | null | null | null | FetchPatents.py | tymrail/PatentFPO | 530f73148a8249cdc146ca2c3cedef1995df998e | [
"MIT"
] | null | null | null | FetchPatents.py | tymrail/PatentFPO | 530f73148a8249cdc146ca2c3cedef1995df998e | [
"MIT"
] | 1 | 2020-05-21T11:42:02.000Z | 2020-05-21T11:42:02.000Z | import requests
from bs4 import BeautifulSoup
import sqlite3
import re
from OperateDatabase import *
import socket
import random
import time
socket.setdefaulttimeout(1000)
base_url = 'http://www.freepatentsonline.com'
additional_url = ['/result.html?p=',
'&sort=relevance&srch=top&query_txt=AN%2F%22',
'%22&patents=on']
cx = sqlite3.connect('patents.db')
company_list = ['nintendo']
utils = {'Title': 'title',
'Inventors': 'inventor',
'Application Number': 'app_num',
'Abstract': 'abstract',
'Publication Date': 'pub_date',
'Filing Date': 'fil_date',
'Assignee': 'assignee',
}
# Note: each entry needs its own trailing comma; without them Python silently
# concatenates the adjacent string literals into one giant user-agent string.
user_agents = [
    'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.95 Safari/537.36 OPR/26.0.1656.60',
    'Mozilla/5.0 (Windows NT 5.1; U; en; rv:1.8.1) Gecko/20061208 Firefox/2.0.0 Opera 9.50',
    'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; en) Opera 9.50',
    'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:34.0) Gecko/20100101 Firefox/34.0',
    'Mozilla/5.0 (X11; U; Linux x86_64; zh-CN; rv:1.9.2.10) Gecko/20100922 Ubuntu/10.10 (maverick) Firefox/3.6.10',
    'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/534.57.2 (KHTML, like Gecko) Version/5.1.7 Safari/534.57.2',
    'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.71 Safari/537.36',
    'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.11 (KHTML, like Gecko) Chrome/23.0.1271.64 Safari/537.11',
    'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US) AppleWebKit/534.16 (KHTML, like Gecko) Chrome/10.0.648.133 Safari/534.16',
    'Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10_6_8; en-us) AppleWebKit/534.50 (KHTML, like Gecko) Version/5.1 Safari/534.50',
    'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-us) AppleWebKit/534.50 (KHTML, like Gecko) Version/5.1 Safari/534.50',
    'Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Trident/5.0)',
    'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0)',
    'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/50.0.2661.75 Safari/537.36',
]
def make_up(page, assignee_name):
return base_url + additional_url[0] \
+ str(page) + additional_url[1] \
+ assignee_name + additional_url[2]
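# --- Illustrative usage (not part of the original script) ---
# make_up() only concatenates the query-string pieces; page 1 of the
# 'nintendo' assignee search, for example, becomes:
example_url = make_up(1, 'nintendo')
# -> 'http://www.freepatentsonline.com/result.html?p=1&sort=relevance&srch=top&query_txt=AN%2F%22nintendo%22&patents=on'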
def lets_rock(companies):
for ic in companies:
try:
company_url = make_up(1, ic)
r = requests.get(company_url,
headers={'User-Agent': random.choice(user_agents)})
soup = BeautifulSoup(r.text, 'lxml')
page_count = int(str(soup.find_all(text=re.compile('Matches'))[0]).strip().split(' ')[-1])
print(page_count)
print('let\'s rock')
for i in range(1, 5):
print('fetching page' + str(i))
fetch_page(make_up(i, ic))
except Exception:
# Append so earlier failures are not overwritten.
with open('error_report.txt', 'a') as f:
f.write('fail fetching company ' + ic + '\n')
else:
time.sleep(random.randint(0, 3) / 10)
def fetch_page(company_url):
r = requests.get(company_url,
headers={'User-Agent': random.choice(user_agents)})
soup = BeautifulSoup(r.text, 'lxml')
text = soup.find_all('tr', 'rowalt')
for it in text:
url = str(it.select('a')[0]['href']).strip()
doc_number = str(it.select('td')[1].text).strip()
try:
fetch_detail(base_url + url, doc_number)
except Exception:
# Append so earlier failures are not overwritten.
with open('error_report.txt', 'a') as f:
f.write('fail fetching ' + url + '\n')
else:
time.sleep(random.randint(0, 3) / 10)
def fetch_detail(detail_url, doc_number):
# print(detail_url)
# print(doc_number)
print('fetching patent: ' + str(doc_number))
r = requests.get(detail_url,
headers={'User-Agent': random.choice(user_agents)})
soup = BeautifulSoup(r.text, 'lxml')
text = soup.find_all('div', 'disp_doc2')
# print(text[0].find('div', 'disp_elm_title').text)
# print(str(text[0].find('div', 'disp_elm_text').text).strip())
data_dict = {}
for it in text:
title = it.find('div', 'disp_elm_title')
t = it.find('div', 'disp_elm_text')
if title is not None and t is not None:
title_text = str(title.text).strip().replace(':', '')
t_text = str(t.text) \
.strip() \
.replace('\t', '') \
.replace(' ', '')
if title_text in utils:
# print(utils[title_text])
# print(t_text)
data_dict[utils[title_text]] = t_text
# print(str(title.text).strip().replace(':', ''))
# print(str(t.text).strip())
# print('\n')
data_dict['app_num'] = doc_number
insert_data(data_dict)
if __name__ == '__main__':
# fetch_detail('http://www.freepatentsonline.com/y2017/0346746.html', 'D1234567')
lets_rock(company_list)
| 38.769231 | 131 | 0.591667 | 735 | 5,040 | 3.944218 | 0.278912 | 0.008969 | 0.037254 | 0.044153 | 0.393929 | 0.328389 | 0.307347 | 0.28803 | 0.279407 | 0.266989 | 0 | 0.087909 | 0.248413 | 5,040 | 129 | 132 | 39.069767 | 0.677402 | 0.07004 | 0 | 0.204082 | 0 | 0.142857 | 0.393713 | 0.009196 | 0 | 0 | 0 | 0 | 0 | 1 | 0.040816 | false | 0 | 0.081633 | 0.010204 | 0.132653 | 0.040816 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7f2bf725dc4bfa7321dd8446ecf83336132ad3f8 | 3,005 | py | Python | ib_trading_calendars/status.py | alexanu/ib-trading-calendars | 5a92770d106542968e856aa54ae48d48b306d7f3 | [
"Apache-2.0"
] | 9 | 2019-02-04T19:42:12.000Z | 2021-08-04T18:36:43.000Z | ib_trading_calendars/status.py | alexanu/ib-trading-calendars | 5a92770d106542968e856aa54ae48d48b306d7f3 | [
"Apache-2.0"
] | 1 | 2020-03-12T17:32:38.000Z | 2020-03-12T17:32:38.000Z | ib_trading_calendars/status.py | alexanu/ib-trading-calendars | 5a92770d106542968e856aa54ae48d48b306d7f3 | [
"Apache-2.0"
] | 8 | 2019-02-04T21:08:38.000Z | 2021-08-04T18:36:45.000Z | #!/usr/bin/env python
# Copyright 2019 QuantRocket LLC - All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import pandas as pd
import json
from ib_trading_calendars.calendar_utils import ib_calendar_factories
from trading_calendars.calendar_utils import _default_calendar_factories
def get_exchange_status(exchange, dt):
"""
Returns the exchange status at the specified datetime.
"""
try:
calendar_cls = ib_calendar_factories[exchange]
except KeyError:
calendar_cls = _default_calendar_factories[exchange]
asof_datetime = pd.Timestamp(dt, tz=calendar_cls.tz)
start = asof_datetime - pd.Timedelta(days=30)
start = pd.Timestamp(start.date(), tz="UTC")
end = asof_datetime + pd.Timedelta(days=30)
end = pd.Timestamp(end.date(), tz="UTC")
calendar = calendar_cls(start=start, end=end)
is_open = calendar.is_open_on_minute(asof_datetime)
# Note: The `trading_calendars` package sets exchange open times 1 minute
# later than the actual open. For example, the exchange hours for NYSE
# are 9:31-16:00 in `trading_calendars`, even though NYSE actually opens
# at 9:30. This behavior reflects the needs of zipline. To deal with
# this, we consider the exchange open if it is open this minute, or next
# minute.
if not is_open:
is_open = calendar.is_open_on_minute(asof_datetime + pd.Timedelta(minutes=1))
if is_open:
# Rewind open 1 minute
since = calendar.previous_open(asof_datetime) - pd.Timedelta(minutes=1)
until = calendar.next_close(asof_datetime)
else:
since = calendar.previous_close(asof_datetime)
# Rewind open 1 minute
until = calendar.next_open(asof_datetime) - pd.Timedelta(minutes=1)
since = since.tz_convert(asof_datetime.tz.zone).strftime("%Y-%m-%dT%H:%M:%S")
until = until.tz_convert(asof_datetime.tz.zone).strftime("%Y-%m-%dT%H:%M:%S")
return dict(
status="open" if is_open else "closed",
since=since,
until=until)
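# --- Illustrative usage (not part of the original module; the exchange code
# and timestamp below are hypothetical) ---
# status = get_exchange_status("NYSE", "2019-01-02T10:00:00")
# # -> e.g. {'status': 'open', 'since': '2019-01-02T09:29:00', 'until': '2019-01-02T16:00:00'}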
def main():
parser = argparse.ArgumentParser(
description="check the status of an exchange at the specified time")
parser.add_argument(
"exchange",
help="the IB exchange code")
parser.add_argument(
"dt",
help="the ISO format datetime to check")
args = parser.parse_args()
args = vars(args)
status = get_exchange_status(args["exchange"], args["dt"])
print(json.dumps(status))
| 36.646341 | 85 | 0.706156 | 429 | 3,005 | 4.815851 | 0.39627 | 0.063892 | 0.040658 | 0.055663 | 0.184414 | 0.150532 | 0.113262 | 0.07938 | 0.07938 | 0.040658 | 0 | 0.011642 | 0.199667 | 3,005 | 81 | 86 | 37.098765 | 0.847401 | 0.349418 | 0 | 0.045455 | 0 | 0 | 0.091241 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.045455 | false | 0 | 0.113636 | 0 | 0.181818 | 0.022727 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7f2c6071c31a2ed0bd4232d7ae1073c3257e303e | 8,520 | py | Python | src/data_preparation/data_prep.py | christianhilscher/dynasim | 881cfd3bd9d4b9291d289d703ec7da4a617a479a | [
"MIT"
] | null | null | null | src/data_preparation/data_prep.py | christianhilscher/dynasim | 881cfd3bd9d4b9291d289d703ec7da4a617a479a | [
"MIT"
] | 2 | 2020-08-06T10:01:59.000Z | 2021-05-17T12:14:44.000Z | src/data_preparation/data_prep.py | christianhilscher/dynasim | 881cfd3bd9d4b9291d289d703ec7da4a617a479a | [
"MIT"
] | 2 | 2020-08-19T06:52:09.000Z | 2021-12-10T08:57:54.000Z | import numpy as np
import pandas as pd
def SOEP_to_df_old(dataf):
"""
This function takes the SOEP data as a dataframe and returns the harmonized data so that the rest of the code can work with it. It also renames the columns etc.
"""
dataf = dataf.copy()
# Checking whether some adjustments have already been made
if "emplstatus" in dataf.columns.tolist():
dataf = dataf.drop(['emplstatus', 'married_h'], axis = 1)
print('Attention, this data is not the original SOEP data but already preprocessed.')
else:
dataf = dataf
dataf = dataf.rename(columns={'syear': 'year',
'phrf': 'personweight',
'pglabgro': 'gross_earnings',
'hhrf': 'hhweight',
'hgheat': 'heizkosten',
'kaltmiete': 'bruttokaltmiete',
'kind': 'child',
'pgpsbil': 'education',
'whours_actual': 'hours'
})
dataf['orighid'] = dataf['hid']
# For now motherpid is 0 as a placeholder and maximum age is set to 99
dataf['motherpid'] = 0
dataf['age_max'] = 99
dataf = _numeric_education(dataf)
dataf = _numeric_employment_status(dataf)
dataf = _numeric_laborforce(dataf)
dataf = _numeric_working(dataf)
dataf = _numeric_hours(dataf)
dataf = _numeric_migration(dataf)
dataf = make_hh_vars(dataf)
return dataf
def _numeric_education(dataf):
dataf = dataf.copy()
dataf.loc[:, "educ"] = 0
dataf.loc[(dataf['education'] == "[1] Hauptschulabschluss"), "educ"] = 0
dataf.loc[(dataf['education'] == "[2] Realschulabschluss"), "educ"] = 1
dataf.loc[(dataf['education'] == "[3] Fachhochschulreife"), "educ"] = 2
dataf.loc[(dataf['education'] == "[4] Abitur"), "educ"] = 3
dataf.loc[(dataf['education'] == "[5] Anderer Abschluss"), "educ"] = 4
dataf.loc[(dataf['education'] == "[6] Ohne Abschluss verlassen"), "educ"] = 5
dataf.loc[(dataf['education'] == "[7] Noch kein Abschluss"), "educ"] = 6
dataf.drop("education", axis = 1, inplace = True)
dataf.rename(columns={'educ': 'education'}, inplace=True)
return dataf
def _numeric_employment_status(dataf):
dataf = dataf.copy()
dataf.loc[:, "emp"] = 0
dataf.loc[(dataf['employment_status'] == "Teilzeit"), "emp"] = 2
dataf.loc[(dataf['employment_status'] == "Vollzeit"), "emp"] = 3
dataf.loc[(dataf['employment_status'] == "Bildung"), "emp"] = 0
dataf.loc[(dataf['employment_status'] == "Nicht erwerbstaetig"), "emp"] = 0
dataf.loc[(dataf['employment_status'] == "Rente"), "emp"] = 1
dataf.drop("employment_status", axis = 1, inplace = True)
dataf.rename(columns={'emp': 'employment_status'}, inplace=True)
dataf['fulltime'] = 0
dataf.loc[dataf['employment_status'] == 3, 'fulltime'] = 1
return dataf
def _numeric_laborforce(dataf):
dataf = dataf.copy()
dataf.loc[:,'lfs'] = 0
dataf.loc[dataf['pglfs'] == '[11] Working', 'lfs'] = 1
dataf.loc[dataf['pglfs'] == "[12] Working but NW past 7 days" , 'lfs'] = 1
dataf.drop("pglfs", axis = 1, inplace = True)
return dataf
def _numeric_working(dataf):
dataf = dataf.copy()
dataf.loc[:,'working'] = 0
dataf.loc[dataf['employment_status'] == 2, 'working'] = 1
dataf.loc[dataf['employment_status'] == 3, 'working'] = 1
return dataf
def _numeric_migration(dataf):
dataf = dataf.copy()
dataf['migration'] = 0
dataf.loc[dataf['migback'] == 0, 'migration'] = 1
dataf.loc[dataf['migback'] == "[1] kein Migrationshintergrund", 'migration'] = 0
dataf.drop('migback', axis=1, inplace=True)
dataf.rename(columns={'migration': 'migback'}, inplace=True)
return dataf
def _numeric_hours(dataf):
dataf = dataf.copy()
condition = [type(typ)==str for typ in dataf['hours']]
dataf.loc[condition, 'hours'] = np.nan
dataf['hours'] = dataf['hours'].astype(np.float64)
dataf.loc[(dataf["hours"].isna()) & (dataf["employment_status"] == 0) & (dataf["lfs"]==0), "hours"] = 0
dataf.loc[(dataf["hours"].isna()) & (dataf["employment_status"] == 1) & (dataf["lfs"]==0), "hours"] = 0
return dataf
# Making household wide variables
def make_hh_vars(dataf):
dataf = dataf.copy()
dataf = _get_multiindex(dataf)
dataf = _hh_income(dataf)
dataf = _hh_age_youngest(dataf)
dataf = _hh_fraction_working(dataf)
# dataf = _hh_children(dataf)
# dataf = indicate_births(dataf)
dataf = _indicate_birth(dataf)
dataf.reset_index(inplace=True, drop=True)
return dataf
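# --- Illustrative sketch (not part of the original module) ---
# The _hh_* helpers below all rely on the same pandas idiom: aggregate over the
# (year, hid) MultiIndex and assign the result back, which broadcasts one value
# per household to every member row. In isolation:
#
# import pandas as pd
# df = pd.DataFrame({'year': [2000, 2000, 2000], 'hid': [1, 1, 2],
#                    'gross_earnings': [100.0, 50.0, 80.0]})
# df.index = pd.MultiIndex.from_arrays([df['year'], df['hid']],
#                                      names=['year', 'hid'])
# df['hh_income'] = df.groupby(level=['year', 'hid'])['gross_earnings'].sum()
# # rows of household 1 get 150.0, the row of household 2 gets 80.0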
def _get_tupleindices(dataf):
years = dataf['year'].tolist()
hids = dataf['hid'].tolist()
return list(zip(years, hids))
def _get_multiindex(dataf):
dataf = dataf.copy()
index_list = _get_tupleindices(dataf)
mindex = pd.MultiIndex.from_tuples(index_list, names=['year' ,
'hid'])
dataf_out = dataf.set_index(mindex)
dataf_out = dataf_out.sort_index(level=1)
return dataf_out
def _hh_income(dataf):
dataf = dataf.copy()
earnings = dataf.groupby(level=['year', 'hid'])['gross_earnings'].sum()
dataf['hh_income'] = earnings
return dataf
def _hh_size(dataf):
dataf = dataf.copy()
size = dataf.groupby(level=['year', 'hid'])['gross_earnings'].size()
dataf['n_people'] = size
return dataf
def _hh_children(dataf):
dataf = dataf.copy()
children = dataf.groupby(level=['year', 'hid'])['child'].sum()
dataf['n_children'] = children
return dataf
def _hh_fraction_working(dataf):
dataf = dataf.copy()
dataf = _hh_size(dataf)
dataf = _hh_children(dataf)
total = dataf.groupby(level=['year', 'hid'])['working'].sum()
dataf['total_working'] = total
dataf['n_adults'] = dataf['n_people'] - dataf['n_children']
dataf['hh_frac_working'] = dataf['total_working']/dataf['n_adults']
dataf.loc[dataf['n_adults']==0, 'hh_frac_working'] = 0
# Children could also be working, but bound it at 1
dataf.loc[dataf["hh_frac_working"]>1, "hh_frac_working"] = 1
dataf.drop(['total_working', 'n_adults'], axis=1, inplace=True)
return dataf
def _hh_age_youngest(dataf):
dataf = dataf.copy()
smallest_age = dataf.groupby(level=['year', 'hid'])['age'].min()
dataf['hh_youngest_age'] = smallest_age
return dataf
def _make_motherpid(dataf):
dataf = dataf.copy()
# Mothers in childbearing age
interv = np.arange(18, 50)
mother_cond = (dataf["female"]==1) & (dataf["age"].isin(interv))
child_cond = dataf["child"]==1
baby_df = dataf[mother_cond | child_cond]
rest_df = dataf[(~mother_cond) & (~child_cond)]
baby_hh = baby_df.groupby("pid")["hid"].median()
baby_df = pd.merge(baby_df, baby_hh, on="pid", suffixes=("_current", ""))
baby_df.drop("hid_current", axis=1, inplace=True)
mother_pids = baby_df[baby_df["child"]==0].groupby("hid")["pid"].min()
merged = pd.merge(baby_df, mother_pids, on="hid", suffixes=("", "mother_pid"))
merged.loc[merged["child"]==1, "motherpid"] = merged.loc[merged["child"]==1, "pidmother_pid"]
merged.drop("pidmother_pid",axis=1, inplace=True)
df_out = pd.concat([rest_df, merged])
return df_out
def indicate_births(dataf):
dataf = dataf.copy()
df_motherpids = _make_motherpid(dataf)
tmp = df_motherpids.loc[df_motherpids["motherpid"]!=0, :].groupby("pid")[["year", "age", "motherpid"]].min()
tmp["child_birthyear"] = tmp["year"] - tmp["age"]
tmp.reset_index(inplace=True, drop=True)
mother_birth_list = list(zip(tmp["motherpid"], tmp["child_birthyear"]))
df_motherpids["mother_pid"] = list(zip(df_motherpids["pid"], df_motherpids["year"]))
df_motherpids.loc[df_motherpids["mother_pid"].isin(mother_birth_list), "birth"] = 1
df_motherpids.drop("mother_pid", axis=1, inplace=True)
return df_motherpids
def _indicate_birth(dataf):
"""
Indicates whether a mother has had a baby in that particular year
"""
dataf = dataf.copy()
minage = dataf.groupby(level=['year', 'hid'])['age'].min()
dataf["minage"] = minage
dataf["birth"] = 0
dataf.loc[(dataf["minage"]==0)&(dataf["female"]==1)&(dataf["child"]==0), "birth"] = 1
dataf.drop("minage", axis=1, inplace=True)
return dataf | 32.643678 | 169 | 0.617019 | 1,067 | 8,520 | 4.761949 | 0.206186 | 0.098406 | 0.061405 | 0.056091 | 0.301122 | 0.194844 | 0.096635 | 0.030703 | 0 | 0 | 0 | 0.013176 | 0.21608 | 8,520 | 261 | 170 | 32.643678 | 0.747567 | 0.061737 | 0 | 0.179191 | 0 | 0 | 0.202815 | 0.002639 | 0 | 0 | 0 | 0 | 0 | 1 | 0.104046 | false | 0 | 0.011561 | 0 | 0.219653 | 0.00578 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7f2cd64976be46ed25ddd2ae11ac7035e1a5d54f | 1,404 | py | Python | Tracker_LoRa/test_sdb_sof/main.py | pbaron2/tracker-lora | e713ae090748c4416c4c689b1c2a2dcc7cc44a11 | [
"MIT"
] | null | null | null | Tracker_LoRa/test_sdb_sof/main.py | pbaron2/tracker-lora | e713ae090748c4416c4c689b1c2a2dcc7cc44a11 | [
"MIT"
] | null | null | null | Tracker_LoRa/test_sdb_sof/main.py | pbaron2/tracker-lora | e713ae090748c4416c4c689b1c2a2dcc7cc44a11 | [
"MIT"
] | null | null | null | from network import LoRa
import socket
import time
modes=[LoRa.LORA]
coding=[LoRa.CODING_4_5,LoRa.CODING_4_6,LoRa.CODING_4_7,LoRa.CODING_4_8]
pub=[True,False]
for k in modes:
for i in range(0,16):
for j in range(0,6):
for l in coding:
for p in pub:
#LoRa
# Europe = LoRa.EU868
#print("TEST1")
lora = LoRa(mode=k, region=LoRa.EU868)
#print("TEST2")
lora.init(frequency=885000000, mode=k, tx_power=14, sf=7+int(j),preamble=int(i),coding_rate=l,public=p)
#print("TEST3")
s = socket.socket(socket.AF_LORA, socket.SOCK_RAW)
#print("TEST4")
s.setblocking(False)
#print("LoRa initialise !")
flag= True
compteur = 0
print('Start of test:', 'sf =', 7 + int(j), ', mode =', k, ', preamble =', i, ', coding_rate =', l)
while flag:
data = s.recv(128)
if data != b'':
print (data)
time.sleep(0.1)
compteur+=1
if compteur >= 10:
flag=False
s.close()
print('End of test\n')
| 28.653061 | 123 | 0.421652 | 156 | 1,404 | 3.711538 | 0.435897 | 0.086356 | 0.075993 | 0.02418 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.05984 | 0.464387 | 1,404 | 48 | 124 | 29.25 | 0.710106 | 0.075499 | 0 | 0 | 0 | 0 | 0.048799 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.107143 | 0 | 0.107143 | 0.107143 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7f2e724ea35fff6427fd9fe2bf26c971384b0524 | 1,268 | py | Python | setup.py | lnknguyen/niimpy | 2aebd1d59b0b7562103128d5eaff90f20091f6fd | [
"MIT"
] | null | null | null | setup.py | lnknguyen/niimpy | 2aebd1d59b0b7562103128d5eaff90f20091f6fd | [
"MIT"
] | null | null | null | setup.py | lnknguyen/niimpy | 2aebd1d59b0b7562103128d5eaff90f20091f6fd | [
"MIT"
] | null | null | null | #!/usr/bin/env python
from setuptools import setup, find_packages
from os.path import join, dirname
with open("README.md", "r") as fh:
long_description = fh.read()
version_ns = { }
exec(open('niimpy/_version.py').read(), version_ns)
version = version_ns['__version__']
del version_ns
requirementstxt = join(dirname(__file__), "requirements.txt")
requirements = [ line.strip() for line in open(requirementstxt, "r") if line.strip() ]
setup(name='niimpy',
version=version,
description='Behavorial data analysis',
long_description=long_description,
long_description_content_type='text/markdown',
author='Richard Darst',
author_email='rkd@zgib.net',
url='https://github.com/digitraceslab/niimpy',
#packages=['niimpy', 'niimpy.preprocessing', 'niimpy.reading'],
packages=find_packages(where='.'),
package_data={'niimpy': ['sampledata/*.sqlite3', 'sampledata/*.csv']},
include_package_data=True,
python_requires=">=3.6",
install_requires=requirements,
classifiers=[
"Programming Language :: Python :: 3",
"Development Status :: 3 - Alpha",
"Operating System :: OS Independent",
"Topic :: Scientific/Engineering :: Information Analysis",
],
)
| 32.512821 | 86 | 0.67429 | 141 | 1,268 | 5.87234 | 0.602837 | 0.072464 | 0.031401 | 0.072464 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.004798 | 0.178233 | 1,268 | 38 | 87 | 33.368421 | 0.789827 | 0.064669 | 0 | 0 | 0 | 0 | 0.309122 | 0.018581 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.066667 | 0 | 0.066667 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7f3143b48ee084919d22e8532883574dc131cbba | 4,374 | py | Python | robovat/io/hdf5_utils.py | leobxpan/robovat | 0d360c34c677cf018c4daab0b8e758943ae1d2c1 | [
"MIT"
] | 62 | 2020-04-08T11:26:24.000Z | 2021-09-06T02:45:53.000Z | robovat/io/hdf5_utils.py | leobxpan/robovat | 0d360c34c677cf018c4daab0b8e758943ae1d2c1 | [
"MIT"
] | 7 | 2020-04-12T13:10:10.000Z | 2022-03-12T00:15:03.000Z | robovat/io/hdf5_utils.py | leobxpan/robovat | 0d360c34c677cf018c4daab0b8e758943ae1d2c1 | [
"MIT"
] | 17 | 2020-04-12T17:37:01.000Z | 2021-09-07T01:51:46.000Z | """File IO utilities using HDF5.
The data element saved in HDF5 should be a dictionary. The types of each value
should be in HDF5_DATA_TYPES.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import h5py
import numpy as np
import uuid
import traceback
def write_data_to_hdf5(f, data, compress_size_thresh=100):
"""Wrte data to HDF5 group.
Args:
f: The HDF5 group to write the data to.
data: The data to be writtent, which can be a group, a dict or a list.
compress_size_thresh: Data larger or equal to this size will be
compressed by the gzip format.
"""
for key, value in data.items():
if isinstance(value, dict):
group = f.create_group(key)
write_data_to_hdf5(group, value)
elif isinstance(value, list):
group_list = f.create_group(key + '[]')
for i, value_i in enumerate(value):
assert isinstance(value_i, (dict, np.ndarray)), (
'List \'%s\' has type %s value %s, which is forbidden.'
' Lists should only have dict or numpy.ndarray values.'
% (key, type(value_i), value_i))
group = group_list.create_group(str(i))
write_data_to_hdf5(group, value_i)
else:
try:
if value is None:
f[key] = 'None'
else:
value = np.array(value)
if np.prod(value.shape) >= compress_size_thresh:
f.create_dataset(
key,
data=value,
compression="gzip", compression_opts=9)
else:
f.create_dataset(key, data=value)
except Exception:
traceback.print_exc()
raise ValueError('Unsupported data \'%s\' of type %s.'
% (key, type(value)))
def read_data_from_hdf5(f):
"""Read data from HDF5 group.
Args:
f: The HDF5 group to read the data from.
Returns:
The data read from the group.
"""
data = dict()
for key, value in f.items():
if isinstance(value, h5py._hl.group.Group):
if key[-2:] != '[]':
# Read dictionary.
data[key] = read_data_from_hdf5(value)
else:
# Read list.
list_var = [None] * len(value)
for ind, element in value.items():
list_var[int(ind)] = read_data_from_hdf5(element)
data[key[:-2]] = list_var
else:
# Read numpy array or scalar.
value = value.value
if value == 'None':
data[key] = None
else:
value = np.array(value)
if value.shape == ():
data[key] = np.asscalar(value)
else:
data[key] = value
# assert isinstance(value, h5py._hl.dataset.Dataset), (
# 'Item \'%s\' has type %s.' % (key, type(value)))
# data[key] = np.array(value.value)
return data
class HDF5Writer(object):
"""A class to dump pickle to file.
"""
def __init__(self, filename):
"""Initialize.
Args:
filename: The filename of the HDF5 file.
"""
self._file = h5py.File(filename, 'w')
def write(self, data):
"""Write data to a pickle file.
Args:
data: An element of the data.
"""
assert isinstance(data, dict)
name = str(uuid.uuid4())
group = self._file.create_group(name)
write_data_to_hdf5(group, data)
def close(self):
"""Close the HDF5 file.
"""
self._file.close()
def read(filename):
"""Read data from an HDF5 file.
Args:
filename: The path to the HDF5 file.
Yields:
data: An element of the data.
"""
with h5py.File(filename, 'r') as f:
for name, group in f.items():
try:
data = read_data_from_hdf5(group)
yield data
except Exception:
raise ValueError('Errors in reading data from the HDF5 file.')
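# --- Illustrative usage (not part of the original module; the path is arbitrary) ---
# A minimal round trip through the helpers above:
#
# writer = HDF5Writer('/tmp/demo.hdf5')
# writer.write({'action': np.zeros(3), 'info': {'label': 1}})
# writer.close()
# for element in read('/tmp/demo.hdf5'):
#     print(element['info']['label'])  # -> 1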
| 30.587413 | 79 | 0.518747 | 521 | 4,374 | 4.214971 | 0.241843 | 0.021858 | 0.032787 | 0.03643 | 0.17623 | 0.119308 | 0.052823 | 0.052823 | 0 | 0 | 0 | 0.012263 | 0.384774 | 4,374 | 142 | 80 | 30.802817 | 0.80379 | 0.238455 | 0 | 0.166667 | 0 | 0 | 0.060309 | 0 | 0 | 0 | 0 | 0 | 0.025641 | 1 | 0.076923 | false | 0 | 0.089744 | 0 | 0.192308 | 0.025641 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7f33ed65f4ac4089a0724c48004c99fbc453b64f | 344 | py | Python | tests/python/test_sparse_basics.py | ericjang/taichi | 0dec0bac6d9c16b7b62ba528f9e0d4268d1d05d2 | [
"MIT"
] | null | null | null | tests/python/test_sparse_basics.py | ericjang/taichi | 0dec0bac6d9c16b7b62ba528f9e0d4268d1d05d2 | [
"MIT"
] | null | null | null | tests/python/test_sparse_basics.py | ericjang/taichi | 0dec0bac6d9c16b7b62ba528f9e0d4268d1d05d2 | [
"MIT"
] | null | null | null | import taichi as ti
@ti.program_test
def test_while():
x = ti.var(ti.f32)
s = ti.var(ti.i32)
n = 128
@ti.layout
def place():
ti.root.dense(ti.i, n).bitmasked().dense(ti.i, n).place(x)
ti.root.place(s)
@ti.kernel
def func():
for i in x:
ti.atomic_add(s[None], 1)
x[0] = 1
func()
assert s[None] == 128
| 14.333333 | 62 | 0.572674 | 65 | 344 | 2.984615 | 0.476923 | 0.046392 | 0.072165 | 0.092784 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.05 | 0.244186 | 344 | 23 | 63 | 14.956522 | 0.696154 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.058824 | 1 | 0.176471 | false | 0 | 0.058824 | 0 | 0.235294 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7f34018b23497a72c128d5bbb73251d18c61b077 | 2,752 | py | Python | idea.py | szandala/autoencoder | 3e9e41fa3f0f69a9e12acfa661e0de05bf190aeb | [
"MIT"
] | null | null | null | idea.py | szandala/autoencoder | 3e9e41fa3f0f69a9e12acfa661e0de05bf190aeb | [
"MIT"
] | null | null | null | idea.py | szandala/autoencoder | 3e9e41fa3f0f69a9e12acfa661e0de05bf190aeb | [
"MIT"
] | null | null | null | from keras.models import Sequential
from keras.layers import Dense, Conv2D, Flatten, MaxPooling2D
from sklearn.neighbors import NearestNeighbors
from keras.datasets import cifar10
import numpy as np
from keras import backend as K
################################################################
# input image dimensions
img_rows, img_cols = 32, 32
# the data, split between train and test sets
(x_train, y_train), (x_test, y_test) = cifar10.load_data()
if K.image_data_format() == 'channels_first':
x_train = x_train.reshape(x_train.shape[0], 1, img_rows, img_cols)
x_test = x_test.reshape(x_test.shape[0], 1, img_rows, img_cols)
input_shape = (1, img_rows, img_cols)
else:
x_train = x_train.reshape(x_train.shape[0], img_rows, img_cols, 3)
x_test = x_test.reshape(x_test.shape[0], img_rows, img_cols, 3)
input_shape = (img_rows, img_cols, 3)
x_train = x_train.astype('float32')
x_test = x_test.astype('float32')
x_train /= 255
x_test /= 255
print('x_train shape:', x_train.shape)
print(x_train.shape[0], 'train samples')
print(x_test.shape[0], 'test samples')
################################################################
x = np.array([image.flatten() for image in x_train])
print(x[0])
# import sys
# sys.exit(0)
model = Sequential()
# hidden layers
model.add(Conv2D(64, kernel_size=(3, 3),
activation='relu',
input_shape=input_shape))
model.add(Conv2D(64, kernel_size=(3, 3), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Flatten())
model.add(Dense(48, activation='relu'))
# output
model.add(Dense(img_rows * img_cols * 3, activation='linear'))
# Compile model
model.compile(loss='mean_squared_error', optimizer='adam', metrics=['mean_squared_error'])
# Fit the model
model.fit(x_train, x, epochs=3, batch_size=10)
# remove last layer
model.pop()
# prepare map of points
points_values = model.predict(x_train)
print(points_values[0])
# https://stackabuse.com/k-nearest-neighbors-algorithm-in-python-and-scikit-learn/
knn = NearestNeighbors()
knn.fit(points_values)
for image in [120, 300, 700, 1000]:
fitting = knn.kneighbors([points_values[image]], n_neighbors=5, return_distance=False)[0]
for fit in fitting:
print("ID: {}, class: {}".format(fit, y_train[fit]))
# ID: 120, class: [2]
# ID: 827, class: [2]
# ID: 7509, class: [0]
# ID: 30612, class: [2]
# ID: 49719, class: [4]
# ID: 300, class: [2]
# ID: 31691, class: [2]
# ID: 2573, class: [4]
# ID: 18888, class: [2]
# ID: 40966, class: [2]
# ID: 700, class: [0]
# ID: 35599, class: [0]
# ID: 33571, class: [8]
# ID: 36547, class: [8]
# ID: 48158, class: [8]
# ID: 1000, class: [9]
# ID: 20544, class: [7]
# ID: 24979, class: [1]
# ID: 40359, class: [0]
# ID: 17782, class: [7]
| 28.371134 | 93 | 0.65189 | 426 | 2,752 | 4.051643 | 0.316901 | 0.05562 | 0.04635 | 0.06489 | 0.178447 | 0.161066 | 0.151796 | 0.117034 | 0.117034 | 0.048667 | 0 | 0.075777 | 0.146439 | 2,752 | 96 | 94 | 28.666667 | 0.659004 | 0.24782 | 0 | 0 | 0 | 0 | 0.074346 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.133333 | 0 | 0.133333 | 0.133333 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7f343eb3682ac8f3b2f2549023501deaa06faf0e | 2,291 | py | Python | setup.py | copernicusmarine/cmemsapi | b2b2f8e9c80d989fe1aa1374d8174a30c819847e | [
"MIT"
] | null | null | null | setup.py | copernicusmarine/cmemsapi | b2b2f8e9c80d989fe1aa1374d8174a30c819847e | [
"MIT"
] | null | null | null | setup.py | copernicusmarine/cmemsapi | b2b2f8e9c80d989fe1aa1374d8174a30c819847e | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (C) 2020 E.U. Copernicus Marine Service Information
import sys
from pathlib import Path # noqa E402
from setuptools import find_packages, setup
assert sys.version_info >= (3, 6, 0), "cmtb requires Python 3.6+"
CURRENT_DIR = Path(__file__).parent
sys.path.insert(0, str(CURRENT_DIR))
#def get_long_description() -> str:
# return (
# (CURRENT_DIR / "README.md").read_text(encoding="utf8")
# + "\n\n"
# + (CURRENT_DIR / "CHANGES.md").read_text(encoding="utf8")
#)
#with open('HISTORY.rst') as history_file:
# history = history_file.read()
#with open('README.md') as readme_file:
# README = readme_file.read()
#REQUIREMENTS = [line.strip() for line in open('requirements_prod.txt')]
REQUIREMENTS = ["dask fire funcy ipython jedi<0.18.0 lxml motuclient==1.8.4 netCDF4<=1.5.4 pandas requests scipy toolz xarray ".split(' ')]
SETUP_REQUIREMENTS = []
TEST_REQUIREMENTS = []
setup(
author="E.U. Copernicus Marine Service Information",
author_email='servicedesk.cmems@mercator-ocean.eu',
python_requires='>=3.6',
classifiers=[
'Development Status :: 2 - Pre-Alpha',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Natural Language :: English',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Topic :: Scientific/Engineering',
],
description="A package to help generating reliable data requests"
" about earth observation and marine related information "
"from Copernicus Marine Database.",
install_requires=REQUIREMENTS,
license="MIT",
long_description='long description',
long_description_content_type="text/markdown",
include_package_data=True,
keywords='cmemsapi',
name='cmemsapi',
packages=find_packages(include=['cmemsapi', 'cmemsapi.*']),
setup_requires=SETUP_REQUIREMENTS,
test_suite='tests',
tests_require=TEST_REQUIREMENTS,
url='https://github.com/copernicusmarine/cmemsapi',
version='0.1.17',
zip_safe=False,
entry_points={'console_scripts':['cmust=cmemsapi.cmemsapi:cli']},
)
| 33.202899 | 139 | 0.67787 | 278 | 2,291 | 5.446043 | 0.568345 | 0.005284 | 0.049538 | 0.051519 | 0.076618 | 0.047556 | 0 | 0 | 0 | 0 | 0 | 0.021299 | 0.180271 | 2,291 | 68 | 140 | 33.691176 | 0.784878 | 0.226102 | 0 | 0 | 0 | 0.023256 | 0.473265 | 0.047782 | 0 | 0 | 0 | 0 | 0.023256 | 1 | 0 | false | 0 | 0.069767 | 0 | 0.069767 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7f34aa81b3070681377031f89c73e262ce096a91 | 1,520 | py | Python | application/services/tag_service.py | Praveen-0208/developer-cheatsheet | 0fe494c2b01e320891c4048172569eacd15c73a4 | [
"Apache-2.0"
] | null | null | null | application/services/tag_service.py | Praveen-0208/developer-cheatsheet | 0fe494c2b01e320891c4048172569eacd15c73a4 | [
"Apache-2.0"
] | null | null | null | application/services/tag_service.py | Praveen-0208/developer-cheatsheet | 0fe494c2b01e320891c4048172569eacd15c73a4 | [
"Apache-2.0"
] | null | null | null | from application.models.Tag import Tag
from application.app import db
from flask import jsonify
import traceback
class TagService:
def create_tag(self, params):
try:
tag_object = Tag(tag_name = params["tag_name"])
db.session.add(tag_object)
db.session.commit()
return {"Message": "Tag created successfully"}, 200
except Exception as ex:
return {"Message": "Something went wrong", "exception": traceback.format_exc()}, 500
def get_all_tags(self):
try:
tags = Tag.query.filter_by(is_deleted= False).all()
response = [{"id": tag.id, "tag_name": tag.tag_name, "is_deleted": tag.is_deleted, "created_date": tag.created_date, "updated_date": tag.updated_date } for tag in tags]
return jsonify(response), 200
except Exception as ex:
return {"Message": "Something went wrong", "exception": traceback.format_exc()}, 500
def update_tag(self, id, params):
try:
tag = Tag.query.filter_by(id=id, is_deleted=False).first()
setattr(tag, "tag_name", params["tag_name"])
db.session.commit()
return {"Message": "Tag updated successfully"}, 200
except Exception as ex:
return {"Message": "Something went wrong", "exception": traceback.format_exc()}, 500
def delete_tag(self, id):
try:
tag = Tag.query.filter_by(id=id, is_deleted=False).first()
setattr(tag, "is_deleted", True)
db.session.commit()
return {"Message": "Tag deleted successfully"}, 200
except Exception as ex:
return {"Message": "Something went wrong", "exception": traceback.format_exc()}, 500 | 38 | 171 | 0.709868 | 215 | 1,520 | 4.87907 | 0.265116 | 0.086749 | 0.068637 | 0.076263 | 0.602479 | 0.602479 | 0.522402 | 0.522402 | 0.464252 | 0.464252 | 0 | 0.018547 | 0.148684 | 1,520 | 40 | 172 | 38 | 0.792117 | 0 | 0 | 0.472222 | 0 | 0 | 0.207101 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.111111 | false | 0 | 0.111111 | 0 | 0.472222 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7f38e58a4f42e68862f6e882856a02eb76e3e7d3 | 479 | py | Python | fopp/Chapter 12. Functions/num_test.py | H2u-Hwng/EVC | c650fe7356a333011514cf9025dfd97bf71b1de3 | [
"MIT"
] | null | null | null | fopp/Chapter 12. Functions/num_test.py | H2u-Hwng/EVC | c650fe7356a333011514cf9025dfd97bf71b1de3 | [
"MIT"
] | null | null | null | fopp/Chapter 12. Functions/num_test.py | H2u-Hwng/EVC | c650fe7356a333011514cf9025dfd97bf71b1de3 | [
"MIT"
] | null | null | null | # Check number and return result
def num_test(num):
# Check number
if num > 10:
return 'Greater than 10.'
elif num < 10:
return 'Less than 10.'
else:
return 'Equal to 10.'
# Define main function
def main():
# Prompt user for a number
number = float(input('Enter a number: '))
# Check number
check_number = num_test(number)
# Display result
print(check_number)
# Call main function only when the script is executed directly
if __name__ == '__main__':
    main()
| 17.740741 | 45 | 0.586639 | 62 | 479 | 4.467742 | 0.483871 | 0.198556 | 0.079422 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.03096 | 0.325679 | 479 | 26 | 46 | 18.423077 | 0.826625 | 0.283925 | 0 | 0 | 0 | 0 | 0.170149 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.166667 | false | 0 | 0 | 0 | 0.416667 | 0.083333 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7f3cda38afd2ff0962b2ae79ebdaa829c5bed6a1 | 556 | py | Python | example/nn-andor.py | Sean-Mabli/aiinpy | 827e4f85861436c0332046fa8aa84e24153513d6 | [
"MIT"
] | 2 | 2021-04-19T21:49:34.000Z | 2021-05-17T21:03:08.000Z | example/nn-andor.py | Sean-Mabli/aiinpy | 827e4f85861436c0332046fa8aa84e24153513d6 | [
"MIT"
] | null | null | null | example/nn-andor.py | Sean-Mabli/aiinpy | 827e4f85861436c0332046fa8aa84e24153513d6 | [
"MIT"
] | null | null | null | import aiinpy as ai
import numpy as np
# Create Dataset
inTrainData = np.random.choice(([0, 1]), (2, 100))
outTrainData = np.zeros((2, 100))
for i in range(100):
outTrainData[:, i] = [1, 0] if sum(inTrainData[:, i]) == 1 else [0, 1]
# NN model
model = ai.model(2, 2, [
ai.nn(outshape=16, activation=ai.relu(), learningrate=0.1),
ai.nn(outshape=16, activation=ai.relu(), learningrate=0.1),
ai.nn(outshape=2, activation=ai.sigmoid(), learningrate=0.1)
])
model.train((inTrainData, outTrainData), 100)
print(model.test((inTrainData, outTrainData))) | 30.888889 | 72 | 0.681655 | 87 | 556 | 4.356322 | 0.402299 | 0.026385 | 0.094987 | 0.073879 | 0.263852 | 0.263852 | 0.263852 | 0.263852 | 0.263852 | 0.263852 | 0 | 0.070103 | 0.127698 | 556 | 18 | 73 | 30.888889 | 0.71134 | 0.041367 | 0 | 0.153846 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.153846 | 0 | 0.153846 | 0.076923 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7f3e415b02824042a222510b85f1f06332620f82 | 10,212 | py | Python | evaluation.py | twankim/svdped | afe67b5c4636bca03a6d68b7a9cf89485a4dabec | [
"Apache-2.0"
] | 1 | 2020-01-02T03:01:28.000Z | 2020-01-02T03:01:28.000Z | evaluation.py | twankim/svdped | afe67b5c4636bca03a6d68b7a9cf89485a4dabec | [
"Apache-2.0"
] | null | null | null | evaluation.py | twankim/svdped | afe67b5c4636bca03a6d68b7a9cf89485a4dabec | [
"Apache-2.0"
] | null | null | null | # Copyright 2017 Tensorflow. All Rights Reserved.
# Modifications copyright 2018 UT Austin/Saharsh Oza & Taewan Kim
# We follow the object detection API of Tensorflow
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import sys
import csv
import numpy as np
import tensorflow as tf
import _init_paths
from object_detection.utils import label_map_util
from object_detection.utils import visualization_utils as vis_util
from object_detection.utils import metrics
from object_detection.utils import object_detection_evaluation as obj_eval
from object_detection.core import standard_fields
from skvideo.io import FFmpegWriter
from skimage.io import imread
tf.app.flags.DEFINE_string('gt_dir', '', 'Location of root directory for the '
'ground truth data. Folder structure is assumed to be:'
'<gt_dir>/cstopp_train.tfrecord,'
'<gt_dir>/cstopp_test.tfrecord'
'<gt_dir>/cstopp_val.tfrecord')
tf.app.flags.DEFINE_string('det_dir', '', 'Location of root directory for the '
'inference data. Folder structure is assumed to be:'
'<det_dir>/cstopp_train.tfrecord,'
'<det_dir>/cstopp_test.tfrecord'
'<det_dir>/cstopp_val.tfrecord')
tf.app.flags.DEFINE_string('output_dir', '', 'Path to which metrics'
'will be written.')
tf.app.flags.DEFINE_string('split', 'train', 'Data split when record file is being read from gt_dir and det_dir ex: train, test, val')
tf.app.flags.DEFINE_string(
'label_map_path',
'configs/cstopp_label_map.pbtxt',
'file path for the labels')
tf.app.flags.DEFINE_integer(
'num_class', 1,
'Number of Classes to consider from 1 in the label map')
tf.app.flags.DEFINE_boolean(
'is_vout', False, 'Generate a video with bounding boxes')
FLAGS = tf.app.flags.FLAGS
gt_feature = {
'image/object/bbox/ymin': tf.VarLenFeature(tf.float32),
'image/object/bbox/xmin': tf.VarLenFeature(tf.float32),
'image/object/bbox/ymax': tf.VarLenFeature(tf.float32),
'image/object/bbox/xmax': tf.VarLenFeature(tf.float32),
'image/object/class/label': tf.VarLenFeature(tf.int64),
'image/filename': tf.FixedLenFeature([], tf.string),
'image/object/difficult': tf.VarLenFeature(tf.int64),
}
det_feature = {
'image/object/bbox/ymin': tf.VarLenFeature(tf.float32),
'image/object/bbox/xmin': tf.VarLenFeature(tf.float32),
'image/object/bbox/ymax': tf.VarLenFeature(tf.float32),
'image/object/bbox/xmax': tf.VarLenFeature(tf.float32),
'image/object/class/label': tf.VarLenFeature(tf.int64),
'image/object/score': tf.VarLenFeature(tf.float32),
'image/filename': tf.FixedLenFeature([], tf.string),
}
class Reader:
def __init__(self, record_path, split, is_infer=False):
data_path = []
if is_infer:
data_path.append(os.path.join(record_path, 'cstopp_inference_{}.tfrecord'.format(split)))
else:
data_path.append(os.path.join(record_path, 'cstopp_{}.tfrecord'.format(split)))
self.read_graph = tf.Graph()
with self.read_graph.as_default():
# old_graph_def = tf.GraphDef()
self.filename_queue = tf.train.string_input_producer(data_path)
self.reader = tf.TFRecordReader()
self.num_records = 0
for f in data_path:
self.num_records += sum(1 for _ in tf.python_io.tf_record_iterator(f))
# tf.import_graph_def(old_graph_def, name='')
self.sess = tf.Session(graph=self.read_graph)
def get_field(self, field, decode=False):
if not decode:
if type(self.features[field])==tf.SparseTensor:
return tf.sparse_tensor_to_dense(self.features[field])
else:
return self.features[field]
else:
return tf.image.decode_png(self.features[field])
def get_fields(self, feature_dict):
# Modify graph to add these ops
with self.read_graph.as_default():
list_fields = feature_dict.keys()
# old_graph_def = tf.GraphDef()
# Read next record from queue
_, serialized_example = self.reader.read(self.filename_queue)
self.features = tf.parse_single_example(
serialized_example, features=feature_dict)
# Get required fields from record
fields_out = [self.get_field(f) for f in list_fields]
# Close queue
coord = tf.train.Coordinator()
threads = tf.train.start_queue_runners(sess=self.sess, coord=coord)
# Import updated graph in current read_graph
# tf.import_graph_def(old_graph_def, name='')
eval_out = self.sess.run(fields_out)
out_dict = dict(zip(list_fields, eval_out))
return out_dict
def get_bbox(box_list):
ymin_eval = box_list['image/object/bbox/ymin']
xmin_eval = box_list['image/object/bbox/xmin']
ymax_eval = box_list['image/object/bbox/ymax']
xmax_eval = box_list['image/object/bbox/xmax']
return np.vstack((ymin_eval,xmin_eval,ymax_eval,xmax_eval)).T
def write_metrics(metrics, output_path):
"""Write metrics to the output directory.
Args:
metrics: A dictionary containing metric names and values.
output_dir: Directory to write metrics to.
"""
tf.logging.info('Writing metrics.')
with open(output_path, 'w') as csvfile:
metrics_writer = csv.writer(csvfile, delimiter=',')
for metric_name, metric_value in metrics.items():
metrics_writer.writerow([metric_name, str(metric_value)])
def evaluate(gt_dir=FLAGS.gt_dir, det_dir=FLAGS.det_dir,
output_dir=FLAGS.output_dir, split='train',
label_map_path=None, is_vout=False, num_class=1, fps_out=5):
gt_reader = Reader(gt_dir, split)
num_records = gt_reader.num_records
det_reader = Reader(det_dir, split, is_infer=True)
label_map = label_map_util.load_labelmap(label_map_path)
categories = label_map_util.convert_label_map_to_categories(
label_map,
max_num_classes=num_class,
use_display_name=True)
evaluator = obj_eval.ObjectDetectionEvaluator(categories)
output_path = os.path.join(output_dir, 'cstopp_{}_eval.csv'.format(split))
if is_vout:
category_index = label_map_util.create_category_index(categories)
list_valid_ids = [int(cat_dict['id']) for cat_dict in categories]
vwriter = FFmpegWriter(os.path.join(output_dir,split+'_det_gt.mp4'),
inputdict={'-r':str(fps_out)},
outputdict={'-r':str(fps_out)})
for image_num in range(0, num_records):
print('Evaluating {}/{}'.format(image_num+1,num_records))
gt_fields = gt_reader.get_fields(gt_feature)
gt_bbox = get_bbox(gt_fields)
gt_classes = gt_fields['image/object/class/label'].astype(np.int32)
gt_diff = gt_fields['image/object/difficult']
det_fields = det_reader.get_fields(det_feature)
det_bbox = get_bbox(det_fields)
det_scores = det_fields['image/object/score']
det_classes = det_fields['image/object/class/label'].astype(np.int32)
filename = gt_fields['image/filename']
ground_dict = {
standard_fields.InputDataFields.groundtruth_boxes: gt_bbox,
standard_fields.InputDataFields.groundtruth_classes: gt_classes,
standard_fields.InputDataFields.groundtruth_difficult: gt_diff}
det_dict = {
standard_fields.DetectionResultFields.detection_boxes: det_bbox,
standard_fields.DetectionResultFields.detection_scores: det_scores,
standard_fields.DetectionResultFields.detection_classes: det_classes}
if is_vout:
image = imread(filename)
# Visualization of the results of a detection.
image_labeled = np.copy(image)
vis_util.visualize_boxes_and_labels_on_image_array(
image_labeled,
gt_bbox,
gt_classes,
None,
category_index,
max_boxes_to_draw=None,
min_score_thresh=0,
use_normalized_coordinates=True,
line_thickness=2)
idx_consider = [cid in list_valid_ids for cid in det_classes]
vis_util.visualize_boxes_and_labels_on_image_array(
image_labeled,
det_bbox[idx_consider,:],
det_classes[idx_consider],
det_scores[idx_consider],
category_index,
max_boxes_to_draw=None,
min_score_thresh=0,
use_normalized_coordinates=True,
line_thickness=2)
vwriter.writeFrame(image_labeled)
evaluator.add_single_ground_truth_image_info(filename, ground_dict)
evaluator.add_single_detected_image_info(filename, det_dict)
eval_result = evaluator.evaluate()
print(eval_result)
write_metrics(eval_result, output_path)
if is_vout:
vwriter.close()
if __name__ == '__main__':
evaluate(
gt_dir=FLAGS.gt_dir,
det_dir=FLAGS.det_dir,
output_dir=FLAGS.output_dir,
split=FLAGS.split,
label_map_path=FLAGS.label_map_path,
is_vout=FLAGS.is_vout,
num_class=FLAGS.num_class)
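
# Example invocation (paths are hypothetical). gt_dir must contain
# cstopp_<split>.tfrecord and det_dir cstopp_inference_<split>.tfrecord,
# matching the Reader filename patterns above:
#
#     python evaluation.py \
#         --gt_dir=/data/cstopp --det_dir=/data/cstopp_inference \
#         --output_dir=/tmp/eval --split=test \
#         --label_map_path=configs/cstopp_label_map.pbtxt \
#         --num_class=1 --is_vout
#
# Metrics are written to <output_dir>/cstopp_test_eval.csv and, with
# --is_vout, a test_det_gt.mp4 with ground-truth and detection boxes drawn.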
| 42.198347 | 134 | 0.658637 | 1,311 | 10,212 | 4.864226 | 0.234935 | 0.034499 | 0.028226 | 0.033872 | 0.295907 | 0.251372 | 0.205112 | 0.18504 | 0.162772 | 0.137055 | 0 | 0.006812 | 0.238151 | 10,212 | 241 | 135 | 42.373444 | 0.812853 | 0.122405 | 0 | 0.185792 | 0 | 0 | 0.144828 | 0.071909 | 0 | 0 | 0 | 0 | 0 | 1 | 0.032787 | false | 0 | 0.087432 | 0 | 0.153005 | 0.016393 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7f416087cddccd8f2f43da2aae4ed7a5d932a824 | 2,246 | py | Python | Tweet_Parser.py | berkehanozen/481project | e1f8f98871ad47bfc7ac2363824b86eca3ef80e3 | [
"MIT"
] | 1 | 2019-04-03T20:04:10.000Z | 2019-04-03T20:04:10.000Z | Tweet_Parser.py | berkehanozen/481project | e1f8f98871ad47bfc7ac2363824b86eca3ef80e3 | [
"MIT"
] | 8 | 2019-03-17T20:03:15.000Z | 2019-04-26T23:36:50.000Z | Tweet_Parser.py | berkehanozen/481project | e1f8f98871ad47bfc7ac2363824b86eca3ef80e3 | [
"MIT"
] | null | null | null | import tweepy
CONSUMER_KEY = 'EQxFiVurRxZRKZzH4tJ2PtsX8'
CONSUMER_SECRET = '59WSgueMvx7VUbuYywwC6rqwdlLUkc0yufiTnITvgmPJAN9RzE'
ACCESS_TOKEN = "349586645-4e7WmYpjvzKUmsKh3C9pNyv0QzlbVB80nlvR4q02"
ACCESS_TOKEN_SECRET = "qblYvvigttmP5elDFMsIJacO7gOknN794ubMThlyV0pfj"
auth = tweepy.OAuthHandler(CONSUMER_KEY, CONSUMER_SECRET)
auth.set_access_token(ACCESS_TOKEN, ACCESS_TOKEN_SECRET)
api = tweepy.API(auth)
class ParseTweets(object):
@staticmethod
def getTweets(userId):
user=api.get_user(userId)
if user.protected:
print("User is protected")
return ""
timeline=api.user_timeline(screen_name=userId,count=5,tweet_mode="extended")
tweetTexts=[]
followerCount=user.followers_count
followingCount=user.friends_count
tweetCount=user.statuses_count
for tweet in timeline: #taking tweet texts
if 'retweeted_status' in tweet._json: #getting full text for retweets
tweetTexts.append(tweet._json['retweeted_status']['full_text'])
else:
tweetTexts.append(tweet.full_text) #getting full text for self tweets
tweetDates=[[tweet.created_at]for tweet in timeline]#taking tweet dates
imageUrls=[]
for tweet in timeline:
if 'media' in tweet.entities:
for media in tweet.extended_entities['media']:
imageUrls.append(media['media_url'])
else:
imageUrls.append("")
dates=[]
for t in tweetDates:
for i in t:
date=str(i).split(" ")[0].split("-")
dates.append(date[2]+"."+date[1]+"."+date[0])
profileImage=user.profile_image_url
informations=[]
informations.append(tweetTexts)
informations.append(dates)
informations.append(tweetCount)
informations.append(followingCount)
informations.append(followerCount)
informations.append(imageUrls)
informations.append(profileImage)
# for info in informations:
# print(info)
return informations
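
# Usage sketch ('jack' is a placeholder screen name):
#
#     info = ParseTweets.getTweets('jack')
#     # -> [texts, dates, tweet_count, following, followers, image_urls, profile_image]
#
# Note that the API credentials above are hardcoded; in practice they would
# be read from environment variables or a config file rather than committed.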
| 43.192308 | 124 | 0.670971 | 209 | 2,246 | 7.090909 | 0.38756 | 0.08502 | 0.020243 | 0.036437 | 0.039136 | 0.039136 | 0 | 0 | 0 | 0 | 0 | 0.042665 | 0.238201 | 2,246 | 51 | 125 | 44.039216 | 0.823495 | 0.062778 | 0 | 0.083333 | 0 | 0 | 0.204383 | 0.161982 | 0 | 0 | 0 | 0 | 0 | 1 | 0.020833 | false | 0 | 0.020833 | 0 | 0.104167 | 0.020833 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7f482aee1ac3fa8273f5cc77f82ac708531977d2 | 3,115 | py | Python | src/main/python/apache/thermos/cli/commands/run.py | jeremyvdw/aurora | fa9d83a7ef3a96c522884089a471bbb0bef74c48 | [
"Apache-2.0"
] | 479 | 2015-03-27T22:59:49.000Z | 2022-03-09T08:40:49.000Z | src/main/python/apache/thermos/cli/commands/run.py | jeremyvdw/aurora | fa9d83a7ef3a96c522884089a471bbb0bef74c48 | [
"Apache-2.0"
] | 69 | 2015-05-26T20:06:29.000Z | 2020-01-13T19:18:59.000Z | src/main/python/apache/thermos/cli/commands/run.py | jeremyvdw/aurora | fa9d83a7ef3a96c522884089a471bbb0bef74c48 | [
"Apache-2.0"
] | 226 | 2015-03-27T20:02:59.000Z | 2022-03-09T08:40:53.000Z | #
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import print_function
import getpass
from twitter.common import app
from apache.thermos.cli.common import get_task_from_options, really_run
from apache.thermos.common.options import add_binding_to, add_port_to
@app.command
@app.command_option("--user", metavar="USER", default=getpass.getuser(), dest='user',
help="run as this user. if not $USER, must have setuid privilege.")
@app.command_option("--enable_chroot", dest="chroot", default=False, action='store_true',
help="chroot tasks to the sandbox before executing them, requires "
"root privileges.")
@app.command_option("--task", metavar="TASKNAME", default=None, dest='task',
help="The thermos task within the config that should be run. Only required if "
"there are multiple tasks exported from the thermos configuration.")
@app.command_option("--task_id", metavar="STRING", default=None, dest='task_id',
help="The id to which this task should be bound, synthesized from the task "
"name if none provided.")
@app.command_option("--json", default=False, action='store_true', dest='json',
help="Read the source file in json format.")
@app.command_option("--sandbox", metavar="PATH", default="/var/lib/thermos/sandbox", dest='sandbox',
help="The sandbox in which to run the task.")
@app.command_option("-P", "--port", type="string", nargs=1, action="callback",
callback=add_port_to('prebound_ports'), dest="prebound_ports", default=[],
metavar="NAME:PORT", help="bind named PORT to NAME.")
@app.command_option("-E", "--environment", type="string", nargs=1, action="callback",
callback=add_binding_to('bindings'), default=[], dest="bindings",
metavar="NAME=VALUE",
help="bind the configuration environment variable NAME to VALUE.")
@app.command_option("--daemon", default=False, action='store_true', dest='daemon',
help="fork and daemonize the thermos runner.")
def run(args, options):
"""Run a thermos task.
Usage: thermos run [options] config
"""
thermos_task = get_task_from_options(args, options)
really_run(thermos_task,
options.root,
options.sandbox,
task_id=options.task_id,
user=options.user,
prebound_ports=options.prebound_ports,
chroot=options.chroot,
daemon=options.daemon)
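
# Command-line sketch (task name and config file are hypothetical); flags map
# one-to-one onto the @app.command_option declarations above, and positional
# args are the thermos config file(s) parsed by get_task_from_options:
#
#     thermos run --sandbox=/var/lib/thermos/sandbox --task=hello_world \
#         -P http:31337 -E CLUSTER=local my_task.thermos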
| 47.923077 | 100 | 0.660353 | 402 | 3,115 | 5.007463 | 0.380597 | 0.049677 | 0.071535 | 0.034277 | 0.084948 | 0.071535 | 0.040735 | 0.040735 | 0 | 0 | 0 | 0.002481 | 0.223756 | 3,115 | 64 | 101 | 48.671875 | 0.830025 | 0.185875 | 0 | 0 | 0 | 0 | 0.336121 | 0.009558 | 0 | 0 | 0 | 0 | 0 | 1 | 0.025 | false | 0.05 | 0.125 | 0 | 0.15 | 0.025 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7f48681294588a03fc4aad0737c8a3c34dabc142 | 280 | py | Python | gallery/random_color.py | Commander07/Enviroment | bdf05dca42a14a5f061c8dda64c3ec6b085e4db3 | [
"MIT"
] | null | null | null | gallery/random_color.py | Commander07/Enviroment | bdf05dca42a14a5f061c8dda64c3ec6b085e4db3 | [
"MIT"
] | null | null | null | gallery/random_color.py | Commander07/Enviroment | bdf05dca42a14a5f061c8dda64c3ec6b085e4db3 | [
"MIT"
] | null | null | null | import random
import time
from enviroment import Enviroment
ENV = Enviroment("")
def random_color():
    return random.randint(0, 255)  # randint is inclusive; valid 256-color indices are 0-255
while True:
ENV.console.print(
"[color({color})]COLOR SUPPORT[/color({color})]".format(color=random_color()))
time.sleep(1)
| 17.5 | 86 | 0.685714 | 36 | 280 | 5.277778 | 0.555556 | 0.157895 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.021459 | 0.167857 | 280 | 15 | 87 | 18.666667 | 0.793991 | 0 | 0 | 0 | 0 | 0 | 0.164286 | 0.160714 | 0 | 0 | 0 | 0 | 0 | 1 | 0.1 | false | 0 | 0.3 | 0.1 | 0.5 | 0.1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7f494b88262e2106d77b9c0a20561febf55c8994 | 5,933 | py | Python | radiation/python/encoding.py | dfridovi/exploration | 5e66115178988bd264a920041dfeab6d3539caec | [
"BSD-3-Clause"
] | 5 | 2018-07-08T08:32:49.000Z | 2022-03-13T10:17:09.000Z | radiation/python/encoding.py | dfridovi/exploration | 5e66115178988bd264a920041dfeab6d3539caec | [
"BSD-3-Clause"
] | 5 | 2016-11-30T02:52:58.000Z | 2018-05-24T04:46:49.000Z | radiation/python/encoding.py | dfridovi/exploration | 5e66115178988bd264a920041dfeab6d3539caec | [
"BSD-3-Clause"
] | 2 | 2016-12-01T04:06:40.000Z | 2019-06-19T16:32:28.000Z | """
Copyright (c) 2015, The Regents of the University of California (Regents).
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following
disclaimer in the documentation and/or other materials provided
with the distribution.
3. Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived
from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS AS IS
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
Please contact the author(s) of this library if you have any questions.
Authors: David Fridovich-Keil ( dfk@eecs.berkeley.edu )
"""
###########################################################################
#
# Encoders and decoders for measurements, trajectories, and maps.
#
###########################################################################
from grid_pose_2d import GridPose2D
from grid_map_2d import GridMap2D
from source_2d import Source2D
from sensor_2d import Sensor2D
import numpy as np
import math
# Encode a list of (dx, dy, da) tuples in an integer from 0 to
# (len(delta_xs) * len(delta_ys) * len(delta_as))**len(delta_sequence) - 1.
def EncodeTrajectory(delta_xs, delta_ys, delta_as, delta_sequence):
base = len(delta_xs) * len(delta_ys) * len(delta_as)
trajectory_id = 0
for ii, delta in enumerate(delta_sequence):
x_id = delta_xs.index(delta[0])
y_id = delta_ys.index(delta[1])
a_id = delta_as.index(delta[2])
delta_id = x_id + y_id*len(delta_xs) + a_id*len(delta_xs)*len(delta_ys)
trajectory_id += delta_id * base**ii
return trajectory_id
# Decode a trajectory id into a list of GridPose2Ds.
def DecodeTrajectory(delta_xs, delta_ys, delta_as, trajectory_id,
initial_pose, num_steps):
base = len(delta_xs) * len(delta_ys) * len(delta_as)
trajectory = []
current_pose = initial_pose
while trajectory_id > 0:
remainder = trajectory_id % base
# Convert remainder to delta tuple (dx, dy, da).
x_id = remainder % len(delta_xs)
        # Floor division keeps these digit computations integral under Python 3
        y_id = ((remainder - x_id) // len(delta_xs)) % len(delta_ys)
        a_id = ((remainder - x_id - y_id*len(delta_xs)) //
                (len(delta_xs)*len(delta_ys)))
dx = delta_xs[x_id]
dy = delta_ys[y_id]
da = delta_as[a_id]
# Append to 'trajectory'.
next_pose = GridPose2D.Copy(current_pose)
assert next_pose.MoveBy(dx, dy, da)
trajectory.append(next_pose)
# Update 'trajectory_id'.
        trajectory_id = (trajectory_id - remainder) // base
# Reset 'current_pose'.
current_pose = next_pose
# If not the right length, that means that the last remainders were 0.
# Update 'trajectory' accordingly.
while len(trajectory) < num_steps:
next_pose = GridPose2D.Copy(current_pose)
assert next_pose.MoveBy(delta_xs[0], delta_ys[0], delta_as[0])
trajectory.append(next_pose)
current_pose = next_pose
return trajectory
# Encode a list of measurements in an integer.
def EncodeMeasurements(max_measurement, measurement_sequence):
base = max_measurement + 1
measurement_id = 0
for ii, measurement in enumerate(measurement_sequence):
measurement_id += measurement * base**ii
return measurement_id
# Decode a measurement id into a list of measurements.
def DecodeMeasurements(max_measurement, measurement_id, num_measurements):
base = max_measurement + 1
measurements = []
while measurement_id > 0:
remainder = measurement_id % base
measurements.append(remainder)
# Update 'measurement_id'.
        measurement_id = (measurement_id - remainder) // base
# If not enough measurements, the rest must have been zero.
while len(measurements) < num_measurements:
measurements.append(0)
return measurements
# Encode a map (list of sources) as an integer.
def EncodeMap(num_rows, num_cols, sources):
base = num_rows * num_cols
map_id = 0
for ii, source in enumerate(sources):
source_id = int(source.x_) + int(source.y_) * num_rows
map_id += source_id * base**ii
return map_id
# Decode a map id into a list of sources.
def DecodeMap(num_rows, num_cols, map_id, num_sources):
base = num_rows * num_cols
sources = []
while map_id > 0:
remainder = map_id % base
# Unpack remainder into (x, y) coordinates of a source.
source = Source2D(float(remainder % num_rows) + 0.5,
float(remainder // num_rows) + 0.5)
sources.append(source)
# Update 'map_id'.
        map_id = (map_id - remainder) // base
# If not enough sources, the remainders must have been zero.
while len(sources) < num_sources:
sources.append(Source2D(0.5, 0.5))
return sources
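
# A quick round-trip sketch of the measurement codec above (illustrative
# values): each element is a base-(max_measurement + 1) digit, so decoding
# peels off remainders until the id reaches zero and pads with zeros.
#
#     >>> EncodeMeasurements(max_measurement=5, measurement_sequence=[3, 0, 2])
#     75                      # 3*6**0 + 0*6**1 + 2*6**2, with base = 5 + 1
#     >>> DecodeMeasurements(5, 75, num_measurements=3)
#     [3, 0, 2]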
| 35.106509 | 79 | 0.68178 | 819 | 5,933 | 4.781441 | 0.279609 | 0.038815 | 0.022983 | 0.023238 | 0.222676 | 0.18335 | 0.111338 | 0.091931 | 0.091931 | 0.08427 | 0 | 0.00995 | 0.220799 | 5,933 | 168 | 80 | 35.315476 | 0.837119 | 0.432328 | 0 | 0.157895 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.026316 | 1 | 0.078947 | false | 0 | 0.078947 | 0 | 0.236842 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7f49f6fa3ef035698bb0c0bf44b54064ea2d62dc | 504 | py | Python | Cogs/game.py | k-rite/mirai-kuriyama-discord.py | 2974e2a544fe2d12491eae83e3ff16f5b776294c | [
"Apache-2.0"
] | 1 | 2021-12-02T00:22:08.000Z | 2021-12-02T00:22:08.000Z | Cogs/game.py | k-rite/mirai-kuriyama-discord.py | 2974e2a544fe2d12491eae83e3ff16f5b776294c | [
"Apache-2.0"
] | null | null | null | Cogs/game.py | k-rite/mirai-kuriyama-discord.py | 2974e2a544fe2d12491eae83e3ff16f5b776294c | [
"Apache-2.0"
] | null | null | null | import discord
from discord.ext import commands
from discord import Game
class game(commands.Cog):
def __init__(self, Bot):
self.bot = Bot
    # Just a comfy separate cog to change game status; version setup and other backend code will be added here
@commands.Cog.listener()
async def on_ready(self):
await self.bot.change_presence(activity=discord.Activity(type=discord.ActivityType.watching, name="KRITE DYING VOL 79"))
print('Game status is changed')
def setup(bot):
bot.add_cog(game(bot)) | 33.6 | 124 | 0.757937 | 78 | 504 | 4.807692 | 0.589744 | 0.056 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.004651 | 0.146825 | 504 | 15 | 125 | 33.6 | 0.867442 | 0.196429 | 0 | 0 | 0 | 0 | 0.098765 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.166667 | false | 0 | 0.25 | 0 | 0.5 | 0.083333 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7f4caadc8cb6f4e82da5982e34a205614c81d65e | 1,475 | py | Python | myapp/Entry.py | abars/illustbook | 3e790a688c19205b7384cc5815ca76c23b88f09a | [
"MIT"
] | 3 | 2016-06-16T20:11:45.000Z | 2022-01-27T04:23:09.000Z | myapp/Entry.py | abars/illustbook | 3e790a688c19205b7384cc5815ca76c23b88f09a | [
"MIT"
] | 1 | 2017-10-23T00:23:13.000Z | 2017-10-23T00:23:13.000Z | myapp/Entry.py | abars/illustbook | 3e790a688c19205b7384cc5815ca76c23b88f09a | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#---------------------------------------------------
# Comment entity (data model)
#copyright 2010-2012 ABARS all rights reserved.
#---------------------------------------------------
from google.appengine.ext import db
from google.appengine.api import users
from myapp.Bbs import Bbs
from myapp.MesThread import MesThread
from myapp.ThreadImage import ThreadImage
from myapp.CachedDbModel import CachedDbModel
class Entry(CachedDbModel):
bbs_key = db.ReferenceProperty(Bbs)
thread_key = db.ReferenceProperty(MesThread)
editor = db.StringProperty()
mail = db.StringProperty()
homepage_addr = db.StringProperty()
content = db.TextProperty()
image = db.BlobProperty()
thumbnail = db.BlobProperty()
del_flag = db.IntegerProperty()
    res_list = db.ListProperty(item_type=db.Key)
create_date = db.DateTimeProperty()
date = db.DateTimeProperty(auto_now=False)
illust_reply = db.IntegerProperty()
illust_reply_image = db.StringProperty() #Deleted
illust_reply_image_key = db.ReferenceProperty(ThreadImage)
last_update_editor = db.StringProperty() #Deleted(old:for response update to comment cache)
    user_id = db.StringProperty()  # Submitter
hidden_flag = db.IntegerProperty() #HiddenComment
violate_terms = db.IntegerProperty()
remote_addr = db.StringProperty()
remote_host = db.StringProperty()
comment_no = db.IntegerProperty()
    search_index_version = db.IntegerProperty()
adult = db.IntegerProperty()
sand = db.StringProperty()
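
# A usage sketch (assuming CachedDbModel behaves like a standard db.Model,
# so the classic App Engine query API applies; 'thread' is a hypothetical
# MesThread instance):
#
#     latest = Entry.all().filter('thread_key =', thread) \
#                         .filter('del_flag =', 0) \
#                         .order('-date').fetch(20)
#
# This would return the twenty most recent non-deleted comments on a thread.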
| 34.302326 | 92 | 0.728814 | 168 | 1,475 | 6.255952 | 0.494048 | 0.137012 | 0.062797 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.006823 | 0.105763 | 1,475 | 42 | 93 | 35.119048 | 0.789992 | 0.185763 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.1875 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7f53b321819c0bfc40c5ab85b02bbe5ae5205ad3 | 5,386 | py | Python | src/kegnet/utils/tucker.py | videoturingtest/KegNet | fe5d1eb5ab5453be70c4be473fd3da71afe4b06c | [
"Apache-2.0"
] | 42 | 2019-10-01T02:14:34.000Z | 2022-03-07T06:57:58.000Z | src/kegnet/utils/tucker.py | videoturingtest/KegNet | fe5d1eb5ab5453be70c4be473fd3da71afe4b06c | [
"Apache-2.0"
] | 5 | 2019-12-18T16:44:52.000Z | 2021-10-02T15:45:37.000Z | src/kegnet/utils/tucker.py | videoturingtest/KegNet | fe5d1eb5ab5453be70c4be473fd3da71afe4b06c | [
"Apache-2.0"
] | 13 | 2019-10-04T02:55:51.000Z | 2021-08-13T09:10:17.000Z | """
Knowledge Extraction with No Observable Data (NeurIPS 2019)
Authors:
- Jaemin Yoo (jaeminyoo@snu.ac.kr), Seoul National University
- Minyong Cho (chominyong@gmail.com), Seoul National University
- Taebum Kim (k.taebum@snu.ac.kr), Seoul National University
- U Kang (ukang@snu.ac.kr), Seoul National University
This software may be used only for research evaluation purposes.
For other purposes (e.g., commercial), please contact the authors.
"""
import tensorly as tl
import torch
from tensorly import decomposition as decomp
from tensorly import tucker_tensor as tucker
from torch import nn
from kegnet.utils import vbmf
tl.set_backend('pytorch')
class DecomposedConv2d(nn.Module):
"""
Decomposed (or compressed) convolutional layer.
"""
@staticmethod
def choose_ranks(weight, ranks):
"""
Choose the target ranks.
"""
out_channels, in_channels, _, _ = weight.shape
if ranks == 'evbmf':
unfold_0 = tl.base.unfold(weight, 0)
unfold_1 = tl.base.unfold(weight, 1)
_, diag_0, _, _ = vbmf.EVBMF(unfold_0)
_, diag_1, _, _ = vbmf.EVBMF(unfold_1)
out_rank = diag_0.shape[0]
in_rank = diag_1.shape[1]
elif isinstance(ranks, float):
out_rank = int(out_channels * ranks)
in_rank = int(in_channels * ranks)
elif isinstance(ranks, tuple):
in_rank, out_rank = ranks
else:
raise ValueError(ranks)
return out_rank, in_rank
def __init__(self, layer, ranks='evbmf', init=True):
"""
Class initializer.
"""
super(DecomposedConv2d, self).__init__()
device = layer.weight.device
weight = layer.weight.data
out_channels, in_channels, _, _ = weight.shape
out_rank, in_rank = self.choose_ranks(weight, ranks)
self.in_channel_layer = nn.Conv2d(
in_channels=in_channels,
out_channels=in_rank,
kernel_size=1,
stride=1,
padding=0,
dilation=layer.dilation,
bias=False).to(device)
self.core_layer = nn.Conv2d(
in_channels=in_rank,
out_channels=out_rank,
kernel_size=layer.kernel_size,
stride=layer.stride,
padding=layer.padding,
dilation=layer.dilation,
bias=False).to(device)
self.out_channel_layer = nn.Conv2d(
in_channels=out_rank,
out_channels=out_channels,
kernel_size=1,
stride=1,
padding=0,
dilation=layer.dilation,
bias=layer.bias is not None).to(device)
if init:
core, factors = decomp.partial_tucker(
weight, modes=[0, 1], ranks=(out_rank, in_rank), init='svd')
(out_channel_factor, in_channel_factor) = factors
if self.out_channel_layer.bias is not None:
self.out_channel_layer.bias.data = layer.bias.data
transposed = torch.transpose(in_channel_factor, 1, 0)
self.in_channel_layer.weight.data = \
transposed.unsqueeze(-1).unsqueeze(-1)
self.out_channel_layer.weight.data = \
out_channel_factor.unsqueeze(-1).unsqueeze(-1)
self.core_layer.weight.data = core
def forward(self, x):
"""
Forward propagation.
"""
x = self.in_channel_layer(x)
x = self.core_layer(x)
x = self.out_channel_layer(x)
return x
def recover(self):
"""
Recover the original shape.
"""
core = self.core_layer.weight.data
out_factor = self.out_channel_layer.weight.data.squeeze()
in_factor = self.in_channel_layer.weight.data.squeeze()
in_factor = torch.transpose(in_factor, 1, 0)
return tucker.tucker_to_tensor(core, [out_factor, in_factor])
class DecomposedLinear(nn.Module):
"""
Decomposed (or compressed) linear layer.
"""
def __init__(self, layer, ranks, init=True):
"""
Class initializer.
"""
super(DecomposedLinear, self).__init__()
device = layer.weight.device
weight = layer.weight.data
out_dim, in_dim = weight.shape
out_rank, in_rank = ranks
self.in_layer = nn.Linear(
in_features=in_dim,
out_features=in_rank,
bias=False).to(device)
self.core_layer = nn.Linear(
in_features=in_rank,
out_features=out_rank,
bias=False).to(device)
self.out_layer = nn.Linear(
in_features=out_rank,
out_features=out_dim,
bias=layer.bias is not None).to(device)
if init:
core, factors = decomp.tucker(weight, ranks=ranks, init='svd')
out_factor, in_factor = factors
if self.out_layer.bias is not None:
self.out_layer.bias.data = layer.bias.data
self.in_layer.weight.data = torch.transpose(in_factor, 1, 0)
self.out_layer.weight.data = out_factor
self.core_layer.weight.data = core
def forward(self, x):
"""
Forward propagation.
"""
x = self.in_layer(x)
x = self.core_layer(x)
x = self.out_layer(x)
return x
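
# A minimal usage sketch (shapes are illustrative, not from the paper):
#
#     layer = nn.Linear(256, 128)
#     compressed = DecomposedLinear(layer, ranks=(64, 64), init=True)
#     y = compressed(torch.randn(4, 256))   # -> (4, 128), via 256->64->64->128
#
# With init=True the three small layers are initialized from a Tucker
# decomposition of the original weight, so `compressed` starts out as an
# approximation of `layer` rather than a random re-parameterization.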
| 30.954023 | 76 | 0.59469 | 655 | 5,386 | 4.670229 | 0.207634 | 0.046747 | 0.053939 | 0.037267 | 0.518143 | 0.423668 | 0.239294 | 0.198758 | 0.165414 | 0.165414 | 0 | 0.010442 | 0.306535 | 5,386 | 173 | 77 | 31.132948 | 0.808568 | 0.123468 | 0 | 0.279279 | 0 | 0 | 0.005061 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.054054 | false | 0 | 0.054054 | 0 | 0.162162 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7f56a46d74e7ecd5b432e74c3c2222b210440061 | 21,615 | py | Python | source/tpJointOrient/jointorient.py | tpoveda/tpJointOrient | deeebf847403c2938518c75a2b1a22d61e46f74a | [
"MIT"
] | 9 | 2017-08-21T01:04:22.000Z | 2020-08-28T02:54:54.000Z | source/tpJointOrient/jointorient.py | tpoveda/tpJointOrient | deeebf847403c2938518c75a2b1a22d61e46f74a | [
"MIT"
] | 1 | 2019-11-06T18:02:41.000Z | 2019-11-06T18:02:41.000Z | source/tpJointOrient/jointorient.py | tpoveda/tpJointOrient | deeebf847403c2938518c75a2b1a22d61e46f74a | [
"MIT"
] | 3 | 2018-04-03T00:14:50.000Z | 2020-11-04T00:33:45.000Z | from functools import partial
from Qt.QtCore import *
from Qt.QtWidgets import *
import tpDccLib as tp
import tpMayaLib as maya
from tpQtLib.core import window
from tpQtLib.widgets import splitters
from tpMayaLib.core import decorators
class JointOrient(window.MainWindow, object):
def __init__(self):
super(JointOrient, self).__init__(
name='JointOrientWindow',
title='Joint Orient',
size=(350, 700),
fixed_size=False,
auto_run=True,
frame_less=True,
use_style=True
)
def ui(self):
super(JointOrient, self).ui()
### Auto Orient Joint Widget ###
joint_ori_widget = QWidget()
joint_ori_widget.setLayout(QVBoxLayout())
joint_ori_widget.setSizePolicy(QSizePolicy.Minimum, QSizePolicy.Fixed)
joint_ori_widget.layout().setContentsMargins(0, 0, 0, 0)
joint_ori_widget.layout().setSpacing(2)
self.main_layout.addWidget(joint_ori_widget)
joint_ori_splitter = splitters.Splitter('JOINT ORIENT')
joint_ori_widget.layout().addWidget(joint_ori_splitter)
aim_axis_layout = QHBoxLayout()
aim_axis_layout.setContentsMargins(5, 5, 5, 5)
aim_axis_layout.setSpacing(2)
# Aim Axis
aim_axis_box = QGroupBox()
aim_axis_box.setLayout(aim_axis_layout)
aim_axis_box.setTitle('Aim Axis')
joint_ori_widget.layout().addWidget(aim_axis_box)
self.aim_x_radio = QRadioButton('X')
self.aim_y_radio = QRadioButton('Y')
self.aim_z_radio = QRadioButton('Z')
self.aim_rev_cbx = QCheckBox('Reverse')
self.aim_x_radio.setChecked(True)
aim_axis_layout.addWidget(self.aim_x_radio)
aim_axis_layout.addWidget(self.aim_y_radio)
aim_axis_layout.addWidget(self.aim_z_radio)
aim_axis_layout.addWidget(self.aim_rev_cbx)
# Up Axis
up_axis_layout = QHBoxLayout()
up_axis_layout.setContentsMargins(5, 5, 5, 5)
up_axis_layout.setSpacing(2)
up_axis_box = QGroupBox()
up_axis_box.setLayout(up_axis_layout)
up_axis_box.setTitle('Up Axis')
joint_ori_widget.layout().addWidget(up_axis_box)
self.up_x_radio = QRadioButton('X')
self.up_y_radio = QRadioButton('Y')
        self.up_z_radio = QRadioButton('Z')
        self.up_rev_cbx = QCheckBox('Reverse')
self.up_y_radio.setChecked(True)
up_axis_layout.addWidget(self.up_x_radio)
up_axis_layout.addWidget(self.up_y_radio)
        up_axis_layout.addWidget(self.up_z_radio)
        up_axis_layout.addWidget(self.up_rev_cbx)
# Up World Axis
up_world_axis_layout = QHBoxLayout()
up_world_axis_layout.setContentsMargins(5, 5, 5, 5)
up_world_axis_layout.setSpacing(5)
up_world_axis_box = QGroupBox()
up_world_axis_box.setLayout(up_world_axis_layout)
up_world_axis_box.setTitle('Up World Axis')
joint_ori_widget.layout().addWidget(up_world_axis_box)
self.up_world_x_spin = QDoubleSpinBox()
self.up_world_y_spin = QDoubleSpinBox()
self.up_world_z_spin = QDoubleSpinBox()
self.up_world_x_spin.setDecimals(3)
self.up_world_y_spin.setDecimals(3)
self.up_world_z_spin.setDecimals(3)
self.up_world_x_spin.setRange(-360, 360)
self.up_world_y_spin.setRange(-360, 360)
self.up_world_z_spin.setRange(-360, 360)
self.up_world_x_spin.setLocale(QLocale.English)
self.up_world_y_spin.setLocale(QLocale.English)
self.up_world_z_spin.setLocale(QLocale.English)
self.up_world_x_spin.setValue(1.0)
up_world_x = QPushButton('X')
up_world_y = QPushButton('Y')
up_world_z = QPushButton('Z')
up_world_x.setMaximumWidth(20)
up_world_y.setMaximumWidth(20)
up_world_z.setMaximumWidth(20)
up_world_axis_layout.addWidget(self.up_world_x_spin)
up_world_axis_layout.addWidget(self.up_world_y_spin)
up_world_axis_layout.addWidget(self.up_world_z_spin)
up_world_axis_layout.addWidget(up_world_x)
up_world_axis_layout.addWidget(up_world_y)
up_world_axis_layout.addWidget(up_world_z)
joint_ori_widget.layout().addLayout(splitters.SplitterLayout())
joint_orient_btn_layout = QHBoxLayout()
joint_orient_btn_layout.setAlignment(Qt.AlignCenter)
joint_ori_widget.layout().addLayout(joint_orient_btn_layout)
spacer_item = QSpacerItem(2, 2, QSizePolicy.Minimum, QSizePolicy.Minimum)
joint_orient_btn_layout.addSpacerItem(spacer_item)
joint_orient_btn = QPushButton('Apply')
self.joint_orient_cbx = QCheckBox('Hierarchy')
joint_orient_btn.setMaximumWidth(80)
self.joint_orient_cbx.setChecked(True)
joint_orient_btn_layout.addWidget(joint_orient_btn)
joint_orient_btn_layout.addWidget(self.joint_orient_cbx)
spacer_item = QSpacerItem(2, 2, QSizePolicy.Fixed)
self.main_layout.addSpacerItem(spacer_item)
### Manual Orient Joint Widget ###
manual_joint_ori_widget = QWidget()
manual_joint_ori_widget.setLayout(QVBoxLayout())
manual_joint_ori_widget.setSizePolicy(QSizePolicy.Minimum, QSizePolicy.Fixed)
manual_joint_ori_widget.layout().setContentsMargins(5, 5, 5, 5)
manual_joint_ori_widget.layout().setSpacing(10)
self.main_layout.addWidget(manual_joint_ori_widget)
manual_joint_ori_splitter = splitters.Splitter('MANUAL JOINT ORIENT')
manual_joint_ori_widget.layout().addWidget(manual_joint_ori_splitter)
manual_joint_ori_layout = QHBoxLayout()
manual_joint_ori_widget.layout().addLayout(manual_joint_ori_layout)
manual_joint_ori_lbl = QLabel(' X Y Z ')
self.manual_joint_ori_x_spin = QDoubleSpinBox()
self.manual_joint_ori_y_spin = QDoubleSpinBox()
self.manual_joint_ori_z_spin = QDoubleSpinBox()
self.manual_joint_ori_x_spin.setDecimals(3)
self.manual_joint_ori_y_spin.setDecimals(3)
self.manual_joint_ori_z_spin.setDecimals(3)
self.manual_joint_ori_x_spin.setRange(-360, 360)
self.manual_joint_ori_y_spin.setRange(-360, 360)
self.manual_joint_ori_z_spin.setRange(-360, 360)
self.manual_joint_ori_x_spin.setLocale(QLocale.English)
self.manual_joint_ori_y_spin.setLocale(QLocale.English)
self.manual_joint_ori_z_spin.setLocale(QLocale.English)
        manual_joint_ori_reset_btn = QPushButton('Reset')
manual_joint_ori_layout.addWidget(manual_joint_ori_lbl)
manual_joint_ori_layout.addWidget(self.manual_joint_ori_x_spin)
manual_joint_ori_layout.addWidget(self.manual_joint_ori_y_spin)
manual_joint_ori_layout.addWidget(self.manual_joint_ori_z_spin)
        manual_joint_ori_layout.addWidget(manual_joint_ori_reset_btn)
manual_joint_splitter_layout = QVBoxLayout()
manual_joint_ori_widget.layout().addLayout(manual_joint_splitter_layout)
degree_layout = QHBoxLayout()
degree_layout.setContentsMargins(5, 5, 5, 5)
degree_layout.setSpacing(2)
degree_box = QGroupBox()
degree_box.setLayout(degree_layout)
degree_box.setStyleSheet("border:0px;")
manual_joint_splitter_layout.layout().addWidget(degree_box)
self.degree1_radio = QRadioButton('1')
self.degree5_radio = QRadioButton('5')
self.degree10_radio = QRadioButton('10')
self.degree20_radio = QRadioButton('20')
self.degree45_radio = QRadioButton('45')
self.degree90_radio = QRadioButton('90')
self.degree90_radio.setChecked(True)
self._set_value_change(90)
degree_layout.addWidget(self.degree1_radio)
degree_layout.addWidget(self.degree5_radio)
degree_layout.addWidget(self.degree10_radio)
degree_layout.addWidget(self.degree20_radio)
degree_layout.addWidget(self.degree45_radio)
degree_layout.addWidget(self.degree90_radio)
manual_joint_splitter_layout.addLayout(splitters.SplitterLayout())
manual_joint_ori_buttons_layout = QHBoxLayout()
manual_joint_ori_buttons_layout.setContentsMargins(2, 2, 2, 2)
manual_joint_ori_buttons_layout.setSpacing(5)
manual_joint_ori_widget.layout().addLayout(manual_joint_ori_buttons_layout)
manual_joint_ori_add_btn = QPushButton('Add ( + ) ')
manual_joint_ori_subtract_btn = QPushButton('Subract ( - ) ')
manual_joint_ori_buttons_layout.addWidget(manual_joint_ori_add_btn)
manual_joint_ori_buttons_layout.addWidget(manual_joint_ori_subtract_btn)
manual_joint_ori_set_btn_layout = QVBoxLayout()
manual_joint_ori_set_btn_layout.setAlignment(Qt.AlignCenter)
manual_joint_ori_set_btn_layout.setContentsMargins(2, 2, 2, 2)
manual_joint_ori_set_btn_layout.setSpacing(5)
manual_joint_ori_widget.layout().addLayout(manual_joint_ori_set_btn_layout)
manual_joint_ori_set_btn = QPushButton('Set')
manual_joint_ori_set_btn.setMaximumWidth(100)
self.manual_joint_ori_set_cbx = QCheckBox('Affect children')
manual_joint_ori_set_btn_layout.addWidget(manual_joint_ori_set_btn)
manual_joint_ori_set_btn_layout.addWidget(self.manual_joint_ori_set_cbx)
set_rot_axis_widget = QWidget()
set_rot_axis_widget.setLayout(QVBoxLayout())
set_rot_axis_widget.setSizePolicy(QSizePolicy.Minimum, QSizePolicy.Fixed)
set_rot_axis_widget.layout().setContentsMargins(5, 5, 5, 5)
set_rot_axis_widget.layout().setSpacing(10)
self.main_layout.addWidget(set_rot_axis_widget)
set_rot_axis_splitter = splitters.Splitter('SET ROTATION AXIS')
set_rot_axis_widget.layout().addWidget(set_rot_axis_splitter)
set_rot_axis_layout = QVBoxLayout()
set_rot_axis_widget.layout().addLayout(set_rot_axis_layout)
set_rot_top_layout = QHBoxLayout()
set_rot_top_layout.setSpacing(5)
set_rot_axis_layout.addLayout(set_rot_top_layout)
self.set_rot_axis_box = QComboBox()
set_rot_top_layout.addWidget(self.set_rot_axis_box)
for rotAxis in ['xyz', 'yzx', 'zxy', 'xzy', 'yxz', 'zyx']:
self.set_rot_axis_box.addItem(rotAxis)
set_rot_axis_common_btn = QPushButton(' <')
set_rot_axis_common_btn.setMaximumWidth(45)
set_rot_axis_common_btn.setStyleSheet("QPushButton::menu-indicator{image:url(none.jpg);}")
self.set_rot_axis_common_btn_menu = QMenu(self)
self._set_common_rotation_axis()
set_rot_axis_common_btn.setMenu(self.set_rot_axis_common_btn_menu)
set_rot_top_layout.addWidget(set_rot_axis_common_btn)
set_rot_axis_btn_layout = QHBoxLayout()
set_rot_axis_btn_layout.setAlignment(Qt.AlignCenter)
set_rot_axis_layout.addLayout(set_rot_axis_btn_layout)
set_rot_axis_btn = QPushButton('Set')
set_rot_axis_btn.setMaximumWidth(100)
set_rot_axis_btn_layout.addWidget(set_rot_axis_btn)
set_rot_axis_splitter_layout = QVBoxLayout()
set_rot_axis_widget.layout().addLayout(set_rot_axis_splitter_layout)
set_rot_axis_splitter_layout.addLayout(splitters.SplitterLayout())
spacer_item = QSpacerItem(2, 2, QSizePolicy.Fixed)
self.main_layout.addSpacerItem(spacer_item)
layout_lra_buttons = QHBoxLayout()
self.main_layout.addLayout(layout_lra_buttons)
display_lra_btn = QPushButton('Display LRA')
hide_lra_btn = QPushButton('Hide LRA')
layout_lra_buttons.addWidget(display_lra_btn)
layout_lra_buttons.addWidget(hide_lra_btn)
select_hierarchy_btn = QPushButton('Select Hierarchy')
self.main_layout.addWidget(select_hierarchy_btn)
# ==== SIGNALS ==== #
up_world_x.clicked.connect(partial(self._reset_axis, 'x'))
up_world_y.clicked.connect(partial(self._reset_axis, 'y'))
up_world_z.clicked.connect(partial(self._reset_axis, 'z'))
joint_orient_btn.clicked.connect(self.orient_joints)
        manual_joint_ori_reset_btn.clicked.connect(self._reset_manual_orient)
manual_joint_ori_add_btn.clicked.connect(partial(self.manual_orient_joints, 'add'))
manual_joint_ori_subtract_btn.clicked.connect(partial(self.manual_orient_joints, 'subtract'))
manual_joint_ori_set_btn.clicked.connect(self.set_manual_orient_joints)
self.degree1_radio.clicked.connect(partial(self._set_value_change, 0))
self.degree5_radio.clicked.connect(partial(self._set_value_change, 5))
self.degree10_radio.clicked.connect(partial(self._set_value_change, 10))
self.degree20_radio.clicked.connect(partial(self._set_value_change, 20))
self.degree45_radio.clicked.connect(partial(self._set_value_change, 45))
self.degree90_radio.clicked.connect(partial(self._set_value_change, 90))
set_rot_axis_btn.clicked.connect(self.set_rot_axis)
display_lra_btn.clicked.connect(partial(self.set_lra, True))
hide_lra_btn.clicked.connect(partial(self.set_lra, False))
select_hierarchy_btn.clicked.connect(self.select_hierarchy)
def _reset_axis(self, axis):
for spin in [self.up_world_x_spin, self.up_world_y_spin, self.up_world_z_spin]:
spin.setValue(0.0)
if axis == 'x':
self.up_world_x_spin.setValue(1.0)
elif axis == 'y':
self.up_world_y_spin.setValue(1.0)
elif axis == 'z':
self.up_world_z_spin.setValue(1.0)
def _reset_manual_orient(self):
for spin in [self.manual_joint_ori_x_spin, self.manual_joint_ori_y_spin, self.manual_joint_ori_z_spin]:
spin.setValue(0.0)
def _set_value_change(self, value):
for spin in [self.manual_joint_ori_x_spin, self.manual_joint_ori_y_spin, self.manual_joint_ori_z_spin]:
spin.setSingleStep(value)
def _set_common_rotation_axis(self):
self.set_rot_axis_common_btn_menu.addAction('Wrist (YXZ)', partial(self._set_common_rot_order, 'yxz'))
self.set_rot_axis_common_btn_menu.addAction('Finger (XYZ)', partial(self._set_common_rot_order, 'xyz'))
self.set_rot_axis_common_btn_menu.addAction('Spine (ZYX)', partial(self._set_common_rot_order, 'zyx'))
self.set_rot_axis_common_btn_menu.addAction('Hips (ZYX)', partial(self._set_common_rot_order, 'zyx'))
self.set_rot_axis_common_btn_menu.addAction('Root (ZYX)', partial(self._set_common_rot_order, 'zyx'))
self.set_rot_axis_common_btn_menu.addAction('Upper Leg (ZYX)', partial(self._set_common_rot_order, 'zyx'))
self.set_rot_axis_common_btn_menu.addAction('Knee (YXZ)', partial(self._set_common_rot_order, 'yxz'))
self.set_rot_axis_common_btn_menu.addAction('Ankle (XZY)', partial(self._set_common_rot_order, 'xzy'))
def _set_common_rot_order(self, rot_axis):
rot_order = self._get_rot_order(rot_axis)
self.set_rot_axis_box.setCurrentIndex(rot_order)
@staticmethod
def _get_rot_order(rot_axis):
rot_order = {}
for i, order in enumerate(['xyz', 'yzx', 'zxy', 'xzy', 'yxz', 'zyx']):
rot_order[order] = i
rot_order[order.upper()] = i
return rot_order[rot_axis]
@decorators.undo_chunk
def orient_joints(self):
reset_joints = []
# Get up and aim axis
aim_axis = [0, 0, 0]
up_axis = [0, 0, 0]
for i, aim_radio in enumerate([self.aim_x_radio, self.aim_y_radio, self.aim_z_radio]):
if aim_radio.isChecked():
aim_axis_num = i
        for i, up_radio in enumerate([self.up_x_radio, self.up_y_radio, self.up_z_radio]):
            if up_radio.isChecked():
                up_axis_num = i
        if aim_axis_num == up_axis_num:
            tp.logger.warning("tpJointOrient: aim and up axis are the same, orientation may not work correctly!")
aim_axis_reverse = 1.0
if self.aim_rev_cbx.isChecked():
aim_axis_reverse = -1.0
up_axis_reverse = 1.0
        if self.up_rev_cbx.isChecked():
up_axis_reverse = -1.0
aim_axis[aim_axis_num] = aim_axis_reverse
        up_axis[up_axis_num] = up_axis_reverse
world_up_axis = [self.up_world_x_spin.value(), self.up_world_y_spin.value(), self.up_world_z_spin.value()]
# Get selected joints
if self.joint_orient_cbx.isChecked():
maya.cmds.select(hierarchy=True)
joints = maya.cmds.ls(selection=True, type='joint')
# =======================================================================
# Loop all selected joints ...
for jnt in reversed(joints):
# Get child node
childs = maya.cmds.listRelatives(jnt, children=True, type=['transform', 'joint'])
# If the joints has direct childs, unparent that childs and store names
if childs:
if len(childs) > 0:
childs = maya.cmds.parent(childs, world=True)
# Get parent of this joints for later use
parent = ''
parents = maya.cmds.listRelatives(jnt, parent=True)
if parents:
parent = parents[0]
# Aim to the child
aim_target = ''
if childs:
for child in childs:
if maya.cmds.nodeType(child) == 'joint':
aim_target = child
break
# print '//DEBUG: JNT=' + jnt + " Parent=" + parent + " AimTarget=" + aim_target + "//\n"
if aim_target != '':
# Apply an aim constraint from the joint to its child (target)
maya.cmds.delete(maya.cmds.aimConstraint(aim_target, jnt, aim=aim_axis, upVector=up_axis, worldUpVector=world_up_axis,
worldUpType='vector', weight=1.0))
# Clear joint axis
maya.cmds.joint(jnt, edit=True, zeroScaleOrient=True)
maya.cmds.makeIdentity(jnt, apply=True)
elif parent != '':
reset_joints.append(jnt)
# Reparent child
if childs:
if len(childs) > 0:
maya.cmds.parent(childs, jnt)
for jnt in reset_joints:
# If there is no target, the joint will take its parent orientation
for axis in ['x', 'y', 'z']:
maya.cmds.setAttr(jnt + '.jointOrient' + axis.upper(), maya.cmds.getAttr(jnt + '.r' + axis))
maya.cmds.setAttr(jnt + '.r' + axis, 0)
@decorators.undo_chunk
    def manual_orient_joints(self, mode):
        if mode == 'add':
tweak = 1.0
else:
tweak = -1.0
tweak_rot = [self.manual_joint_ori_x_spin.value() * tweak, self.manual_joint_ori_y_spin.value() * tweak,
self.manual_joint_ori_z_spin.value() * tweak]
joints = maya.cmds.ls(selection=True, type='joint')
for jnt in joints:
# Adjust the rotation axis
maya.cmds.xform(jnt, rotateAxis=[tweak_rot[0], tweak_rot[1], tweak_rot[2]], relative=True, objectSpace=True)
# Clear joint axis
maya.cmds.joint(jnt, edit=True, zeroScaleOrient=True)
maya.cmds.makeIdentity(jnt, apply=True)
maya.cmds.select(joints, replace=True)
@decorators.undo_chunk
def set_manual_orient_joints(self):
tweak_rot = [self.manual_joint_ori_x_spin.value(), self.manual_joint_ori_y_spin.value(),
self.manual_joint_ori_z_spin.value()]
joints = maya.cmds.ls(selection=True, type='joint')
for jnt in joints:
if not self.manual_joint_ori_set_cbx.isChecked():
childs = maya.cmds.listRelatives(jnt, children=True, type=['transform', 'joint'])
if childs:
if len(childs) > 0:
for child in childs:
maya.cmds.parent(child, world=True)
# Set the rotation axis
for i, axis in enumerate(['x', 'y', 'z']):
maya.cmds.setAttr(jnt + '.jointOrient' + axis.upper(), tweak_rot[i])
# Clear joint axis
maya.cmds.joint(jnt, edit=True, zeroScaleOrient=True)
maya.cmds.makeIdentity(jnt, apply=True)
if childs:
for child in childs:
maya.cmds.parent(child, jnt)
maya.cmds.select(joints, replace=True)
@decorators.undo_chunk
def set_rot_axis(self):
sel = maya.cmds.ls(selection=True, type=['joint', 'transform'])
for obj in sel:
rot_order = self._get_rot_order(self.set_rot_axis_box.currentText())
maya.cmds.setAttr(obj + '.rotateOrder', rot_order)
@staticmethod
@decorators.undo_chunk
def set_lra(state):
sel = maya.cmds.ls(selection=True)
for obj in sel:
if maya.cmds.attributeQuery('displayLocalAxis', node=obj, exists=True):
maya.cmds.setAttr(obj + '.displayLocalAxis', state)
@staticmethod
def select_hierarchy():
"""
Method that selects the hierachy of the selected nodes
"""
sel = maya.cmds.ls(selection=True)
for obj in sel:
maya.cmds.select(obj, hi=True, add=True)
def run():
win = JointOrient()
win.show()
return win
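
# Usage sketch from Maya's script editor (assumes tpJointOrient and its
# tpQtLib/tpMayaLib/tpDccLib dependencies are importable inside Maya):
#
#     from tpJointOrient import jointorient
#     win = jointorient.run()   # builds and shows the Joint Orient window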
| 41.567308 | 134 | 0.670275 | 2,799 | 21,615 | 4.802429 | 0.102179 | 0.052968 | 0.078113 | 0.040173 | 0.584958 | 0.424937 | 0.338119 | 0.261122 | 0.169841 | 0.137554 | 0 | 0.012814 | 0.230997 | 21,615 | 519 | 135 | 41.647399 | 0.795873 | 0.035623 | 0 | 0.127937 | 0 | 0 | 0.038484 | 0.002357 | 0 | 0 | 0 | 0 | 0 | 1 | 0.039164 | false | 0 | 0.020888 | 0 | 0.067885 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7f58bef75ba0ec877afcf1b719c8b7a1c29e9cfb | 2,584 | py | Python | run.py | tobias-fyi/print-fiction | b0befb9906fea85bf1a553fb4bc4d229b8ed957b | [
"MIT"
] | null | null | null | run.py | tobias-fyi/print-fiction | b0befb9906fea85bf1a553fb4bc4d229b8ed957b | [
"MIT"
] | null | null | null | run.py | tobias-fyi/print-fiction | b0befb9906fea85bf1a553fb4bc4d229b8ed957b | [
"MIT"
] | null | null | null | import dash
import dash_bootstrap_components as dbc
import dash_core_components as dcc
import dash_html_components as html
from dash.dependencies import Input, Output
from app import app, server
from pages import index
# ====== Navigation ====== #
# nav = html.Ul(
# [
# html.Li(html.A("print(fiction)", href="/", className="nav-link")),
# html.Li(html.A("Introduction", href="/#introduction", className="nav-link")),
# html.Li(html.A("Predict", href="/#predict", className="nav-link")),
# ],
# className="nav",
# )
# ====== Footer ====== #
customFooter = dbc.Container(
html.Div(
html.Div(
[
html.H2("Tobias Reaper"),
html.Ul(
[
html.Li(
html.A(
html.I(className="fas fa-globe mr-1"),
href="https://tobias.fyi/",
)
),
html.Li(
html.A(
html.I(className="fab fa-github-square mr-1"),
href="https://github.com/tobias-fyi/print-fiction/",
)
),
html.Li(
html.A(
html.I(className="fab fa-linkedin mr-1"),
href="https://www.linkedin.com/in/tobias-reaper/",
)
),
html.Li(
html.A(
html.I(className="fab fa-twitter-square mr-1"),
href="https://twitter.com/tobiasfyi/",
)
),
],
className="icons",
),
],
className="container medium",
),
id="footer",
),
fluid=True,
)
# ====== URL Routing ====== #
# https://dash.plot.ly/urls #
app.layout = html.Div(
[
dcc.Location(id="url", refresh=False),
# nav,
dbc.Container(id="page-content", fluid=True),
html.Hr(),
customFooter,
]
)
@app.callback(Output("page-content", "children"), [Input("url", "pathname")])
def display_page(pathname):
if pathname == "/":
return index.layout
else:
return dcc.Markdown("## Page not found")
if __name__ == "__main__":
app.run_server(debug=True)
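
# Local development sketch: `python run.py` starts the Dash dev server
# (on http://127.0.0.1:8050 by default); `server` is imported above so a
# WSGI host such as gunicorn can target `run:server` in production.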
| 29.033708 | 87 | 0.417957 | 230 | 2,584 | 4.626087 | 0.369565 | 0.039474 | 0.065789 | 0.072368 | 0.214286 | 0.180451 | 0.158835 | 0.084586 | 0.084586 | 0 | 0 | 0.003453 | 0.439628 | 2,584 | 88 | 88 | 29.363636 | 0.731354 | 0.151703 | 0 | 0.296875 | 0 | 0 | 0.154236 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.015625 | false | 0 | 0.109375 | 0 | 0.15625 | 0.015625 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7f5eb990fc9bad4f6475bb538b62266b6b5e7f41 | 7,978 | py | Python | legacy/examples/distribute_graphsage/model.py | zbmain/PGL | dbded6a1543248b0a33c05eb476ddc513401a774 | [
"Apache-2.0"
] | 1,389 | 2019-06-11T03:29:20.000Z | 2022-03-29T18:25:43.000Z | legacy/examples/distribute_graphsage/model.py | zbmain/PGL | dbded6a1543248b0a33c05eb476ddc513401a774 | [
"Apache-2.0"
] | 232 | 2019-06-21T06:52:10.000Z | 2022-03-29T08:20:31.000Z | legacy/examples/distribute_graphsage/model.py | zbmain/PGL | dbded6a1543248b0a33c05eb476ddc513401a774 | [
"Apache-2.0"
] | 229 | 2019-06-20T12:13:58.000Z | 2022-03-25T12:04:48.000Z |
# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
graphsage model.
"""
from __future__ import division
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals

import math

import pgl
import numpy as np
import paddle
import paddle.fluid.layers as L
import paddle.fluid as F
import paddle.fluid as fluid


def copy_send(src_feat, dst_feat, edge_feat):
    return src_feat["h"]


def mean_recv(feat):
    return fluid.layers.sequence_pool(feat, pool_type="average")


def sum_recv(feat):
    return fluid.layers.sequence_pool(feat, pool_type="sum")


def max_recv(feat):
    return fluid.layers.sequence_pool(feat, pool_type="max")


def lstm_recv(feat):
    hidden_dim = 128
    forward, _ = fluid.layers.dynamic_lstm(
        input=feat, size=hidden_dim * 4, use_peepholes=False)
    output = fluid.layers.sequence_last_step(forward)
    return output
def graphsage_mean(gw, feature, hidden_size, act, name):
    msg = gw.send(copy_send, nfeat_list=[("h", feature)])
    neigh_feature = gw.recv(msg, mean_recv)
    self_feature = feature
    self_feature = fluid.layers.fc(self_feature,
                                   hidden_size,
                                   act=act,
                                   name=name + '_l')
    neigh_feature = fluid.layers.fc(neigh_feature,
                                    hidden_size,
                                    act=act,
                                    name=name + '_r')
    output = fluid.layers.concat([self_feature, neigh_feature], axis=1)
    output = fluid.layers.l2_normalize(output, axis=1)
    return output
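# --- Added note (illustrative sketch, not part of the original source) ---
# graphsage_mean implements one GraphSAGE layer: every node sends its "h"
# feature along its out-edges (copy_send), each receiver averages incoming
# messages (mean_recv), and the self and neighbor features pass through
# separate fc layers before concatenation and L2 normalization. The
# aggregation step, written out in plain NumPy for clarity:
#
#   import numpy as np
#   feats = np.random.rand(4, 8)        # 4 nodes, 8-dim features
#   edges = [(0, 1), (2, 1), (3, 1)]    # src -> dst
#   neigh = np.zeros_like(feats)
#   counts = np.zeros(len(feats))
#   for src, dst in edges:
#       neigh[dst] += feats[src]        # "send" the source feature
#       counts[dst] += 1
#   neigh[counts > 0] /= counts[counts > 0][:, None]   # mean "recv" pooling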
def graphsage_meanpool(gw,
                       feature,
                       hidden_size,
                       act,
                       name,
                       inner_hidden_size=512):
    neigh_feature = fluid.layers.fc(feature, inner_hidden_size, act="relu")
    msg = gw.send(copy_send, nfeat_list=[("h", neigh_feature)])
    neigh_feature = gw.recv(msg, mean_recv)
    neigh_feature = fluid.layers.fc(neigh_feature,
                                    hidden_size,
                                    act=act,
                                    name=name + '_r')
    self_feature = feature
    self_feature = fluid.layers.fc(self_feature,
                                   hidden_size,
                                   act=act,
                                   name=name + '_l')
    output = fluid.layers.concat([self_feature, neigh_feature], axis=1)
    output = fluid.layers.l2_normalize(output, axis=1)
    return output


def graphsage_maxpool(gw,
                      feature,
                      hidden_size,
                      act,
                      name,
                      inner_hidden_size=512):
    neigh_feature = fluid.layers.fc(feature, inner_hidden_size, act="relu")
    msg = gw.send(copy_send, nfeat_list=[("h", neigh_feature)])
    neigh_feature = gw.recv(msg, max_recv)
    neigh_feature = fluid.layers.fc(neigh_feature,
                                    hidden_size,
                                    act=act,
                                    name=name + '_r')
    self_feature = feature
    self_feature = fluid.layers.fc(self_feature,
                                   hidden_size,
                                   act=act,
                                   name=name + '_l')
    output = fluid.layers.concat([self_feature, neigh_feature], axis=1)
    output = fluid.layers.l2_normalize(output, axis=1)
    return output


def graphsage_lstm(gw, feature, hidden_size, act, name):
    inner_hidden_size = 128
    neigh_feature = fluid.layers.fc(feature, inner_hidden_size, act="relu")
    hidden_dim = 128
    forward_proj = fluid.layers.fc(input=neigh_feature,
                                   size=hidden_dim * 4,
                                   bias_attr=False,
                                   name="lstm_proj")
    msg = gw.send(copy_send, nfeat_list=[("h", forward_proj)])
    neigh_feature = gw.recv(msg, lstm_recv)
    neigh_feature = fluid.layers.fc(neigh_feature,
                                    hidden_size,
                                    act=act,
                                    name=name + '_r')
    self_feature = feature
    self_feature = fluid.layers.fc(self_feature,
                                   hidden_size,
                                   act=act,
                                   name=name + '_l')
    output = fluid.layers.concat([self_feature, neigh_feature], axis=1)
    output = fluid.layers.l2_normalize(output, axis=1)
    return output
def build_graph_model(graph_wrapper, num_class, k_hop, graphsage_type,
                      hidden_size):
    node_index = fluid.layers.data(
        "node_index", shape=[None], dtype="int64", append_batch_size=False)
    node_label = fluid.layers.data(
        "node_label", shape=[None, 1], dtype="int64", append_batch_size=False)

    #feature = fluid.layers.gather(feature, graph_wrapper.node_feat['feats'])
    feature = graph_wrapper.node_feat['feats']
    feature.stop_gradient = True

    for i in range(k_hop):
        if graphsage_type == 'graphsage_mean':
            feature = graphsage_mean(
                graph_wrapper,
                feature,
                hidden_size,
                act="relu",
                name="graphsage_mean_%s" % i)
        elif graphsage_type == 'graphsage_meanpool':
            feature = graphsage_meanpool(
                graph_wrapper,
                feature,
                hidden_size,
                act="relu",
                name="graphsage_meanpool_%s" % i)
        elif graphsage_type == 'graphsage_maxpool':
            feature = graphsage_maxpool(
                graph_wrapper,
                feature,
                hidden_size,
                act="relu",
                name="graphsage_maxpool_%s" % i)
        elif graphsage_type == 'graphsage_lstm':
            feature = graphsage_lstm(
                graph_wrapper,
                feature,
                hidden_size,
                act="relu",
                name="graphsage_lstm_%s" % i)
        else:
            raise ValueError("graphsage type %s is not"
                             " implemented" % graphsage_type)

    feature = fluid.layers.gather(feature, node_index)
    logits = fluid.layers.fc(feature,
                             num_class,
                             act=None,
                             name='classification_layer')
    proba = fluid.layers.softmax(logits)
    loss = fluid.layers.softmax_with_cross_entropy(
        logits=logits, label=node_label)
    loss = fluid.layers.mean(loss)
    acc = fluid.layers.accuracy(input=proba, label=node_label, k=1)
    return loss, acc
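# --- Added note (illustrative, not part of the original source) ---
# Each pass through the k_hop loop stacks another aggregation layer, so a
# node's final embedding depends on its k-hop sampled neighborhood. For
# example, with k_hop=2 and graphsage_type='graphsage_mean', the layers
# created are named graphsage_mean_0 and graphsage_mean_1, each with its own
# '_l' (self branch) and '_r' (neighbor branch) fc parameters.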
class GraphsageModel(object):
    def __init__(self, args):
        self.args = args

    def forward(self):
        args = self.args

        graph_wrapper = pgl.graph_wrapper.GraphWrapper(
            "sub_graph",
            node_feat=[('feats', [None, 602], np.dtype('float32'))])

        loss, acc = build_graph_model(
            graph_wrapper,
            num_class=args.num_class,
            hidden_size=args.hidden_size,
            graphsage_type=args.graphsage_type,
            k_hop=len(args.samples))
        loss.persistable = True
        self.graph_wrapper = graph_wrapper
        self.loss = loss
        self.acc = acc
        return loss
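# --- Hedged usage sketch (added; not part of the original source) ---
# forward() only reads a few attributes from `args`, so a SimpleNamespace can
# stand in for the project's argparse namespace; the values below are
# illustrative assumptions, not the project's defaults:
#
#   from types import SimpleNamespace
#   args = SimpleNamespace(num_class=41, hidden_size=128,
#                          graphsage_type='graphsage_mean',
#                          samples=[25, 10])   # 2-hop neighbor sampling
#   model = GraphsageModel(args)
#   loss = model.forward()   # assembles the static fluid program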
| 35.145374 | 81 | 0.564051 | 888 | 7,978 | 4.82545 | 0.217342 | 0.089848 | 0.057643 | 0.074679 | 0.529755 | 0.512019 | 0.472812 | 0.439907 | 0.41797 | 0.385531 | 0 | 0.009218 | 0.34733 | 7,978 | 226 | 82 | 35.300885 | 0.813712 | 0.084232 | 0 | 0.50289 | 0 | 0 | 0.044505 | 0.002885 | 0 | 0 | 0 | 0 | 0 | 1 | 0.069364 | false | 0 | 0.063584 | 0.023121 | 0.202312 | 0.00578 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7f5f8e7da675ccdf94c2845ab889407163f48cad | 16,914 | py | Python | ensemble_detectors/src/Algorithm_1_matchfilter/spectral_lib/spectral/algorithms/perceptron.py | satish1901/Methane-detection-from-hyperspectral-imagery | 741dee02e76931f572cf3e06af8faabe871e8e4a | [
"MIT"
] | 27 | 2020-06-11T21:59:54.000Z | 2022-03-22T03:10:50.000Z | ensemble_detectors/src/Algorithm_1_matchfilter/spectral_lib/spectral/algorithms/perceptron.py | N-NSH/Methane-detection-from-hyperspectral-imagery | 741dee02e76931f572cf3e06af8faabe871e8e4a | [
"MIT"
] | 7 | 2020-09-25T22:41:18.000Z | 2022-02-09T23:41:04.000Z | ensemble_detectors/src/Algorithm_1_matchfilter/spectral_lib/spectral/algorithms/perceptron.py | N-NSH/Methane-detection-from-hyperspectral-imagery | 741dee02e76931f572cf3e06af8faabe871e8e4a | [
"MIT"
] | 4 | 2021-01-18T15:57:13.000Z | 2022-03-12T20:51:27.000Z |
#########################################################################
#
# perceptron.py - This file is part of the Spectral Python (SPy) package.
#
# Copyright (C) 2001-2014 Thomas Boggs
#
# Spectral Python is free software; you can redistribute it and/
# or modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# Spectral Python is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software; if not, write to
#
# Free Software Foundation, Inc.
# 59 Temple Place, Suite 330
# Boston, MA 02111-1307
# USA
#
#########################################################################
#
# Send comments to:
# Thomas Boggs, tboggs@users.sourceforge.net
#
'''
Classes and functions for classification with neural networks.
'''
from __future__ import division, print_function, unicode_literals
import numpy as np
import sys
class PerceptronLayer:
    '''A multilayer perceptron layer with sigmoid activation function.'''
    def __init__(self, shape, k=1.0, weights=None):
        '''
        Arguments:

            `shape` (2-tuple of int):

                Should have the form (`num_inputs`, `num_neurons`), where
                `num_inputs` does not include an input for the bias weights.

            `k` (float):

                Sigmoid shape parameter.

            `weights` (ndarray):

                Initial weights for the layer. Note that if provided, this
                argument must have shape (`num_neurons`, `num_inputs` + 1). If
                not provided, initial weights will be randomized.
        '''
        self.k = k
        self.shape = (shape[1], shape[0] + 1)
        if weights is not None:
            if weights.shape != self.shape:
                raise Exception('Shape of weight matrix does not '
                                'match Perceptron layer shape.')
            self.weights = np.array(weights, dtype=np.float64)
        else:
            self.randomize_weights()
        self.dW = np.zeros_like(self.weights)
        self.dW_buf = np.zeros_like(self.dW)
        self.x = np.ones(self.shape[1], float)

    def randomize_weights(self):
        '''Randomizes the layer weight matrix.

        The bias weight will be in the range [0, 1). The remaining weights will
        correspond to a vector with unit length and uniform random orientation.
        '''
        import math
        self.weights = 1. - 2. * np.random.rand(*self.shape)
        for row in self.weights:
            row[1:] /= math.sqrt(np.sum(row[1:]**2))
            row[0] = -0.5 * np.random.rand() - 0.5 * np.sum(row[1:])

    def input(self, x, clip=0.0):
        '''Sets layer input and computes output.

        Arguments:

            `x` (sequence):

                Layer input, not including bias input.

            `clip` (float >= 0):

                Optional clipping value to limit sigmoid output. The sigmoid
                function has output in the range (0, 1). If the `clip` argument
                is set to `a` then all neuron outputs for the layer will be
                constrained to the range [a, 1 - a]. This can improve perceptron
                learning rate in some situations.

        Return value:

            The ndarray of output values is returned and is also set in the `y`
            attribute of the layer.

        For classifying samples, call `classify` instead.
        '''
        self.x[1:] = x
        self.z = np.dot(self.weights, self.x)
        if clip > 0.:
            self.y = np.clip(self.g(self.z), clip, 1. - clip)
        else:
            self.y = self.g(self.z)
        return self.y

    def g(self, a):
        '''Neuron activation function (logistic sigmoid)'''
        return 1. / (1. + np.exp(- self.k * a))

    def dy_da(self):
        '''Derivative of activation function at current activation level.'''
        return self.k * (self.y * (1.0 - self.y))
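# --- Added note (derivation sketch, not in the original source) ---
# For the logistic sigmoid used above, g(a) = 1 / (1 + exp(-k * a)), the
# derivative can be written in terms of the output itself:
#
#     dg/da = k * exp(-k * a) / (1 + exp(-k * a))**2 = k * g(a) * (1 - g(a))
#
# which is exactly what dy_da computes from the cached activation self.y.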
class Perceptron:
    ''' A Multi-Layer Perceptron network with backpropagation learning.'''
    def __init__(self, layers, k=1.0):
        '''
        Creates the Perceptron network.

        Arguments:

            layers (sequence of integers):

                A list specifying the network structure. `layers`[0] is the
                number of inputs. `layers`[-1] is the number of perceptron
                outputs. `layers`[1: -1] are the numbers of units in the hidden
                layers.

            `k` (float):

                Sigmoid shape parameter.
        '''
        if type(layers) != list or len(layers) < 2:
            raise Exception('ERROR: Perceptron argument must be list of 2 or '
                            'more integers.')
        self.shape = layers[:]
        self.layers = [PerceptronLayer((layers[i - 1], layers[i]), k)
                       for i in range(1, len(layers))]
        self.accuracy = 0
        self.error = 0
        # To prevent overflow when scaling inputs
        self.min_input_diff = 1.e-8
        # If True, previous iteration weights are preserved after interrupting
        # training (with CTRL-C)
        self.cache_weights = True

    def input(self, x, clip=0.0):
        '''Sets Perceptron input, activates neurons and sets & returns output.

        Arguments:

            `x` (sequence):

                Inputs to input layer. Should not include a bias input.

            `clip` (float >= 0):

                Optional clipping value to limit sigmoid output. The sigmoid
                function has output in the range (0, 1). If the `clip` argument
                is set to `a` then all neuron outputs for the layer will be
                constrained to the range [a, 1 - a]. This can improve perceptron
                learning rate in some situations.

        For classifying samples, call `classify` instead of `input`.
        '''
        self.x = x[:]
        x = self._scale * (x - self._offset)
        for layer in self.layers:
            x = layer.input(x, clip)
        self.y = np.array(x)
        return x

    def classify(self, x):
        '''Classifies the given sample.

        This has the same result as calling input and rounding the result.
        '''
        return [int(round(xx)) for xx in self.input(x)]
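    # --- Hedged usage sketch (added; not part of the original source) ---
    # Training a 2-2-1 network on XOR might look like the following; the
    # iteration count needed depends on the randomized initial weights:
    #
    #   p = Perceptron([2, 2, 1])
    #   X = [[0, 0], [0, 1], [1, 0], [1, 1]]
    #   Y = [[0], [1], [1], [0]]
    #   p.train(X, Y, max_iterations=5000, rate=0.3)
    #   p.classify([0, 1])   # -> [1] once the network has converged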
    def train(self, X, Y, max_iterations=10000, accuracy=100.0, rate=0.3,
              momentum=0., batch=1, clip=0.0, on_iteration=None,
              stdout=sys.stdout):
        '''
        Trains the Perceptron to classify the given samples.

        Arguments:

            `X`:

                The sequence of observations to be learned. Each element of `X`
                must have a length corresponding to the input layer of the
                network. Values in `X` are not required to be scaled.

            `Y`:

                Truth values corresponding to elements of `X`. `Y` must contain
                as many elements as `X` and each element of `Y` must contain a
                number of elements corresponding to the output layer of the
                network. All values in `Y` should be in the range [0, 1] and for
                training a classifier, values in `Y` are typically *only* 0 or 1
                (i.e., no intermediate values).

            `max_iterations` (int):

                Maximum number of iterations through the data to perform.
                Training will end sooner if the specified accuracy is reached in
                fewer iterations.

            `accuracy` (float):

                The percent training accuracy at which to terminate training, if
                the maximum number of iterations are not reached first. This
                value can be set greater than 100 to force a specified number of
                training iterations to be performed (e.g., to continue reducing
                the error term after 100% classification accuracy has been
                achieved).

            `rate` (float):

                The perceptron learning rate (typically in the range (0, 1]).

            `momentum` (float):

                The perceptron learning momentum term, which specifies the
                fraction of the previous update value that should be added to
                the current update term. The value should be in the range [0, 1).

            `batch` (positive integer):

                Specifies how many samples should be evaluated before an update
                is made to the perceptron weights. A value of 0 indicates batch
                updates should be performed (evaluate all training inputs prior
                to updating). Otherwise, updates will be aggregated for every
                `batch` inputs (i.e., `batch` == 1 is stochastic learning).

            `clip` (float >= 0):

                Optional clipping value to limit sigmoid output during training.
                The sigmoid function has output in the range (0, 1). If the
                `clip` argument is set to `a` then all neuron outputs for the
                layer will be constrained to the range [a, 1 - a]. This can
                improve perceptron learning rate in some situations.

                After training the perceptron with a clipping value, `train` can
                be called again with clipping set to 0 to continue reducing the
                training error.

            `on_iteration` (callable):

                A callable object that accepts the perceptron as input and
                returns bool. If this argument is set, the object will be called
                at the end of each training iteration with the perceptron as its
                argument. If the callable returns True, training will terminate.

            `stdout`:

                An object with a `write` method that can be set to redirect
                training status messages somewhere other than stdout. To
                suppress output, set `stdout` to None.
        '''
        import itertools
        import os
        if stdout is None:
            stdout = open(os.devnull, 'w')
        try:
            self._set_scaling(X)
            for layer in self.layers:
                layer.dW_old = np.zeros_like(layer.dW)
            for iteration in range(max_iterations):
                self._reset_corrections()
                self.error = 0
                num_samples = 0
                num_correct = 0
                num_summed = 0
                for (x, t) in zip(X, Y):
                    num_samples += 1
                    num_summed += 1
                    num_correct += np.all(np.round(self.input(x, clip)) == t)
                    delta = np.array(t) - self.y
                    self.error += 0.5 * sum(delta**2)
                    # Determine incremental weight adjustments
                    self._update_dWs(t)
                    if batch > 0 and num_summed == batch:
                        self._adjust_weights(rate, momentum, num_summed,
                                             stdout)
                        num_summed = 0
                # In case a partial batch is remaining
                if batch > 0 and num_summed > 0:
                    self._adjust_weights(rate, momentum, num_summed, stdout)
                    num_summed = 0
                self.accuracy = 100. * num_correct / num_samples
                if on_iteration and on_iteration(self):
                    return True
                stdout.write('Iter % 5d: Accuracy = %.2f%% E = %f\n' %
                             (iteration, self.accuracy, self.error))
                if self.accuracy >= accuracy:
                    stdout.write('Network trained to %.1f%% sample accuracy '
                                 'in %d iterations.\n'
                                 % (self.accuracy, iteration + 1))
                    return True
                # If doing full batch learning (batch == 0)
                if num_summed > 0:
                    self._adjust_weights(rate, momentum, num_summed, stdout)
                    num_summed = 0
        except KeyboardInterrupt:
            stdout.write("KeyboardInterrupt: Terminating training.\n")
            self._reset_corrections()
            return False
        stdout.write('Terminating network training after %d iterations.\n' %
                     (iteration + 1))
        return False
    def _update_dWs(self, t):
        '''Update weight adjustment values for the current sample.'''
        # Output layer:
        #    dE/dy = t - y
        #    dz/dW = x
        layerK = self.layers[-1]
        layerK.delta = layerK.dy_da() * (t - self.y)
        layerK.dW += np.outer(layerK.delta, layerK.x)
        # Hidden layers
        for i in range(len(self.layers) - 2, -1, -1):
            (layerJ, layerK) = self.layers[i: i + 2]
            b = np.dot(layerK.delta, layerK.weights[:, 1:])
            layerJ.delta = layerJ.dy_da() * b
            layerJ.dW += np.outer(layerJ.delta, layerJ.x)
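    # --- Added note (derivation sketch, not in the original source) ---
    # For squared error E = 0.5 * sum((t - y)**2), the output-layer delta is
    #
    #     delta_k = g'(a_k) * (t_k - y_k)
    #
    # and each hidden delta is backpropagated through the non-bias weights:
    #
    #     delta_j = g'(a_j) * sum_k(delta_k * w_kj)
    #
    # The correction accumulated in dW is the outer product of delta and the
    # layer input x, which points down the error gradient; since weights are
    # later *incremented* by rate * dW, each update decreases E.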
    def _adjust_weights(self, rate, momentum, num_summed, stdout):
        '''Applies aggregated weight adjustments to the perceptron weights.'''
        if self.cache_weights:
            weights = [np.array(layer.weights) for layer in self.layers]
        try:
            if momentum > 0:
                for layer in self.layers:
                    layer.dW *= (float(rate) / num_summed)
                    layer.dW += momentum * layer.dW_old
                    layer.weights += layer.dW
                    (layer.dW_old, layer.dW) = (layer.dW, layer.dW_old)
            else:
                for layer in self.layers:
                    layer.dW *= (float(rate) / num_summed)
                    layer.weights += layer.dW
        except KeyboardInterrupt:
            if self.cache_weights:
                stdout.write('Interrupt during weight adjustment. Restoring '
                             'previous weights.\n')
                for i in range(len(weights)):
                    self.layers[i].weights = weights[i]
            else:
                stdout.write('Interrupt during weight adjustment. Weight '
                             'caching was disabled so current weights may '
                             'be corrupt.\n')
            raise
        finally:
            self._reset_corrections()

    def _reset_corrections(self):
        for layer in self.layers:
            layer.dW.fill(0)

    def _set_scaling(self, X):
        '''Sets translation/scaling of inputs to map X to the range [0, 1].'''
        mins = maxes = None
        for x in X:
            if mins is None:
                mins = x
                maxes = x
            else:
                mins = np.min([mins, x], axis=0)
                maxes = np.max([maxes, x], axis=0)
        self._offset = mins
        r = maxes - mins
        self._scale = 1. / np.where(r < self.min_input_diff, 1, r)
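    # --- Added note (worked example, not in the original source) ---
    # Example of the affine rescaling set up above, assuming two features with
    # observed ranges [0, 10] and [5, 5] (i.e., the second feature is constant):
    #
    #   X = [[0, 5], [10, 5]]
    #   offset = [0, 5];  r = [10, 0]
    #   scale  = [1/10, 1]    # near-constant features get scale 1, avoiding 1/0
    #
    # so an input [4, 5] maps to scale * ([4, 5] - offset) = [0.4, 0].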
# Sample data

xor_data = [
    [[0, 0], [0]],
    [[0, 1], [1]],
    [[1, 0], [1]],
    [[1, 1], [0]],
]

xor_data2 = [
    [[0, 0], [0, 1]],
    [[0, 1], [1, 0]],
    [[1, 0], [1, 0]],
    [[1, 1], [0, 1]],
]

and_data = [
    [[0, 0], [0]],
    [[0, 1], [0]],
    [[1, 0], [0]],
    [[1, 1], [1]],
]


def test_case(XY, shape, *args, **kwargs):
    (X, Y) = list(zip(*XY))
    p = Perceptron(shape)
    trained = p.train(X, Y, *args, **kwargs)
    return (trained, p)


def test_xor(*args, **kwargs):
    XY = xor_data
    shape = [2, 2, 1]
    return test_case(XY, shape, *args, **kwargs)


def test_xor222(*args, **kwargs):
    XY = xor_data2
    shape = [2, 2, 2]
    return test_case(XY, shape, *args, **kwargs)


def test_xor231(*args, **kwargs):
    XY = xor_data
    shape = [2, 3, 1]
    return test_case(XY, shape, *args, **kwargs)


def test_and(*args, **kwargs):
    XY = and_data
    shape = [2, 1]
    return test_case(XY, shape, *args, **kwargs)


if __name__ == '__main__':
    tests = [('AND (2x1)', test_and),
             ('XOR (2x2x1)', test_xor),
             ('XOR (2x2x2)', test_xor222),
             ('XOR (2x3x1)', test_xor231)]
    results = [test[1](5000)[0] for test in tests]
    nr = [(p[0][0], p[1]) for p in zip(tests, results)]
    print()
    print('Training results for 5000 iterations')
    print('------------------------------------')
    for (name, result) in nr:
        s = ['FAILED', 'PASSED'][result]
        print('{0:<20}: {1}'.format(name, s))
    if False in results:
        print('\nNote: XOR convergence for these small network sizes is')
        print('dependent on initial weights, which are randomized. Try')
        print('running the test again.')
| 35.910828 | 82 | 0.541504 | 2,079 | 16,914 | 4.346801 | 0.213564 | 0.004426 | 0.007967 | 0.008853 | 0.212128 | 0.182583 | 0.148058 | 0.130574 | 0.125484 | 0.117517 | 0 | 0.022576 | 0.353139 | 16,914 | 470 | 83 | 35.987234 | 0.8034 | 0.417938 | 0 | 0.21028 | 0 | 0 | 0.087026 | 0.004122 | 0 | 0 | 0 | 0 | 0 | 1 | 0.084112 | false | 0.004673 | 0.028037 | 0 | 0.186916 | 0.037383 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |