hexsha string | size int64 | ext string | lang string | max_stars_repo_path string | max_stars_repo_name string | max_stars_repo_head_hexsha string | max_stars_repo_licenses list | max_stars_count int64 | max_stars_repo_stars_event_min_datetime string | max_stars_repo_stars_event_max_datetime string | max_issues_repo_path string | max_issues_repo_name string | max_issues_repo_head_hexsha string | max_issues_repo_licenses list | max_issues_count int64 | max_issues_repo_issues_event_min_datetime string | max_issues_repo_issues_event_max_datetime string | max_forks_repo_path string | max_forks_repo_name string | max_forks_repo_head_hexsha string | max_forks_repo_licenses list | max_forks_count int64 | max_forks_repo_forks_event_min_datetime string | max_forks_repo_forks_event_max_datetime string | content string | avg_line_length float64 | max_line_length int64 | alphanum_fraction float64 | qsc_code_num_words_quality_signal int64 | qsc_code_num_chars_quality_signal float64 | qsc_code_mean_word_length_quality_signal float64 | qsc_code_frac_words_unique_quality_signal float64 | qsc_code_frac_chars_top_2grams_quality_signal float64 | qsc_code_frac_chars_top_3grams_quality_signal float64 | qsc_code_frac_chars_top_4grams_quality_signal float64 | qsc_code_frac_chars_dupe_5grams_quality_signal float64 | qsc_code_frac_chars_dupe_6grams_quality_signal float64 | qsc_code_frac_chars_dupe_7grams_quality_signal float64 | qsc_code_frac_chars_dupe_8grams_quality_signal float64 | qsc_code_frac_chars_dupe_9grams_quality_signal float64 | qsc_code_frac_chars_dupe_10grams_quality_signal float64 | qsc_code_frac_chars_replacement_symbols_quality_signal float64 | qsc_code_frac_chars_digital_quality_signal float64 | qsc_code_frac_chars_whitespace_quality_signal float64 | qsc_code_size_file_byte_quality_signal float64 | qsc_code_num_lines_quality_signal float64 | qsc_code_num_chars_line_max_quality_signal float64 | qsc_code_num_chars_line_mean_quality_signal float64 | 
qsc_code_frac_chars_alphabet_quality_signal float64 | qsc_code_frac_chars_comments_quality_signal float64 | qsc_code_cate_xml_start_quality_signal float64 | qsc_code_frac_lines_dupe_lines_quality_signal float64 | qsc_code_cate_autogen_quality_signal float64 | qsc_code_frac_lines_long_string_quality_signal float64 | qsc_code_frac_chars_string_length_quality_signal float64 | qsc_code_frac_chars_long_word_length_quality_signal float64 | qsc_code_frac_lines_string_concat_quality_signal float64 | qsc_code_cate_encoded_data_quality_signal float64 | qsc_code_frac_chars_hex_words_quality_signal float64 | qsc_code_frac_lines_prompt_comments_quality_signal float64 | qsc_code_frac_lines_assert_quality_signal float64 | qsc_codepython_cate_ast_quality_signal float64 | qsc_codepython_frac_lines_func_ratio_quality_signal float64 | qsc_codepython_cate_var_zero_quality_signal bool | qsc_codepython_frac_lines_pass_quality_signal float64 | qsc_codepython_frac_lines_import_quality_signal float64 | qsc_codepython_frac_lines_simplefunc_quality_signal float64 | qsc_codepython_score_lines_no_logic_quality_signal float64 | qsc_codepython_frac_lines_print_quality_signal float64 | qsc_code_num_words int64 | qsc_code_num_chars int64 | qsc_code_mean_word_length int64 | qsc_code_frac_words_unique null | qsc_code_frac_chars_top_2grams int64 | qsc_code_frac_chars_top_3grams int64 | qsc_code_frac_chars_top_4grams int64 | qsc_code_frac_chars_dupe_5grams int64 | qsc_code_frac_chars_dupe_6grams int64 | qsc_code_frac_chars_dupe_7grams int64 | qsc_code_frac_chars_dupe_8grams int64 | qsc_code_frac_chars_dupe_9grams int64 | qsc_code_frac_chars_dupe_10grams int64 | qsc_code_frac_chars_replacement_symbols int64 | qsc_code_frac_chars_digital int64 | qsc_code_frac_chars_whitespace int64 | qsc_code_size_file_byte int64 | qsc_code_num_lines int64 | qsc_code_num_chars_line_max int64 | qsc_code_num_chars_line_mean int64 | qsc_code_frac_chars_alphabet int64 | qsc_code_frac_chars_comments int64 | 
qsc_code_cate_xml_start int64 | qsc_code_frac_lines_dupe_lines int64 | qsc_code_cate_autogen int64 | qsc_code_frac_lines_long_string int64 | qsc_code_frac_chars_string_length int64 | qsc_code_frac_chars_long_word_length int64 | qsc_code_frac_lines_string_concat null | qsc_code_cate_encoded_data int64 | qsc_code_frac_chars_hex_words int64 | qsc_code_frac_lines_prompt_comments int64 | qsc_code_frac_lines_assert int64 | qsc_codepython_cate_ast int64 | qsc_codepython_frac_lines_func_ratio int64 | qsc_codepython_cate_var_zero int64 | qsc_codepython_frac_lines_pass int64 | qsc_codepython_frac_lines_import int64 | qsc_codepython_frac_lines_simplefunc int64 | qsc_codepython_score_lines_no_logic int64 | qsc_codepython_frac_lines_print int64 | effective string | hits int64 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
dbad96b0fa05c373ff9f7995b182a8597ec11299 | 1,387 | py | Python | src/django/giraffe/blat/management/commands/reset_app.py | addgene/giraffe | c7d3b1f000ceea83e6c98cce06cd2a0f9e4f4c2c | [
"MIT"
] | 4 | 2016-10-13T15:46:06.000Z | 2018-08-22T21:43:28.000Z | src/django/giraffe/blat/management/commands/reset_app.py | addgene/giraffe | c7d3b1f000ceea83e6c98cce06cd2a0f9e4f4c2c | [
"MIT"
] | null | null | null | src/django/giraffe/blat/management/commands/reset_app.py | addgene/giraffe | c7d3b1f000ceea83e6c98cce06cd2a0f9e4f4c2c | [
"MIT"
] | 1 | 2015-07-26T21:42:31.000Z | 2015-07-26T21:42:31.000Z | from django.core.management.base import AppCommand, CommandError
from django.core.management.sql import sql_reset
from django.core.management.color import no_style
from django.db import connections
class Command(AppCommand):
help = "**********\nThis command resets data for any django app, the difference with the built-in command\n\n '$ python manage.py reset <app_name>'\n\nis that when a sql statement fails, it jumps to the next statement generated by command\n\n '$ python manage.py sqlreset <app_name>'\n\nUseful when the original reset fail when droping CONSTRAINTS\n**********"
output_transaction = True
def handle_app(self, app, **options):
connection = connections['default']
self.style = no_style()
custom_reset_statements = sql_reset(app, self.style, connection)
cursor = connection.cursor()
def execute_sqlreset():
failed_statements = []
for sql in custom_reset_statements:
print 'statement>>>> ' + sql
try:
cursor.execute(sql)
except Exception,e:
if e[0] == 1025:
failed_statements.append(sql)
if failed_statements:
print "These statements failed: "
for s in failed_statements:
print s
execute_sqlreset()
| 47.827586 | 370 | 0.626532 | 165 | 1,387 | 5.157576 | 0.466667 | 0.047004 | 0.049354 | 0.084606 | 0.054054 | 0.054054 | 0 | 0 | 0 | 0 | 0 | 0.005035 | 0.284066 | 1,387 | 28 | 371 | 49.535714 | 0.851964 | 0 | 0 | 0 | 0 | 0.038462 | 0.290555 | 0.031723 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0 | 0.153846 | null | null | 0.115385 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
dbb81ecf1571a74c986e0ef5e76802273692f79e | 1,106 | py | Python | data_interrogator/admin/views.py | s-i-l-k-e/django-data-interrogator | 0284168b81aaa31a8df84f3ea52166eded8a4362 | [
"MIT"
] | null | null | null | data_interrogator/admin/views.py | s-i-l-k-e/django-data-interrogator | 0284168b81aaa31a8df84f3ea52166eded8a4362 | [
"MIT"
] | null | null | null | data_interrogator/admin/views.py | s-i-l-k-e/django-data-interrogator | 0284168b81aaa31a8df84f3ea52166eded8a4362 | [
"MIT"
] | null | null | null | from django.contrib.auth.decorators import user_passes_test
from django.utils.decorators import method_decorator
from data_interrogator.admin.forms import AdminInvestigationForm, AdminPivotTableForm
from data_interrogator.interrogators import Allowable
from data_interrogator.views import InterrogationView, InterrogationAutocompleteUrls, PivotTableView, \
InterrogationAutoComplete
class AdminInterrogationRoom(InterrogationView):
template_name = 'admin/analytics/analytics.html'
form_class = AdminInvestigationForm
report_models = Allowable.ALL_MODELS
allowed = Allowable.ALL_APPS
excluded = []
@method_decorator(user_passes_test(lambda u: u.is_superuser))
def get(self, request):
return super(AdminInterrogationRoom,self).get(request)
class AdminInterrogationAutocompleteUrls(InterrogationAutocompleteUrls):
interrogator_view_class = AdminInterrogationRoom
interrogator_autocomplete_class = InterrogationAutoComplete
class AdminPivotTableView(PivotTableView):
form_class = AdminPivotTableForm
template_name = 'admin/analytics/pivot.html'
| 35.677419 | 103 | 0.824593 | 104 | 1,106 | 8.567308 | 0.509615 | 0.026936 | 0.06734 | 0.058361 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.118445 | 1,106 | 30 | 104 | 36.866667 | 0.913846 | 0 | 0 | 0 | 0 | 0 | 0.050633 | 0.050633 | 0 | 0 | 0 | 0 | 0 | 1 | 0.047619 | false | 0.095238 | 0.238095 | 0.047619 | 0.904762 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 1 |
dbb832b244c092d5e626be322221a0dd99c61a02 | 327 | py | Python | configs/pspnet/pspnet_r18-d8_512x512_80k_loveda.py | heytanay/mmsegmentation | 7ddd2fe2ecff9c95999bd00ec05cc37eafb558f8 | [
"Apache-2.0"
] | 11 | 2022-02-04T01:09:45.000Z | 2022-03-08T05:49:16.000Z | configs/pspnet/pspnet_r18-d8_512x512_80k_loveda.py | heytanay/mmsegmentation | 7ddd2fe2ecff9c95999bd00ec05cc37eafb558f8 | [
"Apache-2.0"
] | 2 | 2022-02-25T03:07:23.000Z | 2022-03-08T12:54:05.000Z | configs/pspnet/pspnet_r18-d8_512x512_80k_loveda.py | heytanay/mmsegmentation | 7ddd2fe2ecff9c95999bd00ec05cc37eafb558f8 | [
"Apache-2.0"
] | 2 | 2021-04-23T05:32:00.000Z | 2021-11-11T02:45:08.000Z | _base_ = './pspnet_r50-d8_512x512_80k_loveda.py'
model = dict(
backbone=dict(
depth=18,
init_cfg=dict(
type='Pretrained', checkpoint='open-mmlab://resnet18_v1c')),
decode_head=dict(
in_channels=512,
channels=128,
),
auxiliary_head=dict(in_channels=256, channels=64))
| 27.25 | 72 | 0.629969 | 40 | 327 | 4.85 | 0.775 | 0.082474 | 0.103093 | 0.185567 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.108 | 0.235474 | 327 | 11 | 73 | 29.727273 | 0.668 | 0 | 0 | 0 | 0 | 0 | 0.220183 | 0.189602 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
dbbba499caecc6c455f90595eccf7b64b710a2e3 | 263 | py | Python | apps/utils/format/url_format.py | think-wang/osroom | 67bb5bbd7a63fbaeb0d919738859444b54500152 | [
"BSD-2-Clause"
] | 1 | 2020-04-03T08:01:07.000Z | 2020-04-03T08:01:07.000Z | apps/utils/format/url_format.py | dhgdhg/osroom | 4d693eaab96503cadd391bf924bffedcd931a07c | [
"BSD-2-Clause"
] | null | null | null | apps/utils/format/url_format.py | dhgdhg/osroom | 4d693eaab96503cadd391bf924bffedcd931a07c | [
"BSD-2-Clause"
] | null | null | null | #!/usr/bin/env python
# -*-coding:utf-8-*-
from tld import get_tld
__author__ = "Allen Woo"
def get_domain(url):
'''
获取url中的全域名
:param url:
:return:
'''
res = get_tld(url, as_object=True)
return "{}.{}".format(res.subdomain, res.tld) | 18.785714 | 49 | 0.604563 | 36 | 263 | 4.194444 | 0.722222 | 0.07947 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.004831 | 0.212928 | 263 | 14 | 49 | 18.785714 | 0.724638 | 0.269962 | 0 | 0 | 0 | 0 | 0.083333 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.2 | false | 0 | 0.2 | 0 | 0.6 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 |
dbbca7079e41d333542d3d27bb46afa6aecbe834 | 1,580 | py | Python | test/test_catalog_manager.py | weknowtraining/athena-glue-service-logs | b7cf77408486f2bfa941b8609617ed47aa3e2d02 | [
"Apache-2.0"
] | 133 | 2018-09-17T12:43:14.000Z | 2022-03-15T20:03:12.000Z | test/test_catalog_manager.py | weknowtraining/athena-glue-service-logs | b7cf77408486f2bfa941b8609617ed47aa3e2d02 | [
"Apache-2.0"
] | 22 | 2018-11-19T21:51:04.000Z | 2022-03-08T12:13:19.000Z | test/test_catalog_manager.py | weknowtraining/athena-glue-service-logs | b7cf77408486f2bfa941b8609617ed47aa3e2d02 | [
"Apache-2.0"
] | 46 | 2018-10-04T04:27:26.000Z | 2022-03-01T03:28:38.000Z | # pylint: skip-file
from athena_glue_service_logs.catalog_manager import BaseCatalogManager
def test_class_init(mocker):
mocker.patch.multiple(BaseCatalogManager, __abstractmethods__=set())
base_catalog = BaseCatalogManager('us-west-2', 'dbname', 'tablename', 's3://somewhere')
assert base_catalog.database_name == 'dbname'
assert base_catalog.s3_location == 's3://somewhere'
assert base_catalog.table_name == 'tablename'
def test_init_with_partitions(mocker):
mocker.patch.multiple(BaseCatalogManager, __abstractmethods__=set())
mocker.patch('athena_glue_service_logs.catalog_manager.BaseCatalogManager.does_database_exist', return_value=True)
mocker.patch('athena_glue_service_logs.catalog_manager.BaseCatalogManager.create_database')
mocker.patch('athena_glue_service_logs.catalog_manager.BaseCatalogManager.create_table')
mocker.patch('athena_glue_service_logs.catalog_manager.BaseCatalogManager.create_partitions')
base_catalog = BaseCatalogManager('us-west-2', 'dbname', 'tablename', 's3://somewhere')
base_catalog.initialize_with_partitions(['a', 'b', 'c'])
assert BaseCatalogManager.create_database.call_count == 0
BaseCatalogManager.create_table.assert_called_once()
BaseCatalogManager.create_partitions.assert_called_once_with(partition_list=['a', 'b', 'c'])
mocker.patch('athena_glue_service_logs.catalog_manager.BaseCatalogManager.does_database_exist', return_value=False)
base_catalog.initialize_with_partitions(['a', 'b', 'c'])
assert BaseCatalogManager.create_database.call_count == 1
| 50.967742 | 119 | 0.79557 | 185 | 1,580 | 6.405405 | 0.297297 | 0.064979 | 0.086076 | 0.106329 | 0.751055 | 0.71308 | 0.683544 | 0.580591 | 0.580591 | 0.580591 | 0 | 0.005571 | 0.091139 | 1,580 | 30 | 120 | 52.666667 | 0.819638 | 0.010759 | 0 | 0.285714 | 0 | 0 | 0.317745 | 0.244715 | 0 | 0 | 0 | 0 | 0.333333 | 1 | 0.095238 | false | 0 | 0.047619 | 0 | 0.142857 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
dbc13915cb653c37c09279f81347a4bfea838dd2 | 3,686 | py | Python | src_taxonomy/bubble_tree_map.py | sanja7s/SR_Twitter | 2eb499c9aa25ba6e9860cd77eac6832890d2c126 | [
"MIT"
] | null | null | null | src_taxonomy/bubble_tree_map.py | sanja7s/SR_Twitter | 2eb499c9aa25ba6e9860cd77eac6832890d2c126 | [
"MIT"
] | null | null | null | src_taxonomy/bubble_tree_map.py | sanja7s/SR_Twitter | 2eb499c9aa25ba6e9860cd77eac6832890d2c126 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: UTF-8 -*-
import random
from ete2 import Tree, TreeStyle, NodeStyle, faces, AttrFace, CircleFace, TextFace
def layout(node):
if not node.is_root():
# Add node name to laef nodes
#N = AttrFace("name", fsize=14, fgcolor="black")
#faces.add_face_to_node(N, node, 0)
#pass
faces.add_face_to_node(TextFace(node.name), node, 0)
if "weight" in node.features:
# Creates a sphere face whose size is proportional to node's
# feature "weight"
C = CircleFace(radius=node.weight, color="RoyalBlue", style="sphere")
# Let's make the sphere transparent
C.opacity = 0.3
# And place as a float face over the tree
faces.add_face_to_node(C, node, 0, position="float")
def give_tree_layout(t):
# Some random features in all nodes
for n in t.traverse():
n.add_features(weight=n.dist*20)
# Create an empty TreeStyle
ts = TreeStyle()
# Set our custom layout function
ts.layout_fn = layout
# Draw a tree
#ts.mode = "c"
#ts.arc_start = -180
#ts.arc_span = 180
# We will add node names manually
#ts.show_leaf_name = True
# Show branch data
#ts.show_branch_length = True
#ts.show_branch_support = True
return ts
class Tree7s(object):
def __init__(self, lab):
self.root = Node7s(lab, 0, 0)
def find_root(self):
return self.root
class Node7s(object):
def __init__(self, data, score, lev):
self.data = data
self.score = score
self.level = lev
self.children = []
def add_child(self, lab, score, lev):
if int(self.level) == int(lev-1):
nn = self.find_child(lab)
if nn == None:
self.children.append(Node7s(lab, score, lev))
else:
nn.increase_score(score)
else:
print "Trying to add to a wrong level?", lev-1, self.level, lab, self.data
def find_child(self, label):
for el in self.children:
if el.data == label:
return el
return None
def increase_score(self, sc):
self.score += sc
def print_me(self):
print self.data, self.score
for el in self.children:
el.print_me()
def create_newick(self):
if self.children == []:
return str(self.data + ":" + str(self.score))
newick = "("
for el in self.children:
newick += el.create_newick() + ","
newick = newick[:-1]
if self.level == 0:
newick += ")" + str(self.data) + "."
else:
newick += ")" + str(self.data) + ":" + str(self.score)
return newick
def test_data():
D = {'taxonomy': [{"score": "0.718868", "label": "/art and entertainment/movies and tv/movies"},\
{"confident": "no", "score": "0.304296", "label": "/pets/cats"},\
{"score": "0.718868", "label": "/art and entertainment/movies and tv/series"}]}
t7s = Tree7s("ThingAdamsFamily")
for el in D["taxonomy"]:
#n = t7s
n = t7s.find_root()
taxonomy_tree = el["label"]
taxonomy_tree = taxonomy_tree.split("/")
taxonomy_tree.pop(0)
levels = len(taxonomy_tree)
score = float(el["score"])
print levels, taxonomy_tree, score
for i in range(levels):
label = taxonomy_tree[i]
#if n.find_child(label) == None:
n.add_child(label, score, i+1)
n = n.find_child(label)
t7s.find_root().print_me()
t = t7s.find_root()
S = t.create_newick() + ";"
print S
#S = "(((A,B,(C.,D)E)F,(S,N)K)R);"
#T = Tree(S, format=8)
T = Tree(S, format=1)
for node in T.traverse("postorder"):
# Do some analysis on node
print node.name
for node in T.traverse("levelorder"):
# Do some analysis on node
print node.name
#for branch in T
return T
if __name__ == "__main__":
#t.render("bubble_map.png", w=600, dpi=300, tree_style=ts)
#t.show(tree_style=ts)
t = test_data()
ts = give_tree_layout(t)
t.show(tree_style=ts)
t.render("bubble_map.png", w=600, dpi=300, tree_style=ts) | 24.091503 | 98 | 0.655724 | 588 | 3,686 | 3.986395 | 0.292517 | 0.023891 | 0.011945 | 0.017918 | 0.200085 | 0.138652 | 0.104949 | 0.104949 | 0.104949 | 0.074232 | 0 | 0.023435 | 0.189636 | 3,686 | 153 | 99 | 24.091503 | 0.761299 | 0.223549 | 0 | 0.088889 | 0 | 0 | 0.109463 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0 | 0.022222 | null | null | 0.1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
dbc290ad28df369cc2a5189c66e670824982c619 | 28,719 | py | Python | compass/core/_scrapers/member.py | MrNoScript/compass-interface-core | 8c945ef36f7bee396bd5a744404eaa88d280a845 | [
"MIT"
] | null | null | null | compass/core/_scrapers/member.py | MrNoScript/compass-interface-core | 8c945ef36f7bee396bd5a744404eaa88d280a845 | [
"MIT"
] | null | null | null | compass/core/_scrapers/member.py | MrNoScript/compass-interface-core | 8c945ef36f7bee396bd5a744404eaa88d280a845 | [
"MIT"
] | null | null | null | from __future__ import annotations
import re
import time
from typing import get_args, Literal, TYPE_CHECKING, Union
from lxml import html
from compass.core.interface_base import InterfaceBase
from compass.core.logger import logger
from compass.core.schemas import member as schema
from compass.core.settings import Settings
from compass.core.utility import cast
from compass.core.utility import maybe_int
from compass.core.utility import parse
if TYPE_CHECKING:
import requests
MEMBER_PROFILE_TAB_TYPES = Literal[
"Personal", "Roles", "Permits", "Training", "Awards", "Emergency", "Comms", "Visibility", "Disclosures"
]
class PeopleScraper(InterfaceBase):
"""Class directly interfaces with Compass operations to extract member data.
Compass's MemberProfile.aspx has 13 tabs:
1. Personal Details (No Key)
2. Your Children (Page=CHILD)
3. Roles (Page=ROLES)
4. Permits (Page=PERMITS)
5. Training (Page=TRAINING)
6. Awards (Page=AWARDS)
7. Youth Badges/Awards (Page=BADGES)
8. Event Invitations (Page=EVENTS)
9. Emergency Details (Page=EMERGENCY)
10. Communications (Page=COMMS)
11. Visibility (Page=VISIBILITY)
12. Disclosures (Page=DISCLOSURES)
13. Parents/Guardians (Page=PARENT)
Of these, tabs 2, 7, 8, 13 are disabled functionality.
Tab 11 (Visibility) is only shown on the members' own profile.
For member-adjdacent operations there are additional endpoints:
- /Popups/Profile/AssignNewRole.aspx
- /Popups/Maint/NewPermit.aspx
- /Popups/Profile/EditProfile.aspx
Currently we only use one of these endpoints (AssignNewRole), as all
other data we need can be found from the MemberProfile tabs.
All functions in the class output native types.
"""
def __init__(self, session: requests.Session, validate: bool = False):
"""Constructor for PeopleScraper.
takes an initialised Session object from Logon
"""
super().__init__(session)
self.validate = validate
def _get_member_profile_tab(self, membership_num: int, profile_tab: MEMBER_PROFILE_TAB_TYPES) -> bytes:
"""Returns data from a given tab in MemberProfile for a given member.
Args:
membership_num: Membership Number to use
profile_tab: Tab requested from Compass
Returns:
A dict with content and encoding, e.g.:
{"content": b"...", "encoding": "utf-8"}
Both keys will always be present.
Raises:
ValueError: The given profile_tab value is illegal
Todo:
Other possible exceptions? i.e. from Requests
"""
profile_tab = profile_tab.upper()
tabs = tuple(tab.upper() for tab in get_args(MEMBER_PROFILE_TAB_TYPES))
url = f"{Settings.base_url}/MemberProfile.aspx?CN={membership_num}"
if profile_tab == "PERSONAL": # Personal tab has no key so is a special case
response = self._get(url)
elif profile_tab in tabs:
url += f"&Page={profile_tab}&TAB"
response = self._get(url)
else:
raise ValueError(f"Specified member profile tab {profile_tab} is invalid. Allowed values are {tabs}")
return response.content
def get_personal_tab(self, membership_num: int) -> Union[schema.MemberDetails, dict]:
"""Returns data from Personal Details tab for a given member.
Args:
membership_num: Membership Number to use
Returns:
A dict mapping keys to the corresponding data from the personal
data tab.
For example:
{'membership_number': ...,
'forenames': '...',
'surname': '...',
'main_phone': '...',
'main_email': '...',
'name': '...',
'known_as': '...',
'join_date': datetime.datetime(...),
'sex': '...',
'birth_date': datetime.datetime(...),
'nationality': '...',
'ethnicity': '...',
'religion': '...',
'occupation': '...',
'address': '...'}
Keys will be present only if valid data could be extracted and
parsed from Compass.
Raises:
PermissionError:
Access to the member is not given by the current authentication
Todo:
Other possible exceptions? i.e. from Requests
"""
response = self._get_member_profile_tab(membership_num, "Personal")
tree = html.fromstring(response)
if tree.forms[0].action == "./ScoutsPortal.aspx?Invalid=AccessCN":
raise PermissionError(f"You do not have permission to the details of {membership_num}")
details = dict()
# ### Extractors
# ## Core:
details["membership_number"] = membership_num
# Name(s)
names = tree.xpath("//title//text()")[0].strip().split(" ")[3:]
details["forenames"] = names[0]
details["surname"] = " ".join(names[1:])
# Main Phone
details["main_phone"] = tree.xpath('string(//*[text()="Phone"]/../../../td[3])')
# Main Email
details["main_email"] = tree.xpath('string(//*[text()="Email"]/../../../td[3])')
# ## Core - Positional:
# Full Name
details["name"] = tree.xpath("string(//*[@id='divProfile0']//tr[1]/td[2]/label)")
# Known As
details["known_as"] = tree.xpath("string(//*[@id='divProfile0']//tr[2]/td[2]/label)")
# Join Date # TODO Unknown - take date from earliest role?
join_date_str = tree.xpath("string(//*[@id='divProfile0']//tr[4]/td[2]/label)")
details["join_date"] = parse(join_date_str) if join_date_str != "Unknown" else None
# ## Position Varies, only if authorised:
# Gender
details["sex"] = tree.xpath("string(//*[@id='divProfile0']//*[text()='Gender:']/../../td[2])")
# DOB
details["birth_date"] = parse(tree.xpath("string(//*[@id='divProfile0']//*[text()='Date of Birth:']/../../td[2])"))
# Nationality
details["nationality"] = tree.xpath("string(//*[@id='divProfile0']//*[text()='Nationality:']/../../td[2])")
# Ethnicity
details["ethnicity"] = tree.xpath("normalize-space(//*[@id='divProfile0']//*[text()='Ethnicity:']/../../td[2])")
# Religion
details["religion"] = tree.xpath("normalize-space(//*[@id='divProfile0']//*[text()='Religion/Faith:']/../../td[2])")
# Occupation
details["occupation"] = tree.xpath("normalize-space(//*[@id='divProfile0']//*[text()='Occupation:']/../../td[2])")
# Address
details["address"] = tree.xpath('string(//*[text()="Address"]/../../../td[3])')
# Filter out keys with no value.
details = {k: v for k, v in details.items() if v}
if self.validate:
return schema.MemberDetails.parse_obj(details)
else:
return details
def get_roles_tab(self, membership_num: int, keep_non_volunteer_roles: bool = False) -> Union[schema.MemberRolesDict, dict]:
"""Returns data from Roles tab for a given member.
Sanitises the data to a common format, and removes Occasional Helper, Network, and PVG roles by default.
Args:
membership_num: Membership Number to use
keep_non_volunteer_roles: Keep Helper (OH/PVG) & Network roles?
Returns:
A dict of dicts mapping keys to the corresponding data from the roles tab.
E.g.:
{1234578:
{'role_number': 1234578,
'membership_number': ...,
'role_title': '...',
'role_class': '...',
'role_type': '...',
'location_id': ...,
'location_name': '...',
'role_start_date': datetime.datetime(...),
'role_end': datetime.datetime(...),
'role_status': '...'},
{...}
}
Keys will always be present.
Raises:
PermissionError:
Access to the member is not given by the current authentication
Todo:
Other possible exceptions? i.e. from Requests
primary_role
"""
logger.debug(f"getting roles tab for member number: {membership_num}")
response = self._get_member_profile_tab(membership_num, "Roles")
tree = html.fromstring(response)
if tree.forms[0].action == "./ScoutsPortal.aspx?Invalid=AccessCN":
raise PermissionError(f"You do not have permission to the details of {membership_num}")
roles_data = {}
rows = tree.xpath("//tbody/tr")
for row in rows:
# Get children (cells in row)
cells = list(row) # filter out empty elements
# If current role allows selection of role for editing, remove tickbox
if any(el.tag == "input" for el in cells[0]):
cells.pop(0)
role_number = int(row.get("data-pk"))
status_with_review = cells[5].text_content().strip()
if status_with_review.startswith("Full Review Due "):
role_status = "Full"
review_date = parse(status_with_review.removeprefix("Full Review Due "))
else:
role_status = status_with_review
review_date = None
role_details = dict(
role_number=role_number,
membership_number=membership_num,
role_title=cells[0].text_content().strip(),
role_class=cells[1].text_content().strip(),
# role_type only visible if access to System Admin tab
role_type=[*row.xpath("./td[1]/*/@title"), None][0],
# location_id only visible if role is in hierarchy AND location still exists
location_id=cells[2][0].get("data-ng_id"),
location_name=cells[2].text_content().strip(),
role_start=parse(cells[3].text_content().strip()),
role_end=parse(cells[4].text_content().strip()),
role_status=role_status,
review_date=review_date,
can_view_details=any("VIEWROLE" in el.get("class") for el in cells[6]),
)
# Remove OHs etc from list
if not keep_non_volunteer_roles and (
"helper" in role_details["role_class"].lower()
or {role_details["role_title"].lower()} <= {"occasional helper", "pvg", "network member"}
):
continue
roles_data[role_number] = role_details
if self.validate:
return schema.MemberRolesDict.parse_obj(roles_data)
else:
return roles_data
def get_training_tab(
self, membership_num: int, ongoing_only: bool = False
) -> Union[schema.MemberTrainingTab, schema.MemberMOGLList, dict]:
"""Returns data from Training tab for a given member.
Args:
membership_num: Membership Number to use
ongoing_only: Return a dataframe of role training & OGL info? Otherwise returns all data
Returns:
A dict mapping keys to the corresponding data from the training
tab.
E.g.:
{'roles': {1234567: {'role_number': 1234567,
'role_title': '...',
'role_start': datetime.datetime(...),
'role_status': '...',
'location': '...',
'ta_data': '...',
'ta_number': '...',
'ta_name': '...',
'completion': '...',
'wood_badge_number': '...'},
...},
'plps': {1234567: [{'pk': 6142511,
'module_id': ...,
'code': '...',
'name': '...',
'learning_required': False,
'learning_method': '...',
'learning_completed': '...',
'validated_membership_number': '...',
'validated_name': '...'},
...],
...},
'mandatory': {'GDPR':
{'name': 'GDPR',
'completed_date': datetime.datetime(...)},
...}}
Keys will always be present.
Todo:
Other possible exceptions? i.e. from Requests
"""
# pylint: disable=too-many-locals,too-many-statements
response = self._get_member_profile_tab(membership_num, "Training")
tree = html.fromstring(response)
rows = tree.xpath("//table[@id='tbl_p5_TrainModules']/tr")
training_plps = {}
training_roles = {}
for row in rows:
# Personal Learning Plan (PLP) data
if "trPLP" in row.classes:
plp = row
plp_table = plp.getchildren()[0].getchildren()[0]
plp_data = []
for module_row in plp_table:
if module_row.get("class") != "msTR trMTMN":
continue
module_data = {}
child_nodes = list(module_row)
module_data["pk"] = int(module_row.get("data-pk"))
module_data["module_id"] = int(child_nodes[0].get("id")[4:])
matches = re.match(r"^([A-Z0-9]+) - (.+)$", child_nodes[0].text_content()).groups()
if matches:
module_data["code"] = str(matches[0])
module_data["name"] = matches[1]
# Skip processing if we only want ongoing learning data and the module is not GDPR.
if ongoing_only and "gdpr" not in module_data["code"].lower():
continue
learning_required = child_nodes[1].text_content().lower()
module_data["learning_required"] = "yes" in learning_required if learning_required else None
module_data["learning_method"] = child_nodes[2].text_content() or None
module_data["learning_completed"] = parse(child_nodes[3].text_content())
module_data["learning_date"] = parse(child_nodes[3].text_content())
validated_by_string = child_nodes[4].text_content()
if validated_by_string:
# Add empty item to prevent IndexError
validated_by_data = validated_by_string.split(" ", maxsplit=1) + [""]
module_data["validated_membership_number"] = maybe_int(validated_by_data[0])
module_data["validated_name"] = validated_by_data[1]
module_data["validated_date"] = parse(child_nodes[5].text_content())
plp_data.append(module_data)
training_plps[int(plp_table.get("data-pk"))] = plp_data
# Role data
if "msTR" in row.classes:
role = row
child_nodes = list(role)
info = {} # NoQA
info["role_number"] = int(role.xpath("./@data-ng_mrn")[0])
info["role_title"] = child_nodes[0].text_content()
info["role_start"] = parse(child_nodes[1].text_content())
status_with_review = child_nodes[2].text_content()
if status_with_review.startswith("Full (Review Due: "):
info["role_status"] = "Full"
info["review_date"] = parse(status_with_review.removeprefix("Full (Review Due: ").removesuffix(")"))
else:
info["role_status"] = status_with_review
info["review_date"] = None
info["location"] = child_nodes[3].text_content()
training_advisor_string = child_nodes[4].text_content()
if training_advisor_string:
info["ta_data"] = training_advisor_string
# Add empty item to prevent IndexError
training_advisor_data = training_advisor_string.split(" ", maxsplit=1) + [""]
info["ta_number"] = maybe_int(training_advisor_data[0])
info["ta_name"] = training_advisor_data[1]
completion_string = child_nodes[5].text_content()
if completion_string:
info["completion"] = completion_string
parts = completion_string.split(":")
info["completion_type"] = parts[0].strip()
info["completion_date"] = parse(parts[1].strip())
assert len(parts) <= 2, parts[2:]
# info["ct"] = parts[3:] # TODO what is this? From CompassRead.php
info["wood_badge_number"] = child_nodes[5].get("id", "").removeprefix("WB_") or None
training_roles[info["role_number"]] = info
# Handle GDPR:
# Get latest GDPR date
training_ogl = {
"GDPR": dict(
name="GDPR",
completed_date=next(
reversed(
sorted(mod["validated_date"] for plp in training_plps.values() for mod in plp if mod["code"] == "GDPR")
),
None,
),
),
}
for ongoing_learning in tree.xpath("//tr[@data-ng_code]"):
cell_text = {c.get("id", "<None>").split("_")[0]: c.text_content() for c in ongoing_learning}
training_ogl[ongoing_learning.get("data-ng_code")] = dict(
name=cell_text.get("<None>"),
completed_date=parse(cell_text.get("tdLastComplete")),
renewal_date=parse(cell_text.get("tdRenewal")),
)
# TODO missing data-pk from list(cell)[0].tag == "input", and module names/codes. Are these important?
if ongoing_only:
return schema.MemberMOGLList.parse_obj(training_ogl) if self.validate else training_ogl
training_data = {
"roles": training_roles,
"plps": training_plps,
"mandatory": training_ogl,
}
return schema.MemberTrainingTab.parse_obj(training_data) if self.validate else training_data
def get_permits_tab(self, membership_num: int) -> Union[schema.MemberPermitsList, list]:
"""Returns data from Permits tab for a given member.
If a permit has been revoked, the expires value is None and the status is PERM_REV
Args:
membership_num: Membership Number to use
Returns:
A list of dicts mapping keys to the corresponding data from the
permits tab.
Keys will always be present.
Todo:
Other possible exceptions? i.e. from Requests
"""
response = self._get_member_profile_tab(membership_num, "Permits")
tree = html.fromstring(response)
# Get rows with permit content
rows = tree.xpath('//table[@id="tbl_p4_permits"]//tr[@class="msTR msTRPERM"]')
permits = []
for row in rows:
permit = dict(membership_number=membership_num)
child_nodes = list(row)
permit["permit_type"] = child_nodes[1].text_content()
permit["category"] = child_nodes[2].text_content()
permit["type"] = child_nodes[3].text_content()
permit["restrictions"] = child_nodes[4].text_content()
expires = child_nodes[5].text_content()
permit["expires"] = parse(expires) if expires != "Revoked" else None
permit["status"] = child_nodes[5].get("class")
permits.append(permit)
if self.validate:
return schema.MemberPermitsList.parse_obj(permits)
else:
return permits
    # See getAppointment in PGS\Needle
    def get_roles_detail(
        self, role_number: int, response: Union[str, requests.Response] = None
    ) -> Union[schema.MemberRolePopup, dict]:
        """Returns detailed data from a given role number.

        Args:
            role_number: Role Number to use
            response: Pre-generated response to use

        Returns:
            A dict mapping keys to the corresponding data from the
            role detail data.

            E.g.:
            {'hierarchy': {'organisation': 'The Scout Association',
                           'country': '...',
                           'region': '...',
                           'county': '...',
                           'district': '...',
                           'group': '...',
                           'section': '...'},
             'details': {'role_number': ...,
                         'organisation_level': '...',
                         'birth_date': datetime.datetime(...),
                         'membership_number': ...,
                         'name': '...',
                         'role_title': '...',
                         'role_start': datetime.datetime(...),
                         'role_status': '...',
                         'line_manager_number': ...,
                         'line_manager': '...',
                         'ce_check': datetime.datetime(...),
                         'disclosure_check': '...',
                         'references': '...',
                         'appointment_panel_approval': '...',
                         'commissioner_approval': '...',
                         'committee_approval': '...'},
             'getting_started': {...: {'name': '...',
                                       'validated': datetime.datetime(...),
                                       'validated_by': '...'},
                                 ...
                                 }}

            Keys will always be present.

        Raises:
            PermissionError: if the current user may not view this role.

        Todo:
            Other possible exceptions? i.e. from Requests

        """
        # pylint: disable=too-many-locals,too-many-statements
        # Compass uses a verbose label for the county level; normalise it:
        renamed_levels = {
            "County / Area / Scottish Region / Overseas Branch": "County",
        }
        # Map Compass module codes (int or str) to stable output keys:
        renamed_modules = {
            1: "module_01",
            "TRST": "trustee_intro",
            2: "module_02",
            3: "module_03",
            4: "module_04",
            "GDPR": "GDPR",
        }
        # Placeholder strings Compass uses when nothing is selected:
        unset_vals = {"--- Not Selected ---", "--- No Items Available ---", "--- No Line Manager ---"}
        # Map human-readable module names to their short codes:
        module_names = {
            "Essential Information": "M01",
            "Trustee Introduction": "TRST",
            "PersonalLearningPlan": "M02",
            "Tools for the Role (Section Leaders)": "M03",
            "Tools for the Role (Managers and Supporters)": "M04",
            "General Data Protection Regulations": "GDPR",
        }
        # Expand the one/two-letter referee status codes to full descriptions:
        references_codes = {
            "NC": "Not Complete",
            "NR": "Not Required",
            "RR": "References Requested",
            "S": "References Satisfactory",
            "U": "References Unsatisfactory",
        }

        start_time = time.time()
        if response is None:
            response = self._get(f"{Settings.base_url}/Popups/Profile/AssignNewRole.aspx?VIEW={role_number}")
        logger.debug(f"Getting details for role number: {role_number}. Request in {(time.time() - start_time):.2f}s")
        post_response_time = time.time()

        # Accept either pre-fetched markup (str/bytes) or a requests.Response:
        if isinstance(response, (str, bytes)):
            tree = html.fromstring(response)
        else:
            tree = html.fromstring(response.content)

        form = tree.forms[0]
        # Compass rewrites the form action to this URL when access is denied:
        if form.action == "./ScoutsPortal.aspx?Invalid=Access":
            raise PermissionError(f"You do not have permission to the details of role {role_number}")

        member_string = form.fields.get("ctl00$workarea$txt_p1_membername")
        ref_code = form.fields.get("ctl00$workarea$cbo_p2_referee_status")
        role_details = dict()
        # Approval and Role details
        role_details["role_number"] = role_number
        role_details["organisation_level"] = form.fields.get("ctl00$workarea$cbo_p1_level")
        role_details["birth_date"] = parse(form.inputs["ctl00$workarea$txt_p1_membername"].get("data-dob"))
        role_details["membership_number"] = int(form.fields.get("ctl00$workarea$txt_p1_memberno"))
        # Field holds "<number> <name>"; drop the leading membership number:
        role_details["name"] = member_string.split(" ", maxsplit=1)[1]  # TODO does this make sense - should name be in every role??
        role_details["role_title"] = form.fields.get("ctl00$workarea$txt_p1_alt_title")
        role_details["role_start"] = parse(form.fields.get("ctl00$workarea$txt_p1_startdate"))
        # Role Status
        role_details["role_status"] = form.fields.get("ctl00$workarea$txt_p2_status")
        # Line Manager -- NB "linemaneger" is Compass's own misspelt field name:
        line_manager_el = next((op for op in form.inputs["ctl00$workarea$cbo_p2_linemaneger"] if op.get("selected")), None)
        role_details["line_manager_number"] = maybe_int(line_manager_el.get("value")) if line_manager_el is not None else None
        role_details["line_manager"] = line_manager_el.text.strip() if line_manager_el is not None else None
        # Review Date
        role_details["review_date"] = parse(form.fields.get("ctl00$workarea$txt_p2_review"))
        # CE (Confidential Enquiry) Check  # TODO if CE check date != current date then is valid
        role_details["ce_check"] = parse(form.fields.get("ctl00$workarea$txt_p2_cecheck"))
        # Disclosure Check: split "Disclosure Issued : <date>" into status + date
        disclosure_with_date = form.fields.get("ctl00$workarea$txt_p2_disclosure")
        if disclosure_with_date.startswith("Disclosure Issued : "):
            disclosure_date = parse(disclosure_with_date.removeprefix("Disclosure Issued : "))
            disclosure_check = "Disclosure Issued"
        else:
            disclosure_date = None
            disclosure_check = disclosure_with_date
        role_details["disclosure_check"] = disclosure_check  # TODO extract date
        role_details["disclosure_date"] = disclosure_date  # TODO extract date
        # References
        role_details["references"] = references_codes.get(ref_code, ref_code)

        # Collect the three approval values from the properties table rows:
        approval_values = {}
        for row in tree.xpath("//tr[@class='trProp']"):
            select = row[1][0]
            code = select.get("data-app_code")
            approval_values[code] = select.get("data-db")
            # select.get("title") gives title text, but this is not useful as it does not reflect latest changes,
            # but only who added the role to Compass.
        # Appointment Panel Approval
        role_details["appointment_panel_approval"] = approval_values.get("ROLPRP|AACA")
        # Commissioner Approval
        role_details["commissioner_approval"] = approval_values.get("ROLPRP|CAPR")
        # Committee Approval
        role_details["committee_approval"] = approval_values.get("ROLPRP|CCA")

        if role_details["line_manager_number"] in unset_vals:
            role_details["line_manager_number"] = None

        # Filter null values
        role_details = {k: v for k, v in role_details.items() if v is not None}

        # Getting Started
        modules_output = {}
        getting_started_modules = tree.xpath("//tr[@class='trTrain trTrainData']")
        # Get all training modules and then extract the required modules to a dictionary
        for module in getting_started_modules:
            module_name = module[0][0].text.strip()
            if module_name in module_names:
                info = {
                    # "name": module_names[module_name],  # short_name
                    "validated": parse(module[2][0].value),  # Save module validation date
                    "validated_by": module[1][1].value or None,  # Save who validated the module
                }
                # NOTE(review): ``cast`` here appears to be a project helper that
                # coerces the attribute value to int or str -- confirm semantics.
                mod_code = cast(module[2][0].get("data-ng_value"))  # int or str
                modules_output[renamed_modules[mod_code]] = info

        # Get all levels of the org hierarchy and select those that will have information:
        # Get all inputs with location data
        org_levels = [v for k, v in sorted(dict(form.inputs).items()) if "ctl00$workarea$cbo_p1_location" in k]
        # TODO
        all_locations = {row.get("title"): row.findtext("./option") for row in org_levels}
        clipped_locations = {
            renamed_levels.get(key, key).lower(): value for key, value in all_locations.items() if value not in unset_vals
        }

        logger.debug(
            f"Processed details for role number: {role_number}. "
            f"Compass: {(post_response_time - start_time):.3f}s; Processing: {(time.time() - post_response_time):.4f}s"
        )
        # TODO data-ng_id?, data-rtrn_id?
        full_details = {
            "hierarchy": clipped_locations,
            "details": role_details,
            "getting_started": modules_output,
        }
        if self.validate:
            return schema.MemberRolePopup.parse_obj(full_details)
        else:
            return full_details
| 41.262931 | 132 | 0.568126 | 3,153 | 28,719 | 4.994925 | 0.174437 | 0.020255 | 0.008254 | 0.011429 | 0.260461 | 0.189409 | 0.160455 | 0.123436 | 0.099879 | 0.090037 | 0 | 0.011589 | 0.305965 | 28,719 | 695 | 133 | 41.322302 | 0.778547 | 0.281765 | 0 | 0.10061 | 0 | 0.006098 | 0.213864 | 0.081701 | 0 | 0 | 0 | 0.01295 | 0.003049 | 1 | 0.021341 | false | 0.02439 | 0.039634 | 0 | 0.097561 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
dbc52992fc79a5adada939783cc09ffe329b0264 | 1,623 | py | Python | konnection/settings/local.py | IanSeng/CMPUT404_PROJECT | 80acd2c57de4b091e0e66ad9f5f2df17801bf09e | [
"W3C-20150513"
] | null | null | null | konnection/settings/local.py | IanSeng/CMPUT404_PROJECT | 80acd2c57de4b091e0e66ad9f5f2df17801bf09e | [
"W3C-20150513"
] | null | null | null | konnection/settings/local.py | IanSeng/CMPUT404_PROJECT | 80acd2c57de4b091e0e66ad9f5f2df17801bf09e | [
"W3C-20150513"
] | null | null | null | from konnection.settings.base import *
from pathlib import Path
import os
import dotenv

# Build paths inside the project like this: BASE_DIR / 'subdir'.
# BASE_DIR is three levels above this settings module.
BASE_DIR = Path(__file__).resolve().parent.parent.parent

# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True

# Local development only -- never reuse this key outside local settings.
SECRET_KEY = 'temporaryKey'

# Run the test suite with django-nose, spec-style coloured output.
# https://stackoverflow.com/a/35224204
TEST_RUNNER = 'django_nose.NoseTestSuiteRunner'
NOSE_ARGS = ['--with-spec', '--spec-color']

# Load secrets from a .env file next to the project, if one exists.
# From StackOverflow https://stackoverflow.com/a/61437799
# From Zack Plauché https://stackoverflow.com/users/10415970/zack-plauch%c3%a9
dotenv_file = os.path.join(BASE_DIR, ".env")
if os.path.isfile(dotenv_file):
    dotenv.load_dotenv(dotenv_file)

# Connecting PostgreSQL to Django
# From https://www.digitalocean.com/community/tutorials/how-to-use-postgresql-with-your-django-application-on-ubuntu-14-04
# From Digital Ocean
# From Justin Ellingwood https://www.digitalocean.com/community/users/jellingwood
if os.getenv('GITHUB_WORKFLOW'):
    # CI: database provided by the GitHub Actions postgres service container.
    _default_db = {
        'ENGINE': 'django.db.backends.postgresql',
        'NAME': 'github-actions',
        'USER': 'postgres',
        'PASSWORD': 'postgres',
        'HOST': 'localhost',
        'PORT': '5432',
    }
else:
    # Local development: credentials come from the environment (.env file).
    _default_db = {
        'ENGINE': 'django.db.backends.postgresql_psycopg2',
        'NAME': 'myproject',
        'USER': os.environ['DB_USER'],
        'PASSWORD': os.environ['DB_PASSWORD'],
        'HOST': 'localhost',
        'PORT': '',
    }

DATABASES = {'default': _default_db}
dbc6414ac2f786d426d11b5f7b21e310e975369d | 23,614 | py | Python | pyx12/test/test_x12context.py | arenius/pyx12 | 537493deaa0b8e18a3fa72eb1b3eeae9ef043b11 | [
"BSD-3-Clause"
] | 1 | 2019-11-06T21:22:28.000Z | 2019-11-06T21:22:28.000Z | pyx12/test/test_x12context.py | arenius/pyx12 | 537493deaa0b8e18a3fa72eb1b3eeae9ef043b11 | [
"BSD-3-Clause"
] | null | null | null | pyx12/test/test_x12context.py | arenius/pyx12 | 537493deaa0b8e18a3fa72eb1b3eeae9ef043b11 | [
"BSD-3-Clause"
] | 1 | 2021-04-12T14:32:41.000Z | 2021-04-12T14:32:41.000Z | import unittest
#import tempfile
try:
from StringIO import StringIO
except:
from io import StringIO
import pyx12.error_handler
from pyx12.errors import EngineError # , X12PathError
import pyx12.x12context
import pyx12.params
from pyx12.test.x12testdata import datafiles
class X12fileTestCase(unittest.TestCase):
    """Common base class for the X12 context tests.

    Provides default pyx12 params and a helper for building a seekable
    file-like object from an X12 string.
    """

    def setUp(self):
        self.param = pyx12.params.params()

    def _makeFd(self, x12str=None):
        """Return a StringIO wrapping *x12str* (or empty), rewound to offset 0.

        The previous fallback branch called ``StringIO(x12str, encoding='ascii')``,
        which is not a valid signature for any StringIO implementation and could
        only raise TypeError, so it has been removed.
        """
        fd = StringIO(x12str) if x12str else StringIO()
        fd.seek(0)
        return fd
class Delimiters(X12fileTestCase):
    """Verify separator characters are detected from the ISA segment."""

    def test_arbitrary_delimiters(self):
        """Unusual printable separators (&, +, !) are detected."""
        str1 = 'ISA&00& &00& &ZZ&ZZ000 &ZZ&ZZ001 &030828&1128&U&00401&000010121&0&T&!+\n'
        str1 += 'GS&HC&ZZ000&ZZ001&20030828&1128&17&X&004010X098A1+\n'
        str1 += 'ST&837&11280001+\n'
        str1 += 'REF&87&004010X098A1+\n'
        str1 += 'SE&3&11280001+\n'
        str1 += 'GE&1&17+\n'
        str1 += 'IEA&1&000010121+\n'
        fd = self._makeFd(str1)
        errh = pyx12.error_handler.errh_null()
        src = pyx12.x12context.X12ContextReader(self.param, errh, fd)
        # Consume the whole stream; the reader learns the separators from ISA:
        for datatree in src.iter_segments():
            pass
        self.assertEqual(src.subele_term, '!')
        self.assertEqual(src.ele_term, '&')
        self.assertEqual(src.seg_term, '+')

    def test_binary_delimiters(self):
        """Non-printable control characters (FS/GS/RS) work as separators too."""
        str1 = 'ISA&00& &00& &ZZ&ZZ000 &ZZ&ZZ001 &030828&1128&U&00401&000010121&0&T&!+\n'
        str1 += 'GS&HC&ZZ000&ZZ001&20030828&1128&17&X&004010X098A1+\n'
        str1 += 'ST&837&11280001+\n'
        str1 += 'REF&87&004010X098A1+\n'
        str1 += 'SE&3&11280001+\n'
        str1 += 'GE&1&17+\n'
        str1 += 'IEA&1&000010121+\n'
        # Swap the printable separators for control characters:
        str1 = str1.replace('&', chr(0x1C))
        str1 = str1.replace('+', chr(0x1D))
        str1 = str1.replace('!', chr(0x1E))
        fd = self._makeFd(str1)
        # (An unused ``errors = []`` local was removed here.)
        errh = pyx12.error_handler.errh_null()
        src = pyx12.x12context.X12ContextReader(self.param, errh, fd)
        for datatree in src.iter_segments():
            pass
        self.assertEqual(src.subele_term, chr(0x1E))
        self.assertEqual(src.ele_term, chr(0x1C))
        self.assertEqual(src.seg_term, chr(0x1D))
class TreeGetValue(X12fileTestCase):
    """Read-only get_value() tests against the first 2300 (claim) loop of an 837P."""

    def setUp(self):
        # Parse the sample 837P transaction and capture the first 2300 loop.
        fd = self._makeFd(datafiles['simple_837p']['source'])
        param = pyx12.params.params()
        errh = pyx12.error_handler.errh_null()
        self.src = pyx12.x12context.X12ContextReader(param, errh, fd)
        for datatree in self.src.iter_segments('2300'):
            if datatree.id == '2300':
                self.loop2300 = datatree
                break

    def test_get_line_numbers_2200(self):
        """Segment counts and line numbers at the 2300 level and on a CLM node."""
        loop2400 = self.loop2300.first('2400')
        self.assertEqual(self.loop2300.seg_count, 19)
        self.assertEqual(self.loop2300.cur_line_number, 21)
        for seg in loop2400.select('CLM'):
            self.assertEqual(seg.seg_count, 25)
            self.assertEqual(seg.cur_line_number, 2271)
            break

    def test_get_line_numbers_2400(self):
        """Segment counts and line numbers at the 2400 level and on an SV1 node."""
        loop2400 = self.loop2300.first('2400')
        self.assertEqual(loop2400.seg_count, 35)
        self.assertEqual(loop2400.cur_line_number, 37)
        for svc in loop2400.select('SV1'):
            self.assertEqual(svc.seg_count, 36)
            self.assertEqual(svc.cur_line_number, 38)
            break

    def test_get_seg_value(self):
        """Simple element reference works; an unknown element yields None."""
        self.assertEqual(self.loop2300.get_value('CLM02'), '21')
        self.assertEqual(self.loop2300.get_value('CLM99'), None)

    def test_get_seg_value_fail_no_element_index(self):
        """A bare segment id with no element index raises IndexError."""
        self.assertRaises(IndexError, self.loop2300.get_value, 'CLM')

    def test_get_parent_value(self):
        """'..' in a path walks up to the parent loop."""
        loop2400 = self.loop2300.first('2400')
        self.assertEqual(loop2400.get_value('../CLM01'), '3215338')
        self.assertEqual(loop2400.get_value('../2310B/NM109'), '222185735')

    def test_get_seg_value_idx(self):
        """Bare element ('02') and composite ('05-3') indices work on a segment node."""
        for clm in self.loop2300.select('CLM'):
            self.assertEqual(clm.get_value('02'), '21')
            self.assertEqual(clm.get_value('05-3'), '1')

    def test_get_first_value(self):
        """Paths with loops, qualifiers and composites resolve to the first match."""
        self.assertEqual(self.loop2300.get_value('2400/SV101'), 'HC:H2015:TT')
        self.assertEqual(self.loop2300.get_value('2400/SV101-2'), 'H2015')
        self.assertEqual(self.loop2300.get_value('2400/REF[6R]02'), '1057296')
        self.assertEqual(self.loop2300.get_value('2400/2430/SVD02'), '21')
        self.assertEqual(self.loop2300.get_value('2400/AMT[AAE]02'), '21')

    def test_get_first_value_2400(self):
        """Qualified lookups relative to a 2400 loop."""
        loop2400 = self.loop2300.first('2400')
        self.assertEqual(loop2400.get_value('AMT[AAE]02'), '21')
        self.assertEqual(loop2400.get_value('2430/AMT[AAE]02'), None)

    def test_get_no_value(self):
        """A missing element or a loop-only path yields None."""
        self.assertEqual(self.loop2300.get_value('2400/SV199'), None)
        self.assertEqual(self.loop2300.get_value('2400'), None)

    def test_get_parent_no_value(self):
        """A parent path through a loop that does not exist yields None."""
        loop2400 = self.loop2300.first('2400')
        self.assertEqual(loop2400.get_value('../2310E/NM109'), None)

    def test_get_specific_qual(self):
        """Only the matching qualifier returns a value."""
        self.assertEqual(self.loop2300.get_value('2400/REF[6R]02'), '1057296')
        self.assertEqual(self.loop2300.get_value('2400/REF[G1]02'), None)
        self.assertEqual(self.loop2300.get_value('2400/REF[XX]02'), None)
class TreeSetValue(X12fileTestCase):
    """Tests for writing element values into an X12 context tree."""

    def setUp(self):
        # Parse the sample 837P and keep the first claim (2300) loop.
        source_fd = self._makeFd(datafiles['simple_837p']['source'])
        self.src = pyx12.x12context.X12ContextReader(
            pyx12.params.params(), pyx12.error_handler.errh_null(), source_fd)
        self.loop2300 = next(
            tree for tree in self.src.iter_segments('2300') if tree.id == '2300')

    def test_set_seg_value(self):
        """A value written with set_value is visible through get_value."""
        self.loop2300.set_value('CLM02', '50')
        self.assertEqual(self.loop2300.get_value('CLM02'), '50')

    def test_set_first_value_2400(self):
        """Qualified-path writes work relative to a child loop."""
        service_line = self.loop2300.first('2400')
        service_line.set_value('AMT[AAE]02', '25')
        self.assertEqual(service_line.get_value('AMT[AAE]02'), '25')
class TreeSelect(X12fileTestCase):
    """Tests for selecting child and parent nodes with select()."""

    def setUp(self):
        # Parse the sample 837P and capture the first 2300 loop.
        fd = self._makeFd(datafiles['simple_837p']['source'])
        self.param = pyx12.params.params()
        errh = pyx12.error_handler.errh_null()
        src = pyx12.x12context.X12ContextReader(self.param, errh, fd)
        for datatree in src.iter_segments('2300'):
            if datatree.id == '2300':
                self.loop2300 = datatree
                break

    #def test_select_loop_and_parent(self):
    #    loop2400 = self.loop2300.first('2400')
    #    assert loop2400.id == '2400', 'Not in 2400'
    #    ct = 0
    #    newtree = loop2400.parent
    #    for newtree in loop2400.select('../'):
    #        self.assertEqual(newtree.id, '2300')
    #        ct += 1
    #    self.assertEqual(ct, 1)

    def test_select_loops(self):
        """select('2400') yields each service-line loop (two in the sample)."""
        ct = 0
        for newtree in self.loop2300.select('2400'):
            self.assertEqual(newtree.id, '2400')
            ct += 1
        self.assertEqual(ct, 2)

    def test_select_seg(self):
        """select() with a segment path yields segment nodes."""
        ct = 0
        for newtree in self.loop2300.select('2400/SV1'):
            self.assertEqual(newtree.id, 'SV1')
            self.assertEqual(newtree.get_value('SV102'), '21')
            ct += 1
        self.assertEqual(ct, 2)

    def test_select_parent_seg(self):
        """'..' in a select path reaches segments of the parent loop."""
        loop2400 = self.loop2300.first('2400')
        assert loop2400.id == '2400', 'Not in 2400'
        ct = 0
        for newtree in loop2400.select('../CLM'):
            self.assertEqual(newtree.id, 'CLM')
            self.assertEqual(newtree.get_value('CLM01'), '3215338')
            ct += 1
        self.assertEqual(ct, 1)

    def test_select_from_st(self):
        """Deep paths can be selected from the ST_LOOP of an 835."""
        fd = self._makeFd(datafiles['835id']['source'])
        errh = pyx12.error_handler.errh_null()
        src = pyx12.x12context.X12ContextReader(self.param, errh, fd)
        ct = 0
        for datatree in src.iter_segments('ST_LOOP'):
            if datatree.id == 'ST_LOOP':
                for claim in datatree.select('DETAIL/2000/2100'):
                    self.assertEqual(claim.id, '2100')
                    ct += 1
        self.assertEqual(
            ct, 3, 'Found %i 2100 loops. Should have %i' % (ct, 3))

    def test_select_from_gs(self):
        """Deep paths can be selected from the GS_LOOP of an 837I."""
        fd = self._makeFd(datafiles['simple_837i']['source'])
        errh = pyx12.error_handler.errh_null()
        src = pyx12.x12context.X12ContextReader(self.param, errh, fd)
        ct = 0
        for datatree in src.iter_segments('GS_LOOP'):
            if datatree.id == 'GS_LOOP':
                for sub in datatree.select('ST_LOOP/DETAIL/2000A/2000B/2300/2400'):
                    self.assertEqual(sub.id, '2400')
                    ct += 1
        self.assertEqual(
            ct, 6, 'Found %i 2400 loops. Should have %i' % (ct, 6))
class TreeSelectFromSegment(X12fileTestCase):
    """Calling select() on a plain segment node must not raise."""

    def test_select_from_seg_fail(self):
        reader = pyx12.x12context.X12ContextReader(
            pyx12.params.params(), pyx12.error_handler.errh_null(),
            self._makeFd(datafiles['835id']['source']))
        for node in reader.iter_segments('ST_LOOP'):
            if node.id != 'GS':
                continue
            # Iterating the selection from a GS segment must simply yield
            # nothing (or raise nothing) -- the loop body is intentionally empty.
            for claim in node.select('DETAIL/2000/2100'):
                pass
class TreeAddSegment(X12fileTestCase):
    """Tests adding pyx12.segment.Segment objects into a 2300 loop."""

    def setUp(self):
        fd = self._makeFd(datafiles['simple_837p']['source'])
        self.src = pyx12.x12context.X12ContextReader(
            pyx12.params.params(), pyx12.error_handler.errh_null(), fd)
        self.loop2300 = next(
            tree for tree in self.src.iter_segments('2300') if tree.id == '2300')

    def test_add_new_plain(self):
        """A plain segment that belongs in 2300 is accepted."""
        seg = pyx12.segment.Segment('HCP*00*7.11~', '~', '*', ':')
        self.assertNotEqual(self.loop2300.add_segment(seg), None)

    def test_add_new_id(self):
        """A qualified segment (REF*F5) is accepted."""
        seg = pyx12.segment.Segment('REF*F5*6.11~', '~', '*', ':')
        self.assertNotEqual(self.loop2300.add_segment(seg), None)

    def test_add_new_not_exists(self):
        """A segment with no home in the map raises X12PathError."""
        bogus = pyx12.segment.Segment('ZZZ*00~', '~', '*', ':')
        self.assertRaises(pyx12.errors.X12PathError,
                          self.loop2300.add_segment, bogus)
class TreeAddSegmentString(X12fileTestCase):
    """Tests adding segments supplied as raw strings."""

    def setUp(self):
        fd = self._makeFd(datafiles['simple_837p']['source'])
        self.src = pyx12.x12context.X12ContextReader(
            pyx12.params.params(), pyx12.error_handler.errh_null(), fd)
        self.loop2300 = next(
            tree for tree in self.src.iter_segments('2300') if tree.id == '2300')

    def test_add_new_plain(self):
        """A plain segment string is accepted."""
        self.assertNotEqual(self.loop2300.add_segment('HCP*00*7.11~'), None)

    def test_add_new_id(self):
        """A qualified segment string (even without a terminator) is accepted."""
        self.assertNotEqual(self.loop2300.add_segment('REF*F5*6.11'), None)

    def test_add_new_not_exists(self):
        """A segment string with no home in the map raises X12PathError."""
        self.assertRaises(pyx12.errors.X12PathError,
                          self.loop2300.add_segment, 'ZZZ*00~')
class SegmentExists(X12fileTestCase):
    """Tests for exists() with qualified segment paths."""

    def setUp(self):
        # Parse the sample 837P and capture the first 2300 loop.
        fd = self._makeFd(datafiles['simple_837p']['source'])
        self.param = pyx12.params.params()
        errh = pyx12.error_handler.errh_null()
        self.src = pyx12.x12context.X12ContextReader(self.param, errh, fd)
        for datatree in self.src.iter_segments('2300'):
            if datatree.id == '2300':
                self.loop2300 = datatree
                break

    def test_qual_segment(self):
        """NM1[82] qualifier matches from both the 2300 and 2310B levels."""
        self.assertTrue(self.loop2300.exists('2310B'))
        self.assertTrue(self.loop2300.exists('2310B/NM1[82]'))
        for loop2310b in self.loop2300.select('2310B'):
            self.assertTrue(loop2310b.exists('NM1'))
            self.assertTrue(loop2310b.exists('NM1[82]'))

    def test_qual_segment_sub_loop(self):
        """Qualified DTP checks within a nested 2430 loop."""
        self.assertTrue(self.loop2300.exists('2400/2430'))
        self.assertTrue(self.loop2300.exists('2400/2430/DTP[573]'))
        self.assertFalse(self.loop2300.exists('2400/2430/DTP[111]'))
        self.assertTrue(self.loop2300.exists('2400/2430/DTP[573]03'))

    def test_qual_segment_select_sub_loop(self):
        """The same DTP checks, relative to the 2430 loop itself."""
        loop2430 = self.loop2300.first('2400/2430')
        self.assertTrue(loop2430.exists('DTP'))
        self.assertTrue(loop2430.exists('DTP[573]'))
        self.assertTrue(loop2430.exists('DTP[573]03'))

    def test_qual_834_dtp(self):
        """Qualified DTP checks against an 834 sample file."""
        fd = self._makeFd(datafiles['834_lui_id']['source'])
        errh = pyx12.error_handler.errh_null()
        src = pyx12.x12context.X12ContextReader(self.param, errh, fd)
        for datatree in src.iter_segments('2300'):
            if datatree.id == '2300':
                loop2300 = datatree
                break
        self.assertTrue(loop2300.exists('DTP[348]'))
        self.assertFalse(loop2300.exists('DTP[349]'))
class TreeAddLoop(X12fileTestCase):
    """Tests for adding whole loops (trigger segment + children) to a node."""

    def setUp(self):
        # Parse the sample 837P and capture the first 2300 loop.
        fd = self._makeFd(datafiles['simple_837p']['source'])
        param = pyx12.params.params()
        errh = pyx12.error_handler.errh_null()
        self.src = pyx12.x12context.X12ContextReader(param, errh, fd)
        for datatree in self.src.iter_segments('2300'):
            if datatree.id == '2300':
                self.loop2300 = datatree
                break

    def test_add_new_plain(self):
        """Adding a 2310B loop via a Segment object makes it selectable."""
        seg_data = pyx12.segment.Segment(
            'NM1*82*2*Provider 1*****ZZ*9898798~', '~', '*', ':')
        new_node = self.loop2300.add_loop(seg_data)
        self.assertNotEqual(new_node, None)
        self.assertTrue(self.loop2300.exists('2310B'))
        for loop2310b in self.loop2300.select('2310B'):
            self.assertTrue(loop2310b.exists('NM1'))
            self.assertTrue(loop2310b.exists('NM1[82]'))

    def test_add_new_string_seg(self):
        """Adding a 2400 loop via a raw segment string increments the count."""
        old_ct = self.loop2300.count('2400')
        new_node = self.loop2300.add_loop('LX*5~')
        self.assertNotEqual(new_node, None)
        self.assertTrue(self.loop2300.exists('2400'))
        self.assertEqual(old_ct + 1, self.loop2300.count('2400'))
        for loop2400 in self.loop2300.select('2400'):
            self.assertTrue(loop2400.exists('LX'))
class TreeAddLoopDetail(X12fileTestCase):
    """Tests adding a loop directly under a DETAIL table node (834)."""

    def test_add_loops_under_detail(self):
        """Delete the only 2000 loop under DETAIL, then add one back."""
        str1 = 'ISA&00& &00& &ZZ&ZZ000 &ZZ&ZZ001 &030828&1128&U&00401&000010121&0&T&!+\n'
        str1 += 'GS&BE&ZZ000&ZZ001&20030828&1128&17&X&004010X095A1+\n'
        str1 += 'ST&834&11280001+\n'
        str1 += 'BGN&+\n'
        str1 += 'INS&Y&18&30&XN&AE&RT+\n'
        str1 += 'SE&4&11280001+\n'
        str1 += 'GE&1&17+\n'
        str1 += 'IEA&1&000010121+\n'
        fd = self._makeFd(str1)
        # (An unused ``errors = []`` local was removed here.)
        param = pyx12.params.params()
        errh = pyx12.error_handler.errh_null()
        src = pyx12.x12context.X12ContextReader(param, errh, fd)
        for st_loop in src.iter_segments('ST_LOOP'):
            if st_loop.id == 'ST_LOOP' and st_loop.exists('DETAIL'):
                detail = st_loop.first('DETAIL')
                self.assertTrue(detail.exists('2000'))
                detail.first('2000').delete()
                self.assertFalse(detail.exists('2000'))
                detail.add_loop('INS&Y&18&30&XN&AE&RT+')
                self.assertTrue(detail.exists('2000'))
class TreeAddNode(X12fileTestCase):
    """Tests for re-adding existing nodes (loops / segments) to a tree."""

    def setUp(self):
        self.param = pyx12.params.params()

    def test_add_loop(self):
        """Duplicating both 2400 loops doubles their count (2 -> 4)."""
        fd = self._makeFd(datafiles['simple_837p']['source'])
        errh = pyx12.error_handler.errh_null()
        self.src = pyx12.x12context.X12ContextReader(self.param, errh, fd)
        for datatree in self.src.iter_segments('2300'):
            if datatree.id == '2300':
                loop2300 = datatree
                break
        self.assertEqual(self._get_count(loop2300, '2400'), 2)
        for node in loop2300.select('2400'):
            loop2300.add_node(node)
        self.assertEqual(self._get_count(loop2300, '2400'), 4)

    def test_add_segment(self):
        """Duplicating the CN1 segment doubles its count (1 -> 2)."""
        fd = self._makeFd(datafiles['simple_837p']['source'])
        errh = pyx12.error_handler.errh_null()
        self.src = pyx12.x12context.X12ContextReader(self.param, errh, fd)
        for datatree in self.src.iter_segments('2300'):
            if datatree.id == '2300':
                loop2300 = datatree
                break
        self.assertEqual(self._get_count(loop2300, 'CN1'), 1)
        for node in loop2300.select('CN1'):
            loop2300.add_node(node)
        self.assertEqual(self._get_count(loop2300, 'CN1'), 2)

    def test_fail(self):
        """Adding a segment where it does not belong raises X12PathError."""
        fd = self._makeFd(datafiles['simple_837p']['source'])
        errh = pyx12.error_handler.errh_null()
        self.src = pyx12.x12context.X12ContextReader(self.param, errh, fd)
        for datatree in self.src.iter_segments('2300'):
            if datatree.id == '2300':
                loop2300 = datatree
                break
        for node in loop2300.select('CN1'):
            cn1 = node
            break
        n2400 = None
        for node in loop2300.select('2400'):
            n2400 = node
            break
        assert n2400 is not None, 'Loop 2400 was not matched'
        self.assertRaises(pyx12.errors.X12PathError, n2400.add_node, cn1)

    def _get_count(self, node, loop_id):
        # Count how many children of *node* match *loop_id*.
        ct = 0
        for n in node.select(loop_id):
            ct += 1
        return ct
class CountRepeatingLoop(X12fileTestCase):
    """Counts loop repeats under the claim whose CLM01 is '5555'."""

    def setUp(self):
        fd = self._makeFd(datafiles['simple_837p']['source'])
        self.src = pyx12.x12context.X12ContextReader(
            pyx12.params.params(), pyx12.error_handler.errh_null(), fd)
        self.loop2300 = next(
            tree for tree in self.src.iter_segments('2300')
            if tree.id == '2300' and tree.get_value('CLM01') == '5555')

    def test_repeat_2400(self):
        """This claim carries exactly three service-line (2400) loops."""
        ct = sum(1 for _ in self.loop2300.select('2400'))
        self.assertEqual(
            ct, 3, 'Found %i 2400 loops. Should have %i' % (ct, 3))

    def test_repeat_2430(self):
        """No line-adjudication (2430) loops exist under this claim."""
        ct = sum(1 for _ in self.loop2300.select('2400/2430'))
        self.assertEqual(
            ct, 0, 'Found %i 2430 loops. Should have %i' % (ct, 0))
class IterateTree(X12fileTestCase):
    """Iterating at the 2000A level yields that loop plus leading segments."""

    def setUp(self):
        fd = self._makeFd(datafiles['simple_837p']['source'])
        self.src = pyx12.x12context.X12ContextReader(
            pyx12.params.params(), pyx12.error_handler.errh_null(), fd)

    def test_iterate_all(self):
        """Exactly one 2000A loop and eleven nodes outside it."""
        ids = [node.id for node in self.src.iter_segments('2000A')]
        ct_2000a = ids.count('2000A')
        ct_other = len(ids) - ct_2000a
        self.assertEqual(ct_2000a, 1,
                         'Found %i 2000A loops. Should have %i' % (ct_2000a, 1))
        self.assertEqual(ct_other, 11, 'Found %i external segments. Should have %i' % (ct_other, 11))
class TreeDeleteSegment(X12fileTestCase):
    """Deleting single segments from a loop node."""

    def setUp(self):
        fd = self._makeFd(datafiles['simple_837p']['source'])
        self.src = pyx12.x12context.X12ContextReader(
            pyx12.params.params(), pyx12.error_handler.errh_null(), fd)
        self.loop2300 = next(
            tree for tree in self.src.iter_segments('2300') if tree.id == '2300')

    def test_delete(self):
        """An existing CN1 segment can be deleted; its value then disappears."""
        assert self.loop2300.get_value('CN101') == '05'
        target = pyx12.segment.Segment('CN1*05~', '~', '*', ':')
        self.assertTrue(self.loop2300.delete_segment(target))
        self.assertEqual(self.loop2300.get_value('CN101'), None)

    def test_delete_fail(self):
        """Deleting a segment that is not present returns False."""
        missing = pyx12.segment.Segment('HCP*00*7.11~', '~', '*', ':')
        self.assertFalse(self.loop2300.delete_segment(missing))
class TreeDeleteLoop(X12fileTestCase):
    """Deleting a child loop by path."""

    def setUp(self):
        fd = self._makeFd(datafiles['simple_837p']['source'])
        self.src = pyx12.x12context.X12ContextReader(
            pyx12.params.params(), pyx12.error_handler.errh_null(), fd)
        self.loop2300 = next(
            tree for tree in self.src.iter_segments('2300') if tree.id == '2300')

    def test_delete(self):
        """Removing the first 2400 loop promotes the second (LX01 becomes '2')."""
        self.assertEqual(self.loop2300.get_value('2400/LX01'), '1')
        self.assertTrue(self.loop2300.delete_node('2400'))
        self.assertEqual(self.loop2300.get_value('2400/LX01'), '2')

    def test_delete_fail(self):
        """Deleting a loop that does not exist returns False."""
        self.assertFalse(self.loop2300.delete_node('2500'))
class NodeDeleteSelf(X12fileTestCase):
    """Tests that a node can delete itself from the tree."""

    def setUp(self):
        fd = self._makeFd(datafiles['simple_837p']['source'])
        param = pyx12.params.params()
        errh = pyx12.error_handler.errh_null()
        self.src = pyx12.x12context.X12ContextReader(param, errh, fd)
        for datatree in self.src.iter_segments('2300'):
            if datatree.id == '2300':
                self.loop2300 = datatree
                break

    def test_delete(self):
        """After delete(), accessing the node's id must raise EngineError.

        The previous version wrapped the access in ``try``/bare ``except`` and
        passed no matter what happened, so it asserted nothing; the expected
        EngineError is now asserted explicitly (the id accessor is a property,
        hence the lambda).
        """
        cn1 = self.loop2300.first('CN1')
        assert cn1.id == 'CN1'
        cn1.delete()
        self.assertRaises(EngineError, lambda: cn1.id)
class TreeCopy(X12fileTestCase):
    """Tests copying nodes within an 835 context tree."""

    def setUp(self):
        self.param = pyx12.params.params()

    def test_add_node(self):
        """A copied 2110 loop is a distinct object and can be re-added."""
        fd = self._makeFd(datafiles['835id']['source'])
        errh = pyx12.error_handler.errh_null()
        src = pyx12.x12context.X12ContextReader(self.param, errh, fd)
        for datatree in src.iter_segments('2100'):
            if datatree.id == '2100':
                for svc in datatree.select('2110'):
                    new_svc = svc.copy()
                    new_svc.set_value('SVC01', 'XX:AAAAA')
                    # Fixed idiom: identity assertion instead of
                    # ``assertTrue(not svc is new_svc)``, matching test_copy_seg.
                    self.assertIsNot(svc, new_svc)
                    datatree.add_node(new_svc)
                break

    def test_copy_seg(self):
        """A copy starts equal to the original but mutates independently."""
        fd = self._makeFd(datafiles['835id']['source'])
        errh = pyx12.error_handler.errh_null()
        src = pyx12.x12context.X12ContextReader(self.param, errh, fd)
        for datatree in src.iter_segments('2100'):
            if datatree.id == '2100':
                for svc in datatree.select('2110'):
                    new_svc = svc.copy()
                    self.assertIsNot(svc, new_svc)
                    self.assertEqual(svc.get_value('SVC01'),
                                     new_svc.get_value('SVC01'))
                    new_svc.set_value('SVC01', 'XX:AAAAA')
                    self.assertIsNot(svc, new_svc)
                    self.assertNotEqual(svc.get_value('SVC01'),
                                        new_svc.get_value('SVC01'))
                break
| 38.210356 | 125 | 0.598247 | 2,840 | 23,614 | 4.833451 | 0.091549 | 0.062942 | 0.030961 | 0.036716 | 0.776717 | 0.700517 | 0.641801 | 0.609383 | 0.562468 | 0.517739 | 0 | 0.122283 | 0.269289 | 23,614 | 617 | 126 | 38.272285 | 0.673254 | 0.021343 | 0 | 0.582178 | 0 | 0.005941 | 0.109331 | 0.01719 | 0 | 0 | 0.001039 | 0 | 0.20396 | 1 | 0.126733 | false | 0.007921 | 0.015842 | 0 | 0.182178 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
dbce1d6ebf5fac46543c3b47688a5f1e1c7cc668 | 8,981 | py | Python | dmarc_storage.py | Schramp/dmarc-monitoring | 619a162f71a788e81d92ca281ec0bdcf13c2e8e8 | [
"MIT"
] | 1 | 2020-05-25T05:09:18.000Z | 2020-05-25T05:09:18.000Z | dmarc_storage.py | Schramp/dmarc-monitoring | 619a162f71a788e81d92ca281ec0bdcf13c2e8e8 | [
"MIT"
] | 30 | 2019-08-12T05:10:50.000Z | 2021-07-21T04:25:02.000Z | dmarc_storage.py | Schramp/dmarc-monitoring | 619a162f71a788e81d92ca281ec0bdcf13c2e8e8 | [
"MIT"
] | 1 | 2022-03-12T19:24:24.000Z | 2022-03-12T19:24:24.000Z | import sqlite3
import os
import datetime
__all__ = ['DMARCStorage', 'totimestamp']
def totimestamp(datetime_object):
    """Convert a datetime to a POSIX timestamp (seconds since 1970-01-01 UTC).

    Naive datetimes are assumed to already be expressed in UTC; aware
    datetimes are shifted to UTC before the difference from the epoch
    is taken.
    """
    offset = datetime_object.utcoffset()
    if offset is None:
        naive_utc = datetime_object
    else:
        naive_utc = datetime_object.replace(tzinfo=None) - offset
    epoch = datetime.datetime(1970, 1, 1)
    return (naive_utc - epoch).total_seconds()
class DMARCStorage(object):
    """SQLite-backed storage for parsed DMARC aggregate reports.

    Creates (if necessary) and connects to an SQLite database in autocommit
    mode, sets up the schema, and offers insert and aggregate-query helpers
    for reports, per-message records, SPF results and DKIM signatures.
    """

    def __init__(self, database_filename='dmarc.sqlite', database_directory="./results"):
        # Create or connect to the database:
        database_path = os.path.join(database_directory, database_filename)
        if not os.path.exists(database_directory):
            os.makedirs(database_directory)
        self._conn = sqlite3.connect(database_path)
        # Set autocommit to true and initialise cursor:
        self._conn.isolation_level = None
        self._cur = self._conn.cursor()
        # Create the tables if they don't exist already:
        self._init_database()

    def __del__(self):
        # Guard with getattr: if __init__ raised before _conn was assigned,
        # __del__ must not raise an AttributeError of its own.
        if getattr(self, "_conn", None) is not None:
            self._close_connection()

    def _init_database(self):
        """Create the schema if missing (idempotent) and enable FK cascades."""
        self._cur.execute("PRAGMA foreign_keys = ON;")
        self._cur.execute("""CREATE TABLE IF NOT EXISTS dmarc_reports (
                                report_id TEXT PRIMARY KEY,
                                receiver TEXT,
                                report_filename TEXT,
                                report_start INTEGER,
                                report_end INTEGER
                            );""")
        self._cur.execute("""CREATE TABLE IF NOT EXISTS dmarc_records (
                                report_id TEXT REFERENCES dmarc_reports(report_id) ON DELETE CASCADE,
                                record_id INTEGER,
                                ip_address TEXT,
                                hostname TEXT,
                                disposition TEXT,
                                reason TEXT,
                                spf_pass INTEGER,
                                dkim_pass INTEGER,
                                header_from TEXT,
                                envelope_from TEXT,
                                count INTEGER,
                                PRIMARY KEY (report_id, record_id)
                            );""")
        self._cur.execute("""CREATE TABLE IF NOT EXISTS spf_results (
                                report_id TEXT,
                                record_id INTEGER,
                                spf_id INTEGER,
                                domain TEXT,
                                result TEXT,
                                PRIMARY KEY (report_id, record_id, spf_id),
                                FOREIGN KEY (report_id, record_id)
                                    REFERENCES dmarc_records(report_id, record_id)
                                    ON DELETE CASCADE
                            );""")
        self._cur.execute("""CREATE TABLE IF NOT EXISTS dkim_signatures (
                                report_id TEXT,
                                record_id INTEGER,
                                signature_id INTEGER,
                                domain TEXT,
                                result TEXT,
                                selector TEXT,
                                PRIMARY KEY (report_id, record_id, signature_id),
                                FOREIGN KEY (report_id, record_id)
                                    REFERENCES dmarc_records(report_id, record_id)
                                    ON DELETE CASCADE,
                                CONSTRAINT unique_dkim_sig
                                    UNIQUE (report_id, record_id, domain, result, selector)
                            );""")

    def _delete_all_data(self):
        """Drop and recreate every table, discarding all stored data."""
        # Drop the tables in the right order (children before parents):
        self._cur.execute("DROP TABLE dkim_signatures;")
        self._cur.execute("DROP TABLE spf_results;")
        self._cur.execute("DROP TABLE dmarc_records;")
        self._cur.execute("DROP TABLE dmarc_reports;")
        # Recreate them again, empty:
        self._init_database()

    def _close_connection(self):
        self._conn.close()
        self._conn = None

    def report_already_exists(self, report_filename):
        """Return True if a report with that filename was already stored."""
        self._cur.execute("SELECT report_filename FROM dmarc_reports WHERE report_filename=?;", (report_filename,))
        already_exists = self._cur.fetchone() is not None
        return already_exists

    def save_new_report(self, report):
        """Persist a report, its records, SPF results and DKIM signatures."""
        # Persist the report itself:
        self._cur.execute("INSERT INTO dmarc_reports VALUES (?,?,?,?,?);",
                          [report.id, report.receiver, report.filename,
                           totimestamp(report.start_date), totimestamp(report.end_date)])
        # Persist each record of that report with a generated ID:
        for rec_id, rec in enumerate(report.records):
            self._cur.execute("INSERT INTO dmarc_records VALUES (?,?,?,?,?,?,?,?,?,?,?);",
                              [report.id, rec_id, rec.ip, rec.host, rec.disposition, rec.reason,
                               rec.spf_pass, rec.dkim_pass, rec.header_from, rec.envelope_from,
                               rec.count])
            # Persist the SPF data:
            for spf_id, spf_result in enumerate(rec.spf_results):
                self._cur.execute("INSERT INTO spf_results VALUES (?,?,?,?,?);",
                                  [report.id, rec_id, spf_id, spf_result["domain"], spf_result["result"]])
            # Persist all the DKIM signatures with generated IDs
            for sig_id, sig in enumerate(rec.dkim_signatures):
                self._cur.execute("INSERT INTO dkim_signatures VALUES (?,?,?,?,?,?);",
                                  [report.id, rec_id, sig_id, sig["domain"], sig["result"], sig["selector"]])

    def get_reporting_start_date(self):
        """Return the earliest report start as a naive UTC datetime.

        Raises TypeError when no reports are stored (min() yields NULL).
        """
        self._cur.execute("SELECT min(report_start) FROM dmarc_reports;")
        return datetime.datetime.utcfromtimestamp(self._cur.fetchone()[0])

    def get_reporting_end_date(self):
        """Return the latest report end as a naive UTC datetime.

        Raises TypeError when no reports are stored (max() yields NULL).
        """
        # Bug fix: this previously selected max(report_start); the end of the
        # covered reporting window is report_end.
        self._cur.execute("SELECT max(report_end) FROM dmarc_reports;")
        return datetime.datetime.utcfromtimestamp(self._cur.fetchone()[0])

    def get_number_reports(self):
        """Return the total number of stored reports."""
        self._cur.execute("SELECT count(*) FROM dmarc_reports;")
        return self._cur.fetchone()[0]

    def get_count_by_disposition(self):
        """Return {disposition: message count} over all records."""
        self._cur.execute("SELECT disposition, sum(count) FROM dmarc_records GROUP BY disposition;")
        return {str(r[0]): r[1] for r in self._cur.fetchall()}

    def get_count_by_hostnames(self):
        """Return {hostname-or-ip: message count}; IP is used when the
        hostname is NULL."""
        self._cur.execute("SELECT hostname, ip_address, sum(count) FROM dmarc_records GROUP BY hostname, ip_address;")
        return {str(r[0]) if r[0] is not None else str(r[1]): r[2] for r in self._cur.fetchall()}

    def get_count_by_receiver(self):
        """Return {receiver: message count} joined across reports/records."""
        self._cur.execute("SELECT receiver, sum(count) FROM dmarc_reports JOIN dmarc_records " +
                          "ON dmarc_reports.report_id=dmarc_records.report_id GROUP BY receiver;")
        return {str(r[0]): r[1] for r in self._cur.fetchall()}

    def get_count_by_dkim_domain(self):
        """Return {DKIM signing domain: message count}."""
        self._cur.execute("SELECT domain, sum(count) FROM dmarc_records JOIN dkim_signatures " +
                          "ON dmarc_records.report_id=dkim_signatures.report_id AND " +
                          "dmarc_records.record_id=dkim_signatures.record_id GROUP BY domain;")
        return {str(r[0]): r[1] for r in self._cur.fetchall()}

    def get_count_by_status_string(self):
        """Return {"SPF:<s>, DKIM:<s>": message count} with pass/fail/n-a
        labels derived from the 1/0/NULL flag columns."""
        self._cur.execute("SELECT spf_pass, dkim_pass, sum(count) FROM dmarc_records GROUP BY spf_pass, dkim_pass;")
        status = {1: "pass", 0: "fail", None: "n/a"}
        return {"SPF:%s, DKIM:%s" % (status[r[0]], status[r[1]]): r[2] for r in self._cur.fetchall()}

    def get_raw_spf_status_count_by_timestamp(self):
        """Return raw (report_start, spf_pass, count) rows."""
        self._cur.execute("SELECT report_start, spf_pass, count FROM dmarc_reports JOIN dmarc_records " +
                          "ON dmarc_reports.report_id=dmarc_records.report_id;")
        return self._cur.fetchall()

    def get_raw_dkim_status_count_by_timestamp(self):
        """Return raw (report_start, dkim_pass, count) rows."""
        self._cur.execute("SELECT report_start, dkim_pass, count FROM dmarc_reports JOIN dmarc_records " +
                          "ON dmarc_reports.report_id=dmarc_records.report_id;")
        return self._cur.fetchall()

    def get_raw_dmarc_status_count_by_timestamp(self):
        """Return raw (report_start, spf_pass + dkim_pass, count) rows.

        NOTE(review): the sum is NULL when either flag is NULL -- callers
        must tolerate NULL in the second column.
        """
        self._cur.execute("SELECT report_start, spf_pass + dkim_pass, count " +
                          "FROM dmarc_reports JOIN dmarc_records " +
                          "ON dmarc_reports.report_id=dmarc_records.report_id;")
        return self._cur.fetchall()

    def execute_query(self, sql, values=None):
        """Run an arbitrary SQL query (parameterized when values is given)
        and return all rows."""
        if values is not None:
            self._cur.execute(sql, values)
        else:
            self._cur.execute(sql)
        return self._cur.fetchall()
| 49.894444 | 118 | 0.565639 | 1,012 | 8,981 | 4.75 | 0.156126 | 0.059705 | 0.078635 | 0.044934 | 0.462867 | 0.370917 | 0.302476 | 0.270647 | 0.255669 | 0.23861 | 0 | 0.004403 | 0.342501 | 8,981 | 179 | 119 | 50.173184 | 0.809653 | 0.04465 | 0 | 0.227586 | 0 | 0 | 0.471 | 0.044929 | 0 | 0 | 0 | 0 | 0 | 1 | 0.137931 | false | 0.055172 | 0.02069 | 0 | 0.262069 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 1 |
dbd0c614614154cd50e0792871e7aa778a2a1459 | 557 | py | Python | setup.py | mcdruid/sumologic-python-sdk | cb1d649d0166976fb104866e9174a41bd558b817 | [
"Apache-2.0"
] | 4 | 2019-05-09T01:31:15.000Z | 2019-12-08T03:35:32.000Z | setup.py | blaise-sumo/sumologic-python-sdk | 97c38fc2d493b94741fd17711923ec7e39264610 | [
"Apache-2.0"
] | null | null | null | setup.py | blaise-sumo/sumologic-python-sdk | 97c38fc2d493b94741fd17711923ec7e39264610 | [
"Apache-2.0"
] | null | null | null | from setuptools import setup, find_packages
# Package definition for the sumologic-sdk distribution; running this module
# (e.g. `python setup.py sdist`) builds/installs the package.
setup(
    name="sumologic-sdk",
    version="0.1.9",
    packages=find_packages(),
    install_requires=['requests>=2.2.1'],
    # PyPI metadata
    author="Yoway Buorn, Melchi Salins",
    author_email="it@sumologic.com, melchisalins@icloud.com",
    description="Sumo Logic Python SDK",
    license="PSF",
    keywords="sumologic python sdk rest api log management analytics logreduce splunk security siem collector forwarder",
    url="https://github.com/SumoLogic/sumologic-python-sdk",
    zip_safe=True
)
| 32.764706 | 121 | 0.716338 | 71 | 557 | 5.549296 | 0.746479 | 0.068528 | 0.091371 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.012848 | 0.16158 | 557 | 16 | 122 | 34.8125 | 0.830835 | 0.023339 | 0 | 0 | 0 | 0 | 0.512915 | 0.042435 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 0.071429 | 0 | 0.071429 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
dbd6cc6412096e169b145a7b948ae52708971c75 | 1,311 | py | Python | home/migrations/0010_auto_20180206_1625.py | RomanMahar/personalsite | ad0c7880e0ccfe81ea53b8bad8e0d4fcf0c5830b | [
"MIT"
] | null | null | null | home/migrations/0010_auto_20180206_1625.py | RomanMahar/personalsite | ad0c7880e0ccfe81ea53b8bad8e0d4fcf0c5830b | [
"MIT"
] | 10 | 2020-06-05T17:26:09.000Z | 2022-01-13T00:39:44.000Z | home/migrations/0010_auto_20180206_1625.py | RomanMahar/personalsite | ad0c7880e0ccfe81ea53b8bad8e0d4fcf0c5830b | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Generated by Django 1.9.13 on 2018-02-06 16:25
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated by Django makemigrations (Django 1.9, 2018-02-06).
    # Adds the SnippetClass model, alters HomePageSection.sectionClassName,
    # and adds an `advert` FK from HomePageSection to SnippetClass.
    # Do not hand-edit operations: migration state is derived from them.

    dependencies = [
        ('wagtailcore', '0028_merge'),
        ('home', '0009_remove_homepagesection_sectiontitle'),
    ]

    operations = [
        migrations.CreateModel(
            name='SnippetClass',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('text', models.CharField(max_length=255)),
                # Deleting the referenced wagtail Page cascades to this row.
                ('page', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='snippy', to='wagtailcore.Page')),
            ],
        ),
        migrations.AlterField(
            model_name='homepagesection',
            name='sectionClassName',
            field=models.SlugField(default='homepage-section', help_text='no spaces', max_length=100),
        ),
        migrations.AddField(
            model_name='homepagesection',
            name='advert',
            # SET_NULL: removing the snippet leaves the section without an advert.
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to='home.SnippetClass'),
        ),
    ]
| 36.416667 | 158 | 0.62624 | 137 | 1,311 | 5.832117 | 0.576642 | 0.04005 | 0.052566 | 0.082603 | 0.157697 | 0.157697 | 0.157697 | 0.157697 | 0.157697 | 0.157697 | 0 | 0.031093 | 0.239512 | 1,311 | 35 | 159 | 37.457143 | 0.770311 | 0.051869 | 0 | 0.178571 | 1 | 0 | 0.166129 | 0.032258 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.107143 | 0 | 0.214286 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
dbdce6502afcfa5e2708f1c6de7ac5e46b73c5d7 | 3,303 | py | Python | template/misc.py | da-h/tf-boilerplate | ab8409c935d3fcbed07bbefd1cb0049d45283222 | [
"MIT"
] | null | null | null | template/misc.py | da-h/tf-boilerplate | ab8409c935d3fcbed07bbefd1cb0049d45283222 | [
"MIT"
] | null | null | null | template/misc.py | da-h/tf-boilerplate | ab8409c935d3fcbed07bbefd1cb0049d45283222 | [
"MIT"
] | null | null | null | import tensorflow as tf
from tensorflow.python.training.session_run_hook import SessionRunArgs
# Define data loaders #####################################
# See https://gist.github.com/peterroelants/9956ec93a07ca4e9ba5bc415b014bcca
class IteratorInitializerHook(tf.train.SessionRunHook):
    """Hook to initialise data iterator after Session is created."""

    def __init__(self, func=None):
        # func: callable taking the tf.Session; expected to run the dataset
        # iterator's initializer. May be assigned later via the
        # iterator_initializer_func attribute before the session is created.
        super(IteratorInitializerHook, self).__init__()
        self.iterator_initializer_func = func

    def after_create_session(self, session, coord):
        """Initialise the iterator after the session has been created."""
        # NOTE(review): raises TypeError if no func was ever supplied --
        # confirm all call sites set iterator_initializer_func.
        self.iterator_initializer_func(session)
# redefine summarysaverhook (for more accurate saving)
class CustomSummarySaverHook(tf.train.SummarySaverHook):
    """Saves summaries every N steps.

    Unlike the stock SummarySaverHook (which relies on a timer), this hook
    counts before_run() calls itself so a summary is requested on exactly
    every save_steps-th step.
    """

    def __init__(self,save_steps,*args,**kwargs):
        super(CustomSummarySaverHook, self).__init__(*args,save_steps=save_steps,**kwargs)

    def begin(self):
        super().begin()
        # Reset the parent's timer and start our own exact step counter.
        self._timer.reset()
        self._iter_count = 0

    def before_run(self, run_context):  # pylint: disable=unused-argument
        # Request a summary on every save_steps-th call (1-based count).
        self._request_summary = ((self._iter_count + 1) % self.save_steps == 0)
        requests = {"global_step": self._global_step_tensor}
        if self._request_summary:
            if self._get_summary_op() is not None:
                # print(self._iter_count)
                requests["summary"] = self._get_summary_op()
        return SessionRunArgs(requests)

    def after_run(self, run_context, run_values):
        super().after_run(run_context,run_values)
        self._iter_count += 1
class OneTimeSummarySaverHook(tf.train.SummarySaverHook):
    """One-Time SummarySaver

    Saves summaries every N steps.
    E.g. can be used for saving the source code as text.
    """

    def __init__(self, output_dir=None, summary_writer=None, scaffold=None, summary_op=None):
        # NOTE(review): deliberately does NOT call super().__init__(), so the
        # parent's save_steps/save_secs validation is skipped -- confirm this
        # stays compatible with the tf version in use.
        self._summary_op = summary_op
        self._summary_writer = summary_writer
        self._output_dir = output_dir
        self._scaffold = scaffold

        class emptytimer():
            # Stub timer: the parent hook calls update_last_triggered_step();
            # this no-op satisfies that interface with no timing behaviour.
            def update_last_triggered_step(*args,**kwargs):
                pass
        self._timer = emptytimer()

    def begin(self):
        super().begin()
        # _done flips to True after the first run, so the summary op is only
        # ever requested once per session.
        self._done = False

    def before_run(self, run_context):  # pylint: disable=unused-argument
        self._request_summary = not self._done
        requests = {"global_step": self._global_step_tensor}
        if self._request_summary:
            if self._get_summary_op() is not None:
                # print(self._iter_count)
                requests["summary"] = self._get_summary_op()
        return SessionRunArgs(requests)

    def after_run(self, run_context, run_values):
        super().after_run(run_context,run_values)
        self._done = True
def ExperimentTemplate() -> str:
    """A template with Markdown syntax.

    :return: str with Markdown template
    """
    template = """
Experiment
==========

Any [markdown code](https://github.com/adam-p/markdown-here/wiki/Markdown-Cheatsheet) can be used to describe this experiment.
For instance, you can find the automatically generated used settings of this run below.


Current Settings
----------------

| Argument | Value |
| -------- | ----- |
"""
    return template
dbddc1c2c35c862c97e10c987a1255308c864f59 | 2,825 | py | Python | examples/dehydrogenation/3-property-mappings/mappings_from_ontology/run_w_onto.py | TorgeirUstad/dlite | 1d7b4ccec0e76799a25992534cd295a80d83878a | [
"MIT"
] | null | null | null | examples/dehydrogenation/3-property-mappings/mappings_from_ontology/run_w_onto.py | TorgeirUstad/dlite | 1d7b4ccec0e76799a25992534cd295a80d83878a | [
"MIT"
] | null | null | null | examples/dehydrogenation/3-property-mappings/mappings_from_ontology/run_w_onto.py | TorgeirUstad/dlite | 1d7b4ccec0e76799a25992534cd295a80d83878a | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
from typing import Dict, AnyStr
from pathlib import Path
from ontopy import get_ontology
import dlite
from dlite.mappings import make_instance
# Setup dlite paths: locate this example's directories relative to this file
# and register the entity schemas so dlite can resolve them by URI.
thisdir = Path(__file__).parent.absolute()
rootdir = thisdir.parent.parent
workflow1dir = rootdir / '1-simple-workflow'
entitiesdir = rootdir / 'entities'
atomdata = workflow1dir / 'atomscaledata.json'  # molecule data produced by workflow 1
dlite.storage_path.append(f'{entitiesdir}/*.json')
# Define the calculation
def get_energy(reaction):
"""Calculates reaction energies with data from Substance entity
data is harvested from collection and mapped to Substance according to
mappings.
Args:
reaction: dict with names of reactants and products ase keys
and stochiometric coefficient as value
Negative stochiometric coefficients for reactants.
Positive stochiometric coefficients for products.
Returns:
reaction energy
"""
energy = 0
for label, n in reaction.items():
inst = make_instance(Substance, coll[label], mappings,
mapsTo=mapsTo)
energy+=n*inst.molecule_energy
return energy
# Import ontologies with mappings
molecules_onto = get_ontology(f'{thisdir}/mapping_mols.ttl').load()
reaction_onto = get_ontology(f'{thisdir}/mapping_substance.ttl').load()

# Convert the mappings to a single list of triples
mappings = list(molecules_onto.get_unabbreviated_triples())
mappings.extend(list(reaction_onto.get_unabbreviated_triples()))

# Obtain the Metadata to be mapped to each other
Molecule = dlite.get_instance('http://onto-ns.com/meta/0.1/Molecule')
Substance = dlite.get_instance('http://onto-ns.com/meta/0.1/Substance')

# Find mapping relation
# TODO: investigate what to do if the two cases
# use a different mappings relation. As of now it is a
# hard requirement that they use the same.
mapsTo = molecules_onto.mapsTo.iri

# Define where the molecule data is obtained from
# This is a dlite collection
coll = dlite.Collection(f'json://{atomdata}?mode=r#molecules', 0)

# input from chemical engineer, e.g. what are reactants and products
# reactants (left side of equation) have negative stochiometric coefficient
# products (right side of equation) have positive stochiometric coefficient
reaction1 = {'C2H6':-1, 'C2H4':1,'H2':1}

reaction_energy = get_energy(reaction1)
print('Reaction energy 1', reaction_energy)

reaction2 = {'C3H8':-1, 'H2': -2,'CH4':3}

reaction_energy2 = get_energy(reaction2)
# Bug fix: the label previously said "Reaction energy 1" for reaction 2.
print('Reaction energy 2', reaction_energy2)

# Map instance Molecule with label 'H2' to Substance
#inst = make_instance(Substance, coll['H2'], mappings)
#print(inst)

# Map instance Molecule with label 'H2' to itself
#inst2 = make_instance(Molecule, coll['H2'], mappings, strict=False)
#print(inst2)
| 31.388889 | 75 | 0.735929 | 377 | 2,825 | 5.427056 | 0.413793 | 0.034213 | 0.01955 | 0.024438 | 0.150538 | 0.094819 | 0.065494 | 0.034213 | 0.034213 | 0.034213 | 0 | 0.016617 | 0.169204 | 2,825 | 89 | 76 | 31.741573 | 0.855134 | 0.463009 | 0 | 0 | 0 | 0 | 0.193103 | 0.062759 | 0 | 0 | 0 | 0.011236 | 0 | 1 | 0.03125 | false | 0 | 0.15625 | 0 | 0.21875 | 0.0625 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
91551c7d6fac7874ebf8acc4dfa5dfb4b2e853a5 | 6,479 | py | Python | forms.py | lendoo73/my_idea_boxes | c0d0e7bbd0b64ae35146f3792cd477d1ec8461b5 | [
"MIT"
] | null | null | null | forms.py | lendoo73/my_idea_boxes | c0d0e7bbd0b64ae35146f3792cd477d1ec8461b5 | [
"MIT"
] | null | null | null | forms.py | lendoo73/my_idea_boxes | c0d0e7bbd0b64ae35146f3792cd477d1ec8461b5 | [
"MIT"
] | null | null | null | from flask_wtf import FlaskForm
from flask_wtf.file import FileField, FileAllowed, FileRequired
from wtforms import StringField, PasswordField, BooleanField, TextAreaField, SubmitField, RadioField, HiddenField
from wtforms.fields.html5 import DateField, IntegerField
from wtforms.validators import ValidationError, DataRequired, Email, EqualTo, NumberRange
from models import Colleagues, Admins, Boxes, Ideas
class RegistrationFormCompany(FlaskForm):
    """Founder sign-up: registers a new company plus its first user.

    Collects two password pairs: the founder's own login password and the
    shared "joining" password colleagues must present to register later.
    """
    company_name = StringField("Company name", validators = [DataRequired()])
    user_name = StringField("Your User name", validators = [DataRequired()])
    first_name = StringField("Your First name", validators = [DataRequired()])
    last_name = StringField("Your Last name", validators = [DataRequired()])
    position = StringField("Your Position", validators = [DataRequired()])
    email = StringField("Email", validators = [DataRequired(), Email()])
    founder_password = PasswordField("Your own Password", validators = [DataRequired()])
    repeat_founder_password = PasswordField(
        "Repeat Your Password",
        validators = [DataRequired(),
        EqualTo("founder_password")]
    )
    joining_password = PasswordField("Password for Colleagues to Joining", validators = [DataRequired()])
    repeat_joining_password = PasswordField(
        "Repeat Joining Password",
        validators = [DataRequired(),
        EqualTo("joining_password")]
    )
    submit = SubmitField("Register your Company")
class RegistrationFormColleague(FlaskForm):
    """Colleague sign-up: joins an existing company using its name and the
    shared joining password, then sets a personal password."""
    company_name = StringField("Company name", validators = [DataRequired()])
    joining_password = PasswordField("Password for Colleagues to Joining", validators = [DataRequired()])
    user_name = StringField("Your User name", validators = [DataRequired()])
    email = StringField("Email", validators = [DataRequired(), Email()])
    first_name = StringField("Your First name", validators = [DataRequired()])
    last_name = StringField("Your Last name", validators = [DataRequired()])
    position = StringField("Your Position", validators = [DataRequired()])
    password = PasswordField("Your Password", validators = [DataRequired()])
    repeat_password = PasswordField(
        "Repeat Password",
        validators = [DataRequired(),
        EqualTo("password")]
    )
    submit = SubmitField("Register")
class LoginForm(FlaskForm):
    """Sign-in form; accepts either email or user name as the identifier."""
    email_or_user_name = StringField("Email or User name", validators = [DataRequired()])
    password = PasswordField("Password", validators = [DataRequired()])
    remember_me = BooleanField("Remember Me")
    submit = SubmitField("Sign In")
class ConfirmEmailForm(FlaskForm):
    """Email confirmation: the user enters the 6-digit code they received."""
    # The address being confirmed travels along in a hidden field.
    email = HiddenField("Email")
    code = IntegerField(
        "Confirmation code",
        validators = [
            DataRequired(),
            # Exactly six digits: 100000..999999.
            NumberRange(
                min = 100000,
                max = 999999,
                message = "Please enter the 6 digits you received in the email."
            )
        ]
    )
    submit = SubmitField("Confirm my Email")
class UpdateFirstNameForm(FlaskForm):
    """Change the colleague's first name."""
    first_name = StringField("First Name", validators = [DataRequired()])
    submit = SubmitField("Update")
class UpdateLastNameForm(FlaskForm):
    """Change the colleague's last name."""
    last_name = StringField("Last Name", validators = [DataRequired()])
    submit = SubmitField("Update")
class UpdateEmailForm(FlaskForm):
    """Change the colleague's email address; confirmed with their password."""
    email = StringField("Email", validators = [DataRequired(), Email()])
    password = PasswordField("Password", validators = [DataRequired()])
    submit = SubmitField("Update")
class UpdatePositionForm(FlaskForm):
    """Change the colleague's position/job title."""
    position = StringField("Your Position", validators = [DataRequired()])
    submit = SubmitField("Update")
class UpdatePasswordForm(FlaskForm):
    """Change the colleague's own password (requires the current password)."""
    password = PasswordField("Your Current Password", validators = [DataRequired()])
    new_password = PasswordField("Your New Password", validators = [DataRequired()])
    repeat_new_password = PasswordField(
        "Repeat your New Password",
        # Bug fix: EqualTo previously targeted "repeat_new_password" -- the
        # field itself -- so the match check always passed.  It must compare
        # against new_password.
        validators = [DataRequired(),
        EqualTo("new_password")]
    )
    submit = SubmitField("Update")
# File extensions accepted for image uploads (avatar and company logo).
allowed_format = ['png', 'svg', 'jpg', "jpeg"]
class UpdateAvatarForm(FlaskForm):
    """Upload a new avatar image; restricted to the allowed_format list."""
    avatar = FileField(
        "Choose an Avatar:",
        validators = [
            FileRequired(),
            FileAllowed(allowed_format, f"Wrong format! Allowed: {allowed_format}.")
        ]
    )
    submit = SubmitField("Upload Avatar")
class DeleteColleagueForm(FlaskForm):
    """Delete the colleague's own registration; confirmed with their password."""
    password = PasswordField("Your Password", validators = [DataRequired()])
    submit = SubmitField("Delete Registration")
class UpdateLogoForm(FlaskForm):
    """Upload a new company logo; restricted to the allowed_format list."""
    logo = FileField(
        "Choose your Company Logo:",
        validators = [
            FileRequired(),
            FileAllowed(allowed_format, f"Wrong format! Allowed: {allowed_format}.")
        ]
    )
    submit = SubmitField("Upload Logo")
class UpdateCompanyNameForm(FlaskForm):
    """Rename the company."""
    company_name = StringField("Company Name", validators = [DataRequired()])
    submit = SubmitField("Update")
class UpdateJoiningPasswordForm(FlaskForm):
    """Change the company-wide joining password (requires the current one)."""
    password = PasswordField("Current Joining Password", validators = [DataRequired()])
    new_password = PasswordField("New Joining Password", validators = [DataRequired()])
    repeat_new_password = PasswordField(
        "Repeat New Password",
        # Bug fix: EqualTo previously targeted "repeat_new_password" -- the
        # field itself -- so the match check always passed.  It must compare
        # against new_password.
        validators = [DataRequired(),
        EqualTo("new_password")]
    )
    submit = SubmitField("Update")
class UpdatePrivilegsForm(FlaskForm):
    """Grant/revoke admin privileges; confirmed with the actor's password."""
    update_company = BooleanField("Update Company")
    update_privilegs = BooleanField("Update Privilegs")
    update_colleague = BooleanField("Update Colleague")
    update_box = BooleanField("Update Idea Box")
    password = PasswordField("Your Password", validators = [DataRequired()])
    submit = SubmitField("Update Privilegs")
class CreateBoxForm(FlaskForm):
    """Create an idea box with a title, description and closing date."""
    name = StringField("Title", validators = [DataRequired()])
    description = TextAreaField("Description", validators = [DataRequired()])
    close_at = DateField("Close at", format = "%Y-%m-%d")
    submit = SubmitField("Create Box")
class CreateIdeaForm(FlaskForm):
    """Submit an idea to a box, choosing how the author's name is signed."""
    idea = TextAreaField("My Idea", validators= [DataRequired()])
    sign = RadioField(
        "Sign",
        choices = [
            ("incognito", "incognito"),
            ("username", "username"),
            ("first name", "first name"),
            ("full name", "full name")
        ]
    )
    submit = SubmitField("Share my Idea")
915a53aa4a7088b23b53c3227ab2635547e8ba50 | 1,593 | py | Python | setup.py | abhiomkar/couchdbkit | 035062b504b57c1cc6e576be47fb05423fb1ddb3 | [
"MIT"
] | 1 | 2021-06-03T21:34:38.000Z | 2021-06-03T21:34:38.000Z | setup.py | abhiomkar/couchdbkit | 035062b504b57c1cc6e576be47fb05423fb1ddb3 | [
"MIT"
] | null | null | null | setup.py | abhiomkar/couchdbkit | 035062b504b57c1cc6e576be47fb05423fb1ddb3 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -
#
# This file is part of couchdbkit released under the MIT license.
# See the NOTICE for more information.
import os
import sys
# Refuse to run on interpreters older than Python 2.5.
if not hasattr(sys, 'version_info') or sys.version_info < (2, 5, 0, 'final'):
    raise SystemExit("couchdbkit requires Python 2.5 or later.")

from setuptools import setup, find_packages

from couchdbkit import __version__

setup(
    name = 'couchdbkit',
    version = __version__,

    description = 'Python couchdb kit',
    # Bug fix: this previously used the Python-2-only builtin file();
    # open() behaves identically here and also works on Python 3.
    long_description = open(
        os.path.join(
            os.path.dirname(__file__),
            'README.rst'
        )
    ).read(),
    author = 'Benoit Chesneau',
    author_email = 'benoitc@e-engura.com',
    license = 'Apache License 2',
    url = 'http://couchdbkit.org',

    classifiers = [
        'Development Status :: 4 - Beta',
        'Environment :: Other Environment',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: Apache Software License',
        'Operating System :: OS Independent',
        'Programming Language :: Python',
        'Topic :: Database',
        'Topic :: Utilities',
        'Topic :: Software Development :: Libraries :: Python Modules',
    ],

    packages = find_packages(exclude=['tests']),
    zip_safe = False,

    install_requires = [
        'restkit>=3.2',
    ],

    entry_points="""
    [couchdbkit.consumers]
    sync=couchdbkit.consumer.sync:SyncConsumer
    eventlet=couchdbkit.consumer.ceventlet:EventletConsumer
    gevent=couchdbkit.consumer.cgevent:GeventConsumer
    """,

    test_suite='noses',
)
| 27 | 77 | 0.626491 | 165 | 1,593 | 5.915152 | 0.672727 | 0.055328 | 0.028689 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.008403 | 0.252982 | 1,593 | 58 | 78 | 27.465517 | 0.811765 | 0.075957 | 0 | 0.045455 | 0 | 0 | 0.466258 | 0.114519 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 0.090909 | 0 | 0.090909 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
9163be87e7924e53bd340c783bc5110d591ba91f | 1,386 | py | Python | fairseq/scoring/__init__.py | fairseq-FT/fairseq | 18725499144c1bba7c151b796ba774e59d36eaa9 | [
"MIT"
] | 33 | 2021-01-06T18:03:55.000Z | 2022-03-28T12:07:44.000Z | fairseq/scoring/__init__.py | fairseq-FT/fairseq | 18725499144c1bba7c151b796ba774e59d36eaa9 | [
"MIT"
] | 8 | 2021-06-11T03:11:37.000Z | 2022-03-08T19:15:42.000Z | fairseq/scoring/__init__.py | fairseq-FT/fairseq | 18725499144c1bba7c151b796ba774e59d36eaa9 | [
"MIT"
] | 14 | 2021-05-17T06:55:01.000Z | 2022-03-28T12:07:42.000Z | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import importlib
import os
from abc import ABC, abstractmethod
from fairseq import registry
from omegaconf import DictConfig
class BaseScorer(ABC):
    """Abstract scorer: accumulates (reference, prediction) string pairs
    and computes a metric over them."""

    def __init__(self, cfg):
        self.cfg = cfg
        self.ref = []   # reference strings, parallel to self.pred
        self.pred = []  # hypothesis strings

    def add_string(self, ref, pred):
        """Append one reference/prediction pair."""
        self.ref.append(ref)
        self.pred.append(pred)

    @abstractmethod
    def score(self) -> float:
        """Return the metric value over all added pairs."""
        pass

    @abstractmethod
    def result_string(self) -> str:
        """Return a human-readable summary of the score."""
        pass
# Registry hookup: exposes the --scoring CLI option (default "bleu") and the
# register_scorer decorator used by the sibling modules imported below.
_build_scorer, register_scorer, SCORER_REGISTRY, _ = registry.setup_registry(
    "--scoring", default="bleu"
)
def build_scorer(choice, tgt_dict):
    """Instantiate a scorer.

    Args:
        choice: scorer name (str) or a DictConfig whose ``_name`` selects it.
        tgt_dict: target dictionary supplying pad/eos/unk indices for BLEU.
    """
    if isinstance(choice, DictConfig):
        choice = choice._name

    if choice == "bleu":
        # BLEU needs dictionary indices, so it bypasses the generic registry.
        from fairseq.scoring import bleu

        return bleu.Scorer(
            bleu.BleuConfig(pad=tgt_dict.pad(), eos=tgt_dict.eos(), unk=tgt_dict.unk())
        )
    return _build_scorer(choice)
# automatically import any Python files in the current directory, so that the
# @register_scorer decorators in sibling modules run on package import
for file in os.listdir(os.path.dirname(__file__)):
    if file.endswith(".py") and not file.startswith("_"):
        module = file[: file.find(".py")]
        importlib.import_module("fairseq.scoring." + module)
| 24.315789 | 87 | 0.665945 | 178 | 1,386 | 5.039326 | 0.460674 | 0.031215 | 0.024526 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.229437 | 1,386 | 56 | 88 | 24.75 | 0.839888 | 0.166667 | 0 | 0.114286 | 0 | 0 | 0.034813 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.142857 | false | 0.057143 | 0.2 | 0 | 0.428571 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 1 |
916d6d6dc88be47cd9a443a50f8be165dfb36ec7 | 3,167 | py | Python | io_import_rbsp/rbsp/rpak_materials.py | snake-biscuits/io_import_rbsp | 0de47dc70c373cc0417cc222d5d83e6dde72068b | [
"MIT"
] | 7 | 2021-09-30T11:13:00.000Z | 2022-03-25T16:19:19.000Z | io_import_rbsp/rbsp/rpak_materials.py | snake-biscuits/io_import_rbsp | 0de47dc70c373cc0417cc222d5d83e6dde72068b | [
"MIT"
] | 1 | 2021-11-15T18:36:51.000Z | 2021-11-15T18:36:51.000Z | io_import_rbsp/rbsp/rpak_materials.py | snake-biscuits/io_import_rbsp | 0de47dc70c373cc0417cc222d5d83e6dde72068b | [
"MIT"
] | null | null | null | # by MrSteyk & Dogecore
# TODO: extraction instructions & testing
import json
import os.path
from typing import List
import bpy
# Cache of bpy materials already built, keyed by material name.
loaded_materials = {}

# Root directory that material .json files and textures are loaded from.
MATERIAL_LOAD_PATH = ""  # put your path here

# Maps texture types from the material .json to Principled BSDF input names.
# normal has special logic
MATERIAL_INPUT_LINKING = {
    "color": "Base Color",
    "rough": "Roughness",
    "spec": "Specular",
    "illumm": "Emission",
}
def load_material_data_from_name(subpath):
    """Load the JSON material description for *subpath*.

    Returns the parsed JSON data, or False when no .json file exists under
    MATERIAL_LOAD_PATH.
    """
    full_path = MATERIAL_LOAD_PATH + subpath + ".json"
    if not os.path.isfile(full_path):
        return False
    # Bug fix: json.load(open(...)) leaked the file handle; close it
    # deterministically with a context manager.
    with open(full_path, "rb") as json_file:
        return json.load(json_file)
def load_image_from_subpath(subpath):
    """Load an image datablock from MATERIAL_LOAD_PATH + subpath.

    Returns the bpy image, or False when the file does not exist.
    """
    full_path = MATERIAL_LOAD_PATH + subpath
    if os.path.isfile(full_path):
        return bpy.data.images.load(full_path)
    return False
def load_materials(bsp) -> List[bpy.types.Material]:
    """Create a Blender material for every texture name in *bsp*.

    Materials are cached in module-level loaded_materials, so repeated names
    reuse the same bpy material.  A material with no .json description is
    registered bare (no node setup); a referenced texture file that cannot
    be loaded raises ValueError.
    """
    materials = []
    for material_name in bsp.TEXTURE_DATA_STRING_DATA:
        if material_name in loaded_materials:
            materials.append(loaded_materials[material_name])
            continue
        mat_data = load_material_data_from_name(material_name)
        material = bpy.data.materials.new("materials/" + material_name)
        if not mat_data:
            # No .json description: cache the empty material and move on.
            loaded_materials[material_name] = material
            materials.append(material)
            # raise ValueError(f"Material data for material {material_name} does not exist!")
            continue
        # print(material_name, mat_data)
        material.use_nodes = True
        bsdf = material.node_tree.nodes["Principled BSDF"]
        # Link each simple texture type to its Principled BSDF input.
        for mat_data_entry in MATERIAL_INPUT_LINKING.keys():
            texture_file = mat_data[mat_data_entry]
            if texture_file == "":
                print(f"Texture type {mat_data_entry} doesn't exist in {material_name}'s material data, skipping.")
                continue
            img = load_image_from_subpath(texture_file)
            if not img:
                # Bug fix: an unreachable `continue` after this raise was
                # dead code and has been removed.
                raise ValueError(f"{material_name}'s texture {texture_file} ({mat_data_entry}) doesn't exist!")
            tex = material.node_tree.nodes.new("ShaderNodeTexImage")
            tex.image = img
            material.node_tree.links.new(bsdf.inputs[MATERIAL_INPUT_LINKING[mat_data_entry]], tex.outputs["Color"])
            if mat_data_entry == "color":
                # The colour map also supplies the material's alpha channel.
                material.node_tree.links.new(bsdf.inputs["Alpha"], tex.outputs["Alpha"])
        # The normal map needs an intermediate ShaderNodeNormalMap node, so
        # it is handled separately from the simple links above.
        if mat_data["normal"] != "":
            texture_file = mat_data["normal"]
            normalmap = material.node_tree.nodes.new("ShaderNodeNormalMap")
            img = load_image_from_subpath(texture_file)
            if not img:
                # Bug fix: an unreachable `continue` after this raise was
                # dead code and has been removed.
                raise ValueError(f"Texture {texture_file} for material {material_name} (normal) doesn't exist!")
            tex = material.node_tree.nodes.new("ShaderNodeTexImage")
            tex.image = img
            material.node_tree.links.new(normalmap.inputs["Color"], tex.outputs["Color"])
            material.node_tree.links.new(bsdf.inputs["Normal"], normalmap.outputs["Normal"])
        loaded_materials[material_name] = material
        materials.append(material)
    return materials
| 38.156627 | 115 | 0.649826 | 382 | 3,167 | 5.151832 | 0.23822 | 0.073171 | 0.065041 | 0.042683 | 0.398882 | 0.362297 | 0.344512 | 0.300813 | 0.202236 | 0.202236 | 0 | 0 | 0.250079 | 3,167 | 82 | 116 | 38.621951 | 0.828632 | 0.07515 | 0 | 0.328125 | 0 | 0 | 0.14863 | 0 | 0 | 0 | 0 | 0.012195 | 0 | 1 | 0.046875 | false | 0 | 0.0625 | 0 | 0.1875 | 0.015625 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
916f9138f4bbb1766481eef3ea77cac318445838 | 3,291 | py | Python | aardvark/conf/reaper_conf.py | ttsiouts/aardvark | cbf29f332df86814dd581152faf863c0d29ae41c | [
"Apache-2.0"
] | null | null | null | aardvark/conf/reaper_conf.py | ttsiouts/aardvark | cbf29f332df86814dd581152faf863c0d29ae41c | [
"Apache-2.0"
] | null | null | null | aardvark/conf/reaper_conf.py | ttsiouts/aardvark | cbf29f332df86814dd581152faf863c0d29ae41c | [
"Apache-2.0"
] | null | null | null | # Copyright (c) 2018 European Organization for Nuclear Research.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
# Option group for the reaper service.
reaper_group = cfg.OptGroup(
    'reaper',
    title='Aardvark Service Options',
    help="Configuration options for Aardvark service")

# Reaper configuration options.  Help-text fixes: removed duplicated
# "the the", corrected the copy-pasted ``max_attempts`` description,
# fixed the "zookeper" typo and a stray quote in the example.
reaper_opts = [
    cfg.StrOpt('reaper_driver',
               default='chance_driver',
               help="""
The driver that the reaper will use

Possible choices:

* strict_driver: The purpose of the preemptibles existence is to eliminate the
                 idling resources. This driver gets all the possible offers
                 from the relevant hosts and tries to find the best matching
                 for the requested resources. The best matching offer is the
                 combination of preemptible servers that leave the least
                 possible resources unused.

* chance_driver: A valid host is selected randomly and in a number of
                 preconfigured retries, the driver tries to find the instances
                 that have to be culled in order to have the requested
                 resources available.
"""
    ),
    cfg.IntOpt('alternatives',
               default=1,
               help="""
The number of alternative slots that the reaper will try to free up for
each requested slot.
"""
    ),
    cfg.IntOpt('max_attempts',
               default=5,
               help="""
The maximum number of attempts that the reaper will make while trying to
free up space for a requested slot.
"""
    ),
    cfg.ListOpt('watched_aggregates',
                default=[],
                help="""
The list of aggregate names that the reaper will try to make space in.

Each element of the list can be an aggregate or a combination of aggregates.
A combination of aggregates is a single string of vertical-line-separated
aggregate names.

e.g. watched_aggregates={agg_name1},{agg_name2}|{agg_name3},....

For each element in the list, a reaper thread will be spawned and the request
will be forwarded to the responsible worker.

If the provided list is empty, only one worker will be spawned, responsible for
the whole system.
"""
    ),
    cfg.StrOpt('job_backend',
               default='redis',
               choices=('redis', 'zookeeper'),
               help="""
The backend to use for distributed task management.

For this purpose the Reaper uses OpenStack Taskflow. The two supported
backends are redis and zookeeper.
"""
    ),
    cfg.StrOpt('backend_host',
               default='localhost',
               help="""
Specifies the host where the job board backend can be found.
"""
    ),
]
def register_opts(conf):
    """Register the reaper option group and all its options on *conf*."""
    group = reaper_group
    conf.register_group(group)
    conf.register_opts(reaper_opts, group=group)
| 32.91 | 79 | 0.671832 | 439 | 3,291 | 4.98861 | 0.448747 | 0.027397 | 0.023744 | 0.021918 | 0.085845 | 0.077626 | 0.077626 | 0.077626 | 0.077626 | 0.077626 | 0 | 0.005337 | 0.259799 | 3,291 | 99 | 80 | 33.242424 | 0.893678 | 0.192343 | 0 | 0.314286 | 0 | 0 | 0.711691 | 0.031404 | 0 | 0 | 0 | 0 | 0 | 1 | 0.014286 | false | 0 | 0.014286 | 0 | 0.028571 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
9170343444c1172d149626528603249b2f63831c | 370 | py | Python | count_files.py | xuannianc/keras-retinanet | d1da39592042927aaf3b3eb905a308c327983bed | [
"Apache-2.0"
] | null | null | null | count_files.py | xuannianc/keras-retinanet | d1da39592042927aaf3b3eb905a308c327983bed | [
"Apache-2.0"
] | null | null | null | count_files.py | xuannianc/keras-retinanet | d1da39592042927aaf3b3eb905a308c327983bed | [
"Apache-2.0"
] | null | null | null | import csv
# Count the number of distinct image files referenced by the train and
# validation annotation CSVs (first column holds the image path).
vat_filenames = set()
train_csv_filename = 'train_annotations.csv'
val_csv_filename = 'val_annotations.csv'
for csv_filename in [train_csv_filename, val_csv_filename]:
    # Bug fix: open the annotation file with a context manager so the
    # handle is closed deterministically (the original leaked it).
    with open(csv_filename) as csv_file:
        for line in csv.reader(csv_file):
            # Keep only the basename of the image path.
            vat_filename = line[0].split('/')[-1]
            vat_filenames.add(vat_filename)
print(len(vat_filenames))
vat_filenames.clear()
| 30.833333 | 59 | 0.735135 | 53 | 370 | 4.792453 | 0.396226 | 0.259843 | 0.125984 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.006349 | 0.148649 | 370 | 11 | 60 | 33.636364 | 0.8 | 0 | 0 | 0 | 0 | 0 | 0.110811 | 0.056757 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.1 | 0 | 0.1 | 0.1 | 0 | 0 | 0 | null | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
91708273d963214e9092983f15d8ef3340677e15 | 814 | py | Python | em Python/Roteiro7/Roteiro7__testes_dijkstra.py | GuilhermeEsdras/Grafos | b6556c3d679496d576f65b798a1a584cd73e40f4 | [
"MIT"
] | null | null | null | em Python/Roteiro7/Roteiro7__testes_dijkstra.py | GuilhermeEsdras/Grafos | b6556c3d679496d576f65b798a1a584cd73e40f4 | [
"MIT"
] | null | null | null | em Python/Roteiro7/Roteiro7__testes_dijkstra.py | GuilhermeEsdras/Grafos | b6556c3d679496d576f65b798a1a584cd73e40f4 | [
"MIT"
] | null | null | null | from Roteiro7.Roteiro7__funcoes import GrafoComPesos
# .:: Test file for the Dijkstra algorithm ::. #
# --------------------------------------------------------------------------- #

# Worked example from class: shortest path E -> D.
grafo_aula = GrafoComPesos(
    ['E', 'A', 'B', 'C', 'D'],
    {
        'E-A': 1, 'E-C': 10,
        'A-B': 2,
        'B-C': 4,
        'C-D': 3,
    },
)
print(grafo_aula)
print('Menor caminho por Dijkstra: ', grafo_aula.dijkstra('E', 'D'))

print("-------------------------")

# Second, larger example: shortest path A -> E.
grafo_aula2 = GrafoComPesos(
    ['A', 'B', 'C', 'D', 'E', 'F', 'G'],
    {
        'A-B': 1, 'A-F': 3, 'A-G': 2,
        'B-F': 1,
        'C-B': 2, 'C-D': 5,
        'D-E': 2,
        'F-D': 4, 'F-G': 2,
        'G-E': 7,
    },
)
print(grafo_aula2)
print('Menor caminho por Dijkstra: ', grafo_aula2.dijkstra('A', 'E'))
| 22.611111 | 79 | 0.395577 | 103 | 814 | 3.048544 | 0.31068 | 0.025478 | 0.10828 | 0.025478 | 0.242038 | 0.210191 | 0 | 0 | 0 | 0 | 0 | 0.035413 | 0.271499 | 814 | 35 | 80 | 23.257143 | 0.494098 | 0.15602 | 0 | 0 | 0 | 0 | 0.208517 | 0.036711 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.034483 | 0 | 0.034483 | 0.172414 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
917377628f552efbcce428798dd528e6e5fe7134 | 4,196 | py | Python | setup.py | aaron19950321/ICOM | d5bd0705776c505dd1df0a1c76a07fee2d218394 | [
"PSF-2.0",
"BSD-3-Clause"
] | 5 | 2018-10-09T13:39:31.000Z | 2020-03-26T18:39:49.000Z | setup.py | aaron19950321/ICOM | d5bd0705776c505dd1df0a1c76a07fee2d218394 | [
"PSF-2.0",
"BSD-3-Clause"
] | null | null | null | setup.py | aaron19950321/ICOM | d5bd0705776c505dd1df0a1c76a07fee2d218394 | [
"PSF-2.0",
"BSD-3-Clause"
] | 2 | 2018-10-09T13:39:36.000Z | 2018-10-09T23:18:39.000Z | import os, os.path
import subprocess
from distutils.core import setup
from py2exe.build_exe import py2exe
PROGRAM_NAME = 'icom_app'  # base name of the generated executable/installer
PROGRAM_DESC = 'simple icom app'

# NSIS installer-script template, filled in by NSISScript.create() via
# str.format().  Braces NSIS itself needs are doubled ({{ }}) so format()
# leaves them alone.
NSIS_SCRIPT_TEMPLATE = r"""
!define py2exeOutputDirectory '{output_dir}\'
!define exe '{program_name}.exe'
; Uses solid LZMA compression. Can be slow, use discretion.
SetCompressor /SOLID lzma
; Sets the title bar text (although NSIS seems to append "Installer")
Caption "{program_desc}"
Name '{program_name}'
OutFile ${{exe}}
Icon '{icon_location}'
; Use XPs styles where appropriate
XPStyle on
; You can opt for a silent install, but if your packaged app takes a long time
; to extract, users might get confused. The method used here is to show a dialog
; box with a progress bar as the installer unpacks the data.
;SilentInstall silent
AutoCloseWindow true
ShowInstDetails nevershow
Section
DetailPrint "Extracting application..."
SetDetailsPrint none
InitPluginsDir
SetOutPath '$PLUGINSDIR'
File /r '${{py2exeOutputDirectory}}\*'
GetTempFileName $0
;DetailPrint $0
Delete $0
StrCpy $0 '$0.bat'
FileOpen $1 $0 'w'
FileWrite $1 '@echo off$\r$\n'
StrCpy $2 $TEMP 2
FileWrite $1 '$2$\r$\n'
FileWrite $1 'cd $PLUGINSDIR$\r$\n'
FileWrite $1 '${{exe}}$\r$\n'
FileClose $1
; Hide the window just before the real app launches. Otherwise you have two
; programs with the same icon hanging around, and it's confusing.
HideWindow
nsExec::Exec $0
Delete $0
SectionEnd
"""
class NSISScript(object):
    """Renders and compiles an NSIS installer script for the frozen app."""

    NSIS_COMPILE = "makensis"

    def __init__(self, program_name, program_desc, dist_dir, icon_loc):
        self.program_name = program_name
        self.program_desc = program_desc
        self.dist_dir = dist_dir
        self.icon_loc = icon_loc
        self.pathname = "setup_%s.nsi" % self.program_name

    def create(self):
        """Render NSIS_SCRIPT_TEMPLATE into the .nsi file."""
        script_body = NSIS_SCRIPT_TEMPLATE.format(
            program_name=self.program_name,
            program_desc=self.program_desc,
            output_dir=self.dist_dir,
            icon_location=os.path.join(self.dist_dir, self.icon_loc))
        with open(self.pathname, "w") as script_file:
            script_file.write(script_body)

    def compile(self):
        """Run makensis on the generated script; raise on a non-zero exit."""
        # "/P5" uses realtime priority for the LZMA compression stage.
        # This can get annoying though.
        compiler = subprocess.Popen(
            [self.NSIS_COMPILE, self.pathname, "/P5"], env=os.environ)
        compiler.communicate()
        if compiler.returncode:
            raise RuntimeError(
                "NSIS compilation return code: %d" % compiler.returncode)
class build_installer(py2exe):
    # This class first builds the exe file(s), then creates an NSIS installer
    # that runs your program from a temporary directory.

    def run(self):
        """Freeze with py2exe, then generate and compile the NSIS installer."""
        # First, let py2exe do its work.
        py2exe.run(self)
        dist_dir = self.dist_dir
        # Create the installer, using the files py2exe has created.
        script = NSISScript(PROGRAM_NAME,
                            PROGRAM_DESC,
                            dist_dir,
                            os.path.join('.', 'icon.ico'))
        # Fix: parenthesised print so the module also parses under Python 3
        # (output is unchanged on Python 2).  Also dropped the unused
        # ``lib_dir`` local.
        print("*** creating the NSIS setup script***")
        script.create()
        print("*** compiling the NSIS setup script***")
        script.compile()
# Bundle all library modules into a single zip archive.
zipfile = r"lib\shardlib"

setup(
    name='MyApp',
    description='My Application',
    version='1.0',
    # Bug fix: py2exe reads GUI targets from the ``windows`` keyword; the
    # original ``window`` key was an unknown option and silently ignored,
    # so no executable target was built.
    windows=[
        {
            'script': os.path.join('.', 'ICOM.py'),
            'icon_resources': [(1, os.path.join('.', 'icom.ico'))],
            'dest_base': PROGRAM_NAME,
        },
    ],
    options={
        'py2exe': {
            # Py2exe options...
            "optimize": 2
        }
    },
    zipfile=zipfile,
    data_files=[],  # etc...
    cmdclass={"py2exe": build_installer},
)
9176396ea025090d1e564363b18149e19bf37323 | 5,057 | py | Python | manager/tests/api_view_test_classes.py | UN-ICC/icc-digital-id-manager | aca0109b3202b292145326ec5523ee8f24691a83 | [
"Apache-2.0"
] | 3 | 2021-02-03T16:37:19.000Z | 2022-02-07T09:59:03.000Z | manager/tests/api_view_test_classes.py | UN-ICC/icc-digital-id-manager | aca0109b3202b292145326ec5523ee8f24691a83 | [
"Apache-2.0"
] | null | null | null | manager/tests/api_view_test_classes.py | UN-ICC/icc-digital-id-manager | aca0109b3202b292145326ec5523ee8f24691a83 | [
"Apache-2.0"
] | 2 | 2021-02-10T16:03:31.000Z | 2022-02-07T08:50:16.000Z | import pytest
from rest_framework import status
from rest_framework.test import APIClient
class TestBase:
    # Base scaffold for the API-view test mixins below.  Subclasses describe
    # one endpoint: its URL ``path``, the payload for each HTTP verb, whether
    # authentication is required, and which verbs the view implements.

    __test__ = False  # not collected directly; only concrete subclasses run

    path = None
    # Per-verb request payloads; overridden by subclasses as needed.
    get_data = {}
    put_data = {}
    post_data = {}
    delete_data = {}
    requires_auth = True
    # Capability flags: which verbs the view under test implements.
    implements_retrieve = False
    implements_create = False
    implements_update = False
    implements_destroy = False
    # Shared unauthenticated client; swapped out by ``authenticate``.
    client = APIClient()

    @pytest.fixture
    def setup(self, setup_method=None):
        # Hook fixture: subclasses may override to provide real setup.
        return setup_method

    @pytest.fixture
    def authenticate(self, api_client_admin):
        # Replace the shared client with an admin-authenticated one.
        self.client = api_client_admin
class TestGet(TestBase):
    """GET-verb behaviour shared by the API view test suites."""

    @pytest.fixture
    def get_response(self):
        """Issue a GET request against the view under test."""
        return self.client.get(f"/{self.path}", self.get_data, format="json",)

    def test_get_without_authentication(self, setup, get_response):
        if self.requires_auth:
            returns_status_code_http_401_unauthorized(get_response)
            return
        if self.implements_retrieve:
            returns_status_code_http_200_ok(get_response)
            response_has_etag(get_response)
        else:
            returns_status_code_http_405_not_allowed(get_response)

    def test_get_with_authentication(self, setup, authenticate, get_response):
        if self.implements_retrieve:
            returns_status_code_http_200_ok(get_response)
            response_has_etag(get_response)
        else:
            returns_status_code_http_405_not_allowed(get_response)
class TestPost(TestBase):
    """POST-verb behaviour shared by the API view test suites."""

    @pytest.fixture
    def post_response(self):
        """Issue a POST request against the view under test."""
        return self.client.post(
            path=f"/{self.path}", data=self.post_data, format="json",
        )

    def test_post_without_authentication(self, setup, post_response):
        # POST always demands credentials, regardless of requires_auth.
        returns_status_code_http_401_unauthorized(post_response)

    def test_post_with_authentication(self, setup, authenticate, post_response):
        if not self.implements_create:
            returns_status_code_http_405_not_allowed(post_response)
        else:
            returns_status_code_http_201_created(post_response)
class TestPut(TestBase):
    # Mixin exercising the PUT verb against the view described by the
    # TestBase attributes.

    @pytest.fixture
    def put_response(self):
        # Issue a PUT request with the subclass-supplied payload.
        return self.client.put(f"/{self.path}", self.put_data, format="json",)

    def test_put_without_authentication(self, setup, put_response):
        if not self.requires_auth:
            if self.implements_update:
                returns_status_code_http_200_ok(put_response)
            else:
                returns_status_code_http_405_not_allowed(put_response)
        else:
            returns_status_code_http_401_unauthorized(put_response)

    def test_put_with_authentication(self, setup, authenticate, put_response):
        if not self.implements_update:
            returns_status_code_http_405_not_allowed(put_response)
        elif self.requires_auth:
            returns_status_code_http_200_ok(put_response)
        else:
            # NOTE(review): an authenticated PUT on a view that does NOT
            # require auth is expected to return 401 here; this looks like a
            # copy/paste from the unauthenticated test (a 200 would be the
            # natural expectation) — confirm intent before changing.
            returns_status_code_http_401_unauthorized(put_response)
class TestDelete(TestBase):
    # Mixin exercising the DELETE verb against the view described by the
    # TestBase attributes.

    @pytest.fixture
    def delete_response(self):
        # Issue a DELETE request with the subclass-supplied payload.
        return self.client.delete(f"/{self.path}", self.delete_data, format="json")

    def test_delete_without_authentication(self, setup, delete_response):
        if not self.requires_auth:
            if self.implements_destroy:
                returns_status_code_http_204_no_content(delete_response)
            else:
                returns_status_code_http_405_not_allowed(delete_response)
        else:
            returns_status_code_http_401_unauthorized(delete_response)

    def test_delete_with_authentication(self, setup, authenticate, delete_response):
        if not self.implements_destroy:
            returns_status_code_http_405_not_allowed(delete_response)
        elif self.requires_auth:
            returns_status_code_http_204_no_content(delete_response)
        else:
            # NOTE(review): an authenticated DELETE on a view that does NOT
            # require auth is expected to return 401, mirroring the
            # unauthenticated test — confirm this is intentional.
            returns_status_code_http_401_unauthorized(delete_response)
class TestView(TestGet, TestPost, TestPut, TestDelete):
    # Full-CRUD suite: combines all four verb mixins.
    __test__ = False
    requires_auth = True
class TestListCreateAPIView(TestView):
    # Suite for list/create endpoints: GET and POST implemented, auth required.
    __test__ = False
    implements_retrieve = True
    implements_create = True
    requires_auth = True
class TestRetrieveAPIView(TestView):
    # Suite for read-only endpoints: only GET implemented, auth required.
    __test__ = False
    implements_retrieve = True
    requires_auth = True
class TestUnauthenticatedRetrieveAPIView(TestView):
    # Suite for public read-only endpoints: GET implemented, no auth needed.
    __test__ = False
    implements_retrieve = True
    requires_auth = False
def _assert_status_code(response, expected):
    # Shared assertion used by the status-code helpers below.
    assert response.status_code == expected


def returns_status_code_http_200_ok(response):
    """Assert the response carries HTTP 200."""
    _assert_status_code(response, status.HTTP_200_OK)


def returns_status_code_http_401_unauthorized(response):
    """Assert the response carries HTTP 401."""
    _assert_status_code(response, status.HTTP_401_UNAUTHORIZED)


def returns_status_code_http_201_created(response):
    """Assert the response carries HTTP 201."""
    _assert_status_code(response, status.HTTP_201_CREATED)


def returns_status_code_http_204_no_content(response):
    """Assert the response carries HTTP 204."""
    _assert_status_code(response, status.HTTP_204_NO_CONTENT)


def returns_status_code_http_405_not_allowed(response):
    """Assert the response carries HTTP 405."""
    _assert_status_code(response, status.HTTP_405_METHOD_NOT_ALLOWED)


def response_has_etag(response):
    """Assert the response exposes a non-empty ETag header."""
    assert response.get("ETag")
| 31.02454 | 84 | 0.721376 | 615 | 5,057 | 5.478049 | 0.117073 | 0.089047 | 0.12615 | 0.155833 | 0.648857 | 0.512615 | 0.453547 | 0.34669 | 0.304541 | 0.216978 | 0 | 0.022613 | 0.212972 | 5,057 | 162 | 85 | 31.216049 | 0.823869 | 0 | 0 | 0.441667 | 0 | 0 | 0.013447 | 0 | 0 | 0 | 0 | 0 | 0.05 | 1 | 0.166667 | false | 0 | 0.025 | 0.041667 | 0.508333 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 |
917a6b3b8a05d7c695e7c6d3cb38a9324f5ab905 | 302 | py | Python | mol/data/reader.py | TzuTingWei/mol | 9499925443f389d8e960b6d656f2953d21df3e3b | [
"MIT"
] | null | null | null | mol/data/reader.py | TzuTingWei/mol | 9499925443f389d8e960b6d656f2953d21df3e3b | [
"MIT"
] | null | null | null | mol/data/reader.py | TzuTingWei/mol | 9499925443f389d8e960b6d656f2953d21df3e3b | [
"MIT"
] | null | null | null | import os
from mol.util import read_xyz
# Directory containing this module's bundled data files.
dirname = os.path.dirname(os.path.abspath(__file__))
filename = os.path.join(dirname, 'look_and_say.dat')

# Load the look-and-say dataset once at import time.
with open(filename, 'r') as data_file:
    look_and_say = data_file.read()
def get_molecule(filename):
    """Read ``<filename>.xyz`` from the bundled data directory."""
    xyz_path = os.path.join(dirname, filename + ".xyz")
    return read_xyz(xyz_path)
| 25.166667 | 58 | 0.748344 | 49 | 302 | 4.387755 | 0.530612 | 0.111628 | 0.12093 | 0.15814 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.109272 | 302 | 11 | 59 | 27.454545 | 0.799257 | 0 | 0 | 0 | 0 | 0 | 0.069536 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.125 | false | 0 | 0.25 | 0.125 | 0.5 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 1 |
917b8eb1f8726a411ad6e99afecc5eaca421cc08 | 1,793 | py | Python | misc/python/mango/application/main_driver/logstream.py | pymango/pymango | b55f831f0194b214e746b2dfb4d9c6671a1abc38 | [
"BSD-2-Clause"
] | 3 | 2020-05-11T03:23:17.000Z | 2021-03-16T09:01:48.000Z | misc/python/mango/application/main_driver/logstream.py | pymango/pymango | b55f831f0194b214e746b2dfb4d9c6671a1abc38 | [
"BSD-2-Clause"
] | null | null | null | misc/python/mango/application/main_driver/logstream.py | pymango/pymango | b55f831f0194b214e746b2dfb4d9c6671a1abc38 | [
"BSD-2-Clause"
] | 2 | 2017-03-04T11:03:40.000Z | 2020-08-01T10:01:36.000Z | __doc__ = \
"""
=======================================================================================
Main-driver :obj:`LogStream` variables (:mod:`mango.application.main_driver.logstream`)
=======================================================================================
.. currentmodule:: mango.application.main_driver.logstream
Logging objects/attributes for :obj:`mango.application.main_driver.MainDriverFilter` filters.
Classes
=======
.. autosummary::
:toctree: generated/
LogStream - Message logging for :obj:`mango.application.main_driver.MainDriverFilter` filters.
Attributes
==========
.. autodata:: log
.. autodata:: mstLog
.. autodata:: mstOut
.. autodata:: warnLog
.. autodata:: errLog
"""
import mango
import mango.mpi as mpi
import os
import os.path
import sys
if sys.platform.startswith('linux'):
    # Load the C extension with RTLD_NOW|RTLD_GLOBAL, then restore the
    # interpreter's previous dlopen flags.  NOTE(review): RTLD_GLOBAL is
    # presumably needed so the extension's symbols are visible to the other
    # mango extension modules — confirm.
    import DLFCN as dl
    _flags = sys.getdlopenflags()
    sys.setdlopenflags(dl.RTLD_NOW|dl.RTLD_GLOBAL)
    from . import _mango_main_driver as _mango_main_driver_so
    sys.setdlopenflags(_flags)
else:
    # Non-Linux platforms: plain import, no dlopen-flag handling needed.
    from . import _mango_main_driver as _mango_main_driver_so

from mango.core import LogStream

#: Messages sent to stdout, prefixed with :samp:`'P<RANK>'`, where :samp:`<RANK>` is MPI process world rank.
log = _mango_main_driver_so._log

#: Messages sent to stdout, prefixed with :samp:`'MST'`, and messages also saved to history-meta-data.
mstLog = _mango_main_driver_so._mstLog

#: Messages sent to stdout, prefixed with :samp:`'OUT'`.
mstOut = _mango_main_driver_so._mstOut

#: Messages sent to stderr, prefixed with :samp:`'WARNING'`.
warnLog = _mango_main_driver_so._warnLog

#: Messages sent to stderr, prefixed with :samp:`'ERROR'`.
errLog = _mango_main_driver_so._errLog

# Export every public (non-underscore) name defined above.
__all__ = [s for s in dir() if not s.startswith('_')]
917c31411ccb8a75122b971cca9ce661e5940151 | 9,680 | py | Python | ucdev/cy7c65211/header.py | luftek/python-ucdev | 8d3c46d25551f1237e6a2f7a90d54c24bcb1d4f9 | [
"MIT"
] | 11 | 2015-07-08T01:28:01.000Z | 2022-01-26T14:29:47.000Z | ucdev/cy7c65211/header.py | luftek/python-ucdev | 8d3c46d25551f1237e6a2f7a90d54c24bcb1d4f9 | [
"MIT"
] | 5 | 2017-12-07T15:04:00.000Z | 2021-06-02T14:47:14.000Z | ucdev/cy7c65211/header.py | tai/python-ucdev | 8d3c46d25551f1237e6a2f7a90d54c24bcb1d4f9 | [
"MIT"
] | 4 | 2017-02-18T18:20:13.000Z | 2022-03-23T16:21:20.000Z | # -*- coding: utf-8-unix -*-
import platform
######################################################################
# Platform specific headers
######################################################################
if platform.system() == 'Linux':
    # cffi needs an explicit BOOL typedef on Linux.
    src = """
typedef bool BOOL;
"""
else:
    # Bug fix: ``src`` was previously assigned only on Linux, so the later
    # ``src += ...`` raised NameError on every other platform.  Non-Linux
    # builds presumably get BOOL from system headers — TODO confirm on
    # Windows/macOS.
    src = ""
######################################################################
# Common headers
######################################################################
src += """
#define CY_STRING_DESCRIPTOR_SIZE 256
#define CY_MAX_DEVICE_INTERFACE 5
#define CY_US_VERSION_MAJOR 1
#define CY_US_VERSION_MINOR 0
#define CY_US_VERSION_PATCH 0
#define CY_US_VERSION 1
#define CY_US_VERSION_BUILD 74
typedef unsigned int UINT32;
typedef unsigned char UINT8;
typedef unsigned short UINT16;
typedef char CHAR;
typedef unsigned char UCHAR;
typedef void* CY_HANDLE;
typedef void (*CY_EVENT_NOTIFICATION_CB_FN)(UINT16 eventsNotified);
typedef struct _CY_VID_PID {
UINT16 vid;
UINT16 pid;
} CY_VID_PID, *PCY_VID_PID;
typedef struct _CY_LIBRARY_VERSION {
UINT8 majorVersion;
UINT8 minorVersion;
UINT16 patch;
UINT8 buildNumber;
} CY_LIBRARY_VERSION, *PCY_LIBRARY_VERSION;
typedef struct _CY_FIRMWARE_VERSION {
UINT8 majorVersion;
UINT8 minorVersion;
UINT16 patchNumber;
UINT32 buildNumber;
} CY_FIRMWARE_VERSION, *PCY_FIRMWARE_VERSION;
typedef enum _CY_DEVICE_CLASS{
CY_CLASS_DISABLED = 0,
CY_CLASS_CDC = 0x02,
CY_CLASS_PHDC = 0x0F,
CY_CLASS_VENDOR = 0xFF
} CY_DEVICE_CLASS;
typedef enum _CY_DEVICE_TYPE {
CY_TYPE_DISABLED = 0,
CY_TYPE_UART,
CY_TYPE_SPI,
CY_TYPE_I2C,
CY_TYPE_JTAG,
CY_TYPE_MFG
} CY_DEVICE_TYPE;
typedef enum _CY_DEVICE_SERIAL_BLOCK
{
SerialBlock_SCB0 = 0,
SerialBlock_SCB1,
SerialBlock_MFG
} CY_DEVICE_SERIAL_BLOCK;
typedef struct _CY_DEVICE_INFO {
CY_VID_PID vidPid;
UCHAR numInterfaces;
UCHAR manufacturerName [256];
UCHAR productName [256];
UCHAR serialNum [256];
UCHAR deviceFriendlyName [256];
CY_DEVICE_TYPE deviceType [5];
CY_DEVICE_CLASS deviceClass [5];
CY_DEVICE_SERIAL_BLOCK deviceBlock;
} CY_DEVICE_INFO,*PCY_DEVICE_INFO;
typedef struct _CY_DATA_BUFFER {
UCHAR *buffer;
UINT32 length;
UINT32 transferCount;
} CY_DATA_BUFFER,*PCY_DATA_BUFFER;
typedef enum _CY_RETURN_STATUS{
CY_SUCCESS = 0,
CY_ERROR_ACCESS_DENIED,
CY_ERROR_DRIVER_INIT_FAILED,
CY_ERROR_DEVICE_INFO_FETCH_FAILED,
CY_ERROR_DRIVER_OPEN_FAILED,
CY_ERROR_INVALID_PARAMETER,
CY_ERROR_REQUEST_FAILED,
CY_ERROR_DOWNLOAD_FAILED,
CY_ERROR_FIRMWARE_INVALID_SIGNATURE,
CY_ERROR_INVALID_FIRMWARE,
CY_ERROR_DEVICE_NOT_FOUND,
CY_ERROR_IO_TIMEOUT,
CY_ERROR_PIPE_HALTED,
CY_ERROR_BUFFER_OVERFLOW,
CY_ERROR_INVALID_HANDLE,
CY_ERROR_ALLOCATION_FAILED,
CY_ERROR_I2C_DEVICE_BUSY,
CY_ERROR_I2C_NAK_ERROR,
CY_ERROR_I2C_ARBITRATION_ERROR,
CY_ERROR_I2C_BUS_ERROR,
CY_ERROR_I2C_BUS_BUSY,
CY_ERROR_I2C_STOP_BIT_SET,
CY_ERROR_STATUS_MONITOR_EXIST
} CY_RETURN_STATUS;
typedef struct _CY_I2C_CONFIG{
UINT32 frequency;
UINT8 slaveAddress;
BOOL isMaster;
BOOL isClockStretch;
} CY_I2C_CONFIG,*PCY_I2C_CONFIG;
typedef struct _CY_I2C_DATA_CONFIG
{
UCHAR slaveAddress;
BOOL isStopBit;
BOOL isNakBit;
} CY_I2C_DATA_CONFIG, *PCY_I2C_DATA_CONFIG;
typedef enum _CY_SPI_PROTOCOL {
CY_SPI_MOTOROLA = 0,
CY_SPI_TI,
CY_SPI_NS
} CY_SPI_PROTOCOL;
typedef struct _CY_SPI_CONFIG
{
UINT32 frequency;
UCHAR dataWidth;
CY_SPI_PROTOCOL protocol ;
BOOL isMsbFirst;
BOOL isMaster;
BOOL isContinuousMode;
BOOL isSelectPrecede;
BOOL isCpha;
BOOL isCpol;
}CY_SPI_CONFIG,*PCY_SPI_CONFIG;
typedef enum _CY_UART_BAUD_RATE
{
CY_UART_BAUD_300 = 300,
CY_UART_BAUD_600 = 600,
CY_UART_BAUD_1200 = 1200,
CY_UART_BAUD_2400 = 2400,
CY_UART_BAUD_4800 = 4800,
CY_UART_BAUD_9600 = 9600,
CY_UART_BAUD_14400 = 14400,
CY_UART_BAUD_19200 = 19200,
CY_UART_BAUD_38400 = 38400,
CY_UART_BAUD_56000 = 56000,
CY_UART_BAUD_57600 = 57600,
CY_UART_BAUD_115200 = 115200,
CY_UART_BAUD_230400 = 230400,
CY_UART_BAUD_460800 = 460800,
CY_UART_BAUD_921600 = 921600,
CY_UART_BAUD_1000000 = 1000000,
CY_UART_BAUD_3000000 = 3000000,
}CY_UART_BAUD_RATE;
typedef enum _CY_UART_PARITY_MODE {
CY_DATA_PARITY_DISABLE = 0,
CY_DATA_PARITY_ODD,
CY_DATA_PARITY_EVEN,
CY_DATA_PARITY_MARK,
CY_DATA_PARITY_SPACE
} CY_UART_PARITY_MODE;
typedef enum _CY_UART_STOP_BIT {
CY_UART_ONE_STOP_BIT = 1,
CY_UART_TWO_STOP_BIT
} CY_UART_STOP_BIT;
typedef enum _CY_FLOW_CONTROL_MODES {
CY_UART_FLOW_CONTROL_DISABLE = 0,
CY_UART_FLOW_CONTROL_DSR,
CY_UART_FLOW_CONTROL_RTS_CTS,
CY_UART_FLOW_CONTROL_ALL
} CY_FLOW_CONTROL_MODES;
typedef struct _CY_UART_CONFIG {
CY_UART_BAUD_RATE baudRate;
UINT8 dataWidth;
CY_UART_STOP_BIT stopBits;
CY_UART_PARITY_MODE parityMode;
BOOL isDropOnRxErrors;
} CY_UART_CONFIG,*PCY_UART_CONFIG;
typedef enum _CY_CALLBACK_EVENTS {
CY_UART_CTS_BIT = 0x01,
CY_UART_DSR_BIT = 0x02,
CY_UART_BREAK_BIT = 0x04,
CY_UART_RING_SIGNAL_BIT = 0x08,
CY_UART_FRAME_ERROR_BIT = 0x10,
CY_UART_PARITY_ERROR_BIT = 0x20,
CY_UART_DATA_OVERRUN_BIT = 0x40,
CY_UART_DCD_BIT = 0x100,
CY_SPI_TX_UNDERFLOW_BIT = 0x200,
CY_SPI_BUS_ERROR_BIT = 0x400,
CY_ERROR_EVENT_FAILED_BIT = 0x800
} CY_CALLBACK_EVENTS;
CY_RETURN_STATUS CyLibraryInit ();
CY_RETURN_STATUS CyLibraryExit ();
CY_RETURN_STATUS CyGetListofDevices (
UINT8* numDevices
);
CY_RETURN_STATUS CyGetDeviceInfo(
UINT8 deviceNumber,
CY_DEVICE_INFO *deviceInfo
);
CY_RETURN_STATUS CyGetDeviceInfoVidPid (
CY_VID_PID vidPid,
UINT8 *deviceIdList,
CY_DEVICE_INFO *deviceInfoList,
UINT8 *deviceCount,
UINT8 infoListLength
);
CY_RETURN_STATUS CyOpen (
UINT8 deviceNumber,
UINT8 interfaceNum,
CY_HANDLE *handle
);
CY_RETURN_STATUS CyClose (
CY_HANDLE handle
);
CY_RETURN_STATUS CyCyclePort (
CY_HANDLE handle
);
CY_RETURN_STATUS CySetGpioValue (
CY_HANDLE handle,
UINT8 gpioNumber,
UINT8 value
);
CY_RETURN_STATUS CyGetGpioValue (
CY_HANDLE handle,
UINT8 gpioNumber,
UINT8 *value
);
CY_RETURN_STATUS CySetEventNotification(
CY_HANDLE handle,
CY_EVENT_NOTIFICATION_CB_FN notificationCbFn
);
CY_RETURN_STATUS CyAbortEventNotification(
CY_HANDLE handle
);
CY_RETURN_STATUS CyGetLibraryVersion (
CY_HANDLE handle,
PCY_LIBRARY_VERSION version
);
CY_RETURN_STATUS CyGetFirmwareVersion (
CY_HANDLE handle,
PCY_FIRMWARE_VERSION firmwareVersion
);
CY_RETURN_STATUS CyResetDevice (
CY_HANDLE handle
);
CY_RETURN_STATUS CyProgUserFlash (
CY_HANDLE handle,
CY_DATA_BUFFER *progBuffer,
UINT32 flashAddress,
UINT32 timeout
);
CY_RETURN_STATUS CyReadUserFlash (
CY_HANDLE handle,
CY_DATA_BUFFER *readBuffer,
UINT32 flashAddress,
UINT32 timeout
);
CY_RETURN_STATUS CyGetSignature (
CY_HANDLE handle,
UCHAR *pSignature
);
CY_RETURN_STATUS CyGetUartConfig (
CY_HANDLE handle,
CY_UART_CONFIG *uartConfig
);
CY_RETURN_STATUS CySetUartConfig (
CY_HANDLE handle,
CY_UART_CONFIG *uartConfig
);
CY_RETURN_STATUS CyUartRead (
CY_HANDLE handle,
CY_DATA_BUFFER* readBuffer,
UINT32 timeout
);
CY_RETURN_STATUS CyUartWrite (
CY_HANDLE handle,
CY_DATA_BUFFER* writeBuffer,
UINT32 timeout
);
CY_RETURN_STATUS CyUartSetHwFlowControl(
CY_HANDLE handle,
CY_FLOW_CONTROL_MODES mode
);
CY_RETURN_STATUS CyUartGetHwFlowControl(
CY_HANDLE handle,
CY_FLOW_CONTROL_MODES *mode
);
CY_RETURN_STATUS CyUartSetRts(
CY_HANDLE handle
);
CY_RETURN_STATUS CyUartClearRts(
CY_HANDLE handle
);
CY_RETURN_STATUS CyUartSetDtr(
CY_HANDLE handle
);
CY_RETURN_STATUS CyUartClearDtr(
CY_HANDLE handle
);
CY_RETURN_STATUS CyUartSetBreak(
CY_HANDLE handle,
UINT16 timeout
);
CY_RETURN_STATUS CyGetI2cConfig (
CY_HANDLE handle,
CY_I2C_CONFIG *i2cConfig
);
CY_RETURN_STATUS CySetI2cConfig (
CY_HANDLE handle,
CY_I2C_CONFIG *i2cConfig
);
CY_RETURN_STATUS CyI2cRead (
CY_HANDLE handle,
CY_I2C_DATA_CONFIG *dataConfig,
CY_DATA_BUFFER *readBuffer,
UINT32 timeout
);
CY_RETURN_STATUS CyI2cWrite (
CY_HANDLE handle,
CY_I2C_DATA_CONFIG *dataConfig,
CY_DATA_BUFFER *writeBuffer,
UINT32 timeout
);
CY_RETURN_STATUS CyI2cReset(
CY_HANDLE handle,
BOOL resetMode
);
CY_RETURN_STATUS CyGetSpiConfig (
CY_HANDLE handle,
CY_SPI_CONFIG *spiConfig
);
CY_RETURN_STATUS CySetSpiConfig (
CY_HANDLE handle,
CY_SPI_CONFIG *spiConfig
);
CY_RETURN_STATUS CySpiReadWrite (
CY_HANDLE handle,
CY_DATA_BUFFER* readBuffer,
CY_DATA_BUFFER* writeBuffer,
UINT32 timeout
);
CY_RETURN_STATUS CyJtagEnable (
CY_HANDLE handle
);
CY_RETURN_STATUS CyJtagDisable (
CY_HANDLE handle
);
CY_RETURN_STATUS CyJtagWrite (
CY_HANDLE handle,
CY_DATA_BUFFER *writeBuffer,
UINT32 timeout
);
CY_RETURN_STATUS CyJtagRead (
CY_HANDLE handle,
CY_DATA_BUFFER *readBuffer,
UINT32 timeout
);
CY_RETURN_STATUS CyPhdcClrFeature (
CY_HANDLE handle
);
CY_RETURN_STATUS CyPhdcSetFeature (
CY_HANDLE handle
);
CY_RETURN_STATUS CyPhdcGetStatus (
CY_HANDLE handle,
UINT16 *dataStatus
);
"""
| 25.882353 | 70 | 0.71095 | 1,210 | 9,680 | 5.194215 | 0.222314 | 0.058552 | 0.102466 | 0.078918 | 0.283055 | 0.258552 | 0.181543 | 0.16245 | 0.155768 | 0.135879 | 0 | 0.044482 | 0.208058 | 9,680 | 373 | 71 | 25.951743 | 0.775372 | 0.006921 | 0 | 0.33795 | 0 | 0 | 0.990889 | 0.152749 | 0 | 0 | 0.006432 | 0 | 0 | 1 | 0 | false | 0 | 0.00277 | 0 | 0.00277 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
917d1911394719c31fdc868c9c05aa1015cc7576 | 1,316 | py | Python | ljmc/energy.py | karnesh/Monte-Carlo-LJ | f33f08c247df963ca48b9d9f8456e26c0bb19923 | [
"MIT"
] | null | null | null | ljmc/energy.py | karnesh/Monte-Carlo-LJ | f33f08c247df963ca48b9d9f8456e26c0bb19923 | [
"MIT"
] | null | null | null | ljmc/energy.py | karnesh/Monte-Carlo-LJ | f33f08c247df963ca48b9d9f8456e26c0bb19923 | [
"MIT"
] | null | null | null | """
energy.py
function that computes the inter particle energy
It uses truncated 12-6 Lennard Jones potential
All the variables are in reduced units.
"""
def distance(atom1, atom2):
    """
    Return the squared distance between two atoms.

    The minimum image convention is applied so the separation respects
    periodic boundary conditions.  Relies on the module-level box globals
    Lx/Ly/Lz and halfLx/halfLy/halfLz being set before the first call.
    """
    dx = atom1.x - atom2.x
    dy = atom1.y - atom2.y
    dz = atom1.z - atom2.z

    # Bug fix: the original ``if dx > halfLx`` was missing its colon
    # (SyntaxError), so this module could not even be imported.
    if dx > halfLx:
        dx -= Lx
    elif dx < -halfLx:
        dx += Lx

    if dy > halfLy:
        dy -= Ly
    elif dy < -halfLy:
        dy += Ly

    if dz > halfLz:
        dz -= Lz
    elif dz < -halfLz:
        dz += Lz

    return dx**2 + dy**2 + dz**2
def energy(atom1, atom2, rc):
    '''
    Return the truncated 12-6 Lennard-Jones energy between two atoms.

    Pair parameters come from Lorentz-Berthelot combining rules; the
    potential is truncated to 0 beyond the cutoff ``rc`` (reduced units).
    '''
    # Arithmetic mixing rules - Lorentz-Berthelot mixing.
    eps = (atom1.eps + atom2.eps) / 2
    sig = (atom1.sigma * atom2.sigma) ** 0.5
    rcsq = rc ** 2
    rsq = distance(atom1, atom2)
    if rsq > rcsq:
        return 0.0
    # (sigma/r)**6 computed from squared quantities, avoiding a sqrt.
    # Bug fixes: the original used (sig/rsq)**6 — mixing a length with a
    # squared length — and never returned the computed value.
    sr6 = (sig * sig / rsq) ** 3
    return 4.0 * eps * (sr6 * sr6 - sr6)
def writeEnergy(step, energy):
    '''
    Append one "step energy" record to energy.dat.
    '''
    record = '{0} {1}\n'.format(step, energy)
    with open('energy.dat', 'a') as outfile:
        outfile.write(record)
| 19.352941 | 101 | 0.544833 | 180 | 1,316 | 3.983333 | 0.483333 | 0.041841 | 0.050209 | 0.033473 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.041002 | 0.332827 | 1,316 | 67 | 102 | 19.641791 | 0.775626 | 0.037234 | 0 | 0 | 0 | 0 | 0.023952 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0 | 0 | null | null | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
917e0cc4efaf369d4d17aeaeb0fc5c964a039793 | 760 | py | Python | slender/tests/list/test_keep_if.py | torokmark/slender | 3bf815e22f7802ba48706f31ba608cf609e23e68 | [
"Apache-2.0"
] | 1 | 2020-01-10T21:51:46.000Z | 2020-01-10T21:51:46.000Z | slender/tests/list/test_keep_if.py | torokmark/slender | 3bf815e22f7802ba48706f31ba608cf609e23e68 | [
"Apache-2.0"
] | null | null | null | slender/tests/list/test_keep_if.py | torokmark/slender | 3bf815e22f7802ba48706f31ba608cf609e23e68 | [
"Apache-2.0"
] | null | null | null |
from unittest import TestCase
from expects import expect, equal, raise_error
from slender import List
class TestKeepIf(TestCase):
    """Behaviour of List.keep_if for None, valid, all-rejecting and bad predicates."""

    def test_keep_if_if_func_is_none(self):
        numbers = List([1, 2, 3, 4, 5])
        expect(numbers.keep_if(None).to_list()).to(equal([1, 2, 3, 4, 5]))

    def test_keep_if_if_func_is_valid(self):
        numbers = List([1, 2, 3, 4, 5])
        expect(numbers.keep_if(lambda n: n > 3).to_list()).to(equal([4, 5]))

    def test_keep_if_if_func_is_invalid_for_all_items(self):
        numbers = List([1, 2, 3, 4, 5])
        expect(numbers.keep_if(lambda n: n > 6).to_list()).to(equal([]))

    def test_keep_if_if_func_is_different(self):
        numbers = List([1, 2, 3, 4])
        expect(lambda: numbers.keep_if('...')).to(raise_error(TypeError))
| 28.148148 | 76 | 0.628947 | 133 | 760 | 3.323308 | 0.285714 | 0.108597 | 0.033937 | 0.045249 | 0.4819 | 0.475113 | 0.475113 | 0.350679 | 0.350679 | 0.246606 | 0 | 0.046667 | 0.210526 | 760 | 26 | 77 | 29.230769 | 0.69 | 0 | 0 | 0.1875 | 0 | 0 | 0.003968 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.25 | false | 0 | 0.1875 | 0 | 0.5 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
9187aae337945bbf532915814ef30a4e08766d0c | 10,938 | py | Python | python27/1.0/lib/linux/gevent/pool.py | jt6562/XX-Net | 7b78e4820a3c78c3ba3e75b3917129d17f00e9fc | [
"BSD-2-Clause"
] | 2 | 2017-04-24T03:04:45.000Z | 2017-09-19T03:38:37.000Z | python27/1.0/lib/linux/gevent/pool.py | TDUncle/XX-Net | 24b2af60dc0abc1c26211813064bb14c1e22bac8 | [
"BSD-2-Clause"
] | null | null | null | python27/1.0/lib/linux/gevent/pool.py | TDUncle/XX-Net | 24b2af60dc0abc1c26211813064bb14c1e22bac8 | [
"BSD-2-Clause"
] | 1 | 2019-04-19T09:11:54.000Z | 2019-04-19T09:11:54.000Z | # Copyright (c) 2009-2010 Denis Bilenko. See LICENSE for details.
"""Managing greenlets in a group.
The :class:`Group` class in this module abstracts a group of running greenlets.
When a greenlet dies, it's automatically removed from the group.
The :class:`Pool` which a subclass of :class:`Group` provides a way to limit
concurrency: its :meth:`spawn <Pool.spawn>` method blocks if the number of
greenlets in the pool has already reached the limit, until there is a free slot.
"""
from gevent.hub import GreenletExit, getcurrent
from gevent.greenlet import joinall, Greenlet
from gevent.timeout import Timeout
from gevent.event import Event
from gevent.coros import Semaphore, DummySemaphore
__all__ = ['Group', 'Pool']
class Group(object):
    """Maintain a group of greenlets that are still running.

    Links to each item and removes it upon notification.
    """
    # Greenlet subclass used by spawn(); subclasses and callers may override.
    greenlet_class = Greenlet

    def __init__(self, *args):
        """Create a group, optionally seeded with an iterable of greenlets."""
        assert len(args) <= 1, args
        self.greenlets = set(*args)
        if args:
            # Watch each pre-existing greenlet so it is removed when it dies.
            for greenlet in args[0]:
                greenlet.rawlink(self.discard)
        # each item we kill we place in dying, to avoid killing the same greenlet twice
        self.dying = set()
        # Set whenever the group is empty; join() waits on this event.
        self._empty_event = Event()
        self._empty_event.set()

    def __repr__(self):
        try:
            classname = self.__class__.__name__
        except AttributeError:
            classname = 'Group'  # XXX check if 2.4 really uses this line
        return '<%s at %s %s>' % (classname, hex(id(self)), self.greenlets)

    def __len__(self):
        """Number of greenlets currently tracked by the group."""
        return len(self.greenlets)

    def __contains__(self, item):
        return item in self.greenlets

    def __iter__(self):
        return iter(self.greenlets)

    def add(self, greenlet):
        """Track *greenlet*; it is discarded automatically when it finishes."""
        greenlet.rawlink(self.discard)
        self.greenlets.add(greenlet)
        self._empty_event.clear()

    def discard(self, greenlet):
        """Stop tracking *greenlet*; wake join() waiters if the group empties."""
        self.greenlets.discard(greenlet)
        self.dying.discard(greenlet)
        if not self.greenlets:
            self._empty_event.set()

    def start(self, greenlet):
        """Add an unstarted *greenlet* to the group, then start it."""
        self.add(greenlet)
        greenlet.start()

    def spawn(self, *args, **kwargs):
        """Create a greenlet via greenlet_class, add it to the group, return it."""
        add = self.add
        greenlet = self.greenlet_class.spawn(*args, **kwargs)
        add(greenlet)
        return greenlet

    def spawn_link(self, *args, **kwargs):
        """spawn() variant that also links the new greenlet (any completion)."""
        greenlet = self.spawn(*args, **kwargs)
        greenlet.link()
        return greenlet

    def spawn_link_value(self, *args, **kwargs):
        """spawn() variant that links only for successful completion."""
        greenlet = self.spawn(*args, **kwargs)
        greenlet.link_value()
        return greenlet

    def spawn_link_exception(self, *args, **kwargs):
        """spawn() variant that links only for completion with an exception."""
        greenlet = self.spawn(*args, **kwargs)
        greenlet.link_exception()
        return greenlet

#     def close(self):
#         """Prevents any more tasks from being submitted to the pool"""
#         self.add = RaiseException("This %s has been closed" % self.__class__.__name__)

    def join(self, timeout=None, raise_error=False):
        """Wait until the group is empty or *timeout* expires.

        With raise_error=True, re-raise the first stored exception found
        among the greenlets that were present when join() was called.
        """
        if raise_error:
            greenlets = self.greenlets.copy()
            self._empty_event.wait(timeout=timeout)
            for greenlet in greenlets:
                if greenlet.exception is not None:
                    raise greenlet.exception
        else:
            self._empty_event.wait(timeout=timeout)

    def kill(self, exception=GreenletExit, block=True, timeout=None):
        """Kill every greenlet in the group with *exception*.

        Greenlets already in ``dying`` are not killed twice. With
        block=True, keep killing/joining until the group is empty or
        *timeout* expires.
        """
        timer = Timeout.start_new(timeout)
        try:
            try:
                while self.greenlets:
                    for greenlet in list(self.greenlets):
                        if greenlet not in self.dying:
                            greenlet.kill(exception, block=False)
                            self.dying.add(greenlet)
                    if not block:
                        break
                    joinall(self.greenlets)
            except Timeout, ex:
                # Swallow only the timeout raised by our own timer.
                if ex is not timer:
                    raise
        finally:
            timer.cancel()

    def killone(self, greenlet, exception=GreenletExit, block=True, timeout=None):
        """Kill a single *greenlet* if it belongs to the group and is not dying."""
        if greenlet not in self.dying and greenlet in self.greenlets:
            greenlet.kill(exception, block=False)
            self.dying.add(greenlet)
            if block:
                greenlet.join(timeout)

    def apply(self, func, args=None, kwds=None):
        """Equivalent of the apply() builtin function. It blocks till the result is ready."""
        if args is None:
            args = ()
        if kwds is None:
            kwds = {}
        if getcurrent() in self:
            # Already running inside this group: call directly so a bounded
            # pool cannot deadlock waiting on its own slot.
            return func(*args, **kwds)
        else:
            return self.spawn(func, *args, **kwds).get()

    def apply_cb(self, func, args=None, kwds=None, callback=None):
        """Blocking apply() that also fires *callback* with the result."""
        result = self.apply(func, args, kwds)
        if callback is not None:
            Greenlet.spawn(callback, result)
        return result

    def apply_async(self, func, args=None, kwds=None, callback=None):
        """A variant of the apply() method which returns a Greenlet object.

        If callback is specified then it should be a callable which accepts a single argument. When the result becomes ready
        callback is applied to it (unless the call failed)."""
        if args is None:
            args = ()
        if kwds is None:
            kwds = {}
        if self.full():
            # cannot call spawn() directly because it will block
            return Greenlet.spawn(self.apply_cb, func, args, kwds, callback)
        else:
            greenlet = self.spawn(func, *args, **kwds)
            if callback is not None:
                greenlet.link(pass_value(callback))
            return greenlet

    def map(self, func, iterable):
        """Apply *func* to every item concurrently; block and return results in order."""
        greenlets = [self.spawn(func, item) for item in iterable]
        return [greenlet.get() for greenlet in greenlets]

    def map_cb(self, func, iterable, callback=None):
        """Blocking map() that also passes the result list to *callback*."""
        result = self.map(func, iterable)
        if callback is not None:
            callback(result)
        return result

    def map_async(self, func, iterable, callback=None):
        """
        A variant of the map() method which returns a Greenlet object.

        If callback is specified then it should be a callable which accepts a
        single argument.
        """
        return Greenlet.spawn(self.map_cb, func, iterable, callback)

    def imap(self, func, iterable):
        """An equivalent of itertools.imap()

        **TODO**: Fix this.
        """
        return iter(self.map(func, iterable))

    def imap_unordered(self, func, iterable):
        """The same as imap() except that the ordering of the results from the
        returned iterator should be considered in arbitrary order."""
        return IMapUnordered.spawn(self.spawn, func, iterable)

    def full(self):
        # A plain Group is unbounded; Pool overrides this.
        return False

    def wait_available(self):
        # No-op for unbounded groups; Pool overrides this to block on a slot.
        pass
class IMapUnordered(Greenlet):
    """Greenlet that spawns ``func(item)`` workers and queues their results.

    Iterating an instance yields results in completion (not input) order.
    """

    def __init__(self, spawn, func, iterable):
        from gevent.queue import Queue
        Greenlet.__init__(self)
        self.spawn = spawn          # spawning callable (e.g. a Group/Pool spawn)
        self.func = func
        self.iterable = iterable
        self.queue = Queue()        # finished results, in arrival order
        self.count = 0              # number of outstanding workers

    def __iter__(self):
        # The queue is itself iterable; iteration ends when the
        # StopIteration sentinel (put by _on_result) is reached.
        return self.queue

    def _run(self):
        try:
            func = self.func
            for item in self.iterable:
                # Increment before spawning so _on_result's bookkeeping
                # never sees a premature zero.
                self.count += 1
                self.spawn(func, item).rawlink(self._on_result)
        finally:
            # Drop references so the spawner/func/iterable can be collected.
            self.__dict__.pop('spawn', None)
            self.__dict__.pop('func', None)
            self.__dict__.pop('iterable', None)

    def _on_result(self, greenlet):
        self.count -= 1
        if greenlet.successful():
            self.queue.put(greenlet.value)
        if self.ready() and self.count <= 0:
            # All items dispatched (_run done) and all workers finished:
            # tell consumers to stop iterating.
            self.queue.put(StopIteration)
def GreenletSet(*args, **kwargs):
    """Deprecated alias for :class:`Group` (renamed in gevent 0.13.0)."""
    import warnings
    warnings.warn(
        "gevent.pool.GreenletSet was renamed to gevent.pool.Group since version 0.13.0",
        category=DeprecationWarning, stacklevel=2)
    return Group(*args, **kwargs)
class Pool(Group):
    """A Group with a bounded number of concurrently running greenlets.

    A semaphore with *size* slots guards start()/spawn*(); spawning blocks
    while the pool is full, and discard() releases a slot when a greenlet
    leaves the pool.
    """

    def __init__(self, size=None, greenlet_class=None):
        """Create a pool holding at most *size* greenlets (None = unbounded).

        Raises ValueError if *size* is neither None nor a positive integer.
        """
        if size is not None and size < 1:
            raise ValueError('Invalid size for pool (positive integer or None required): %r' % (size, ))
        Group.__init__(self)
        self.size = size
        if greenlet_class is not None:
            self.greenlet_class = greenlet_class
        if size is None:
            # Unbounded pool: the dummy semaphore never blocks.
            self._semaphore = DummySemaphore()
        else:
            self._semaphore = Semaphore(size)

    def wait_available(self):
        """Block until at least one slot in the pool is free."""
        self._semaphore.wait()

    def full(self):
        """Return True if spawning now would block."""
        return self.free_count() <= 0

    def free_count(self):
        """Number of free slots; always 1 for an unbounded pool."""
        if self.size is None:
            return 1
        return max(0, self.size - len(self))

    def start(self, greenlet):
        """Acquire a slot, add the (unstarted) *greenlet*, then start it."""
        self._semaphore.acquire()
        try:
            self.add(greenlet)
        except:
            # Give the slot back on bookkeeping failure; in the normal
            # path discard() releases it when the greenlet finishes.
            self._semaphore.release()
            raise
        greenlet.start()

    def _spawn_guarded(self, spawn_func, args, kwargs):
        """Acquire a slot, create a greenlet with *spawn_func*, add it.

        Shared implementation for spawn()/spawn_link*(): releases the slot
        and re-raises if creation or add() fails, otherwise returns the
        new greenlet.
        """
        self._semaphore.acquire()
        try:
            greenlet = spawn_func(*args, **kwargs)
            self.add(greenlet)
        except:
            self._semaphore.release()
            raise
        return greenlet

    def spawn(self, *args, **kwargs):
        """Spawn a greenlet, blocking while the pool is full."""
        return self._spawn_guarded(self.greenlet_class.spawn, args, kwargs)

    def spawn_link(self, *args, **kwargs):
        """spawn() variant that links the new greenlet (any completion)."""
        return self._spawn_guarded(self.greenlet_class.spawn_link, args, kwargs)

    def spawn_link_value(self, *args, **kwargs):
        """spawn() variant that links only successful completion."""
        return self._spawn_guarded(self.greenlet_class.spawn_link_value, args, kwargs)

    def spawn_link_exception(self, *args, **kwargs):
        """spawn() variant that links only completion with an exception."""
        return self._spawn_guarded(self.greenlet_class.spawn_link_exception, args, kwargs)

    def discard(self, greenlet):
        """Remove *greenlet* from the pool and free its semaphore slot."""
        Group.discard(self, greenlet)
        self._semaphore.release()
def get_values(greenlets):
    """Block until every greenlet in *greenlets* finishes, then return their values in order."""
    joinall(greenlets)
    return [greenlet.value for greenlet in greenlets]
class pass_value(object):
    """Link callback adapter: forwards a source's value to *callback*.

    When called with a finished greenlet-like *src*, invokes
    ``callback(src.value)`` only if ``src.successful()``. Hashing,
    equality, string forms and unknown attributes all delegate to the
    wrapped callback.
    """
    __slots__ = ['callback']

    def __init__(self, callback):
        self.callback = callback

    def __call__(self, src):
        if not src.successful():
            return
        self.callback(src.value)

    def __hash__(self):
        return hash(self.callback)

    def __eq__(self, rhs):
        # Compare against another pass_value's callback, or a bare callable.
        return self.callback == getattr(rhs, 'callback', rhs)

    def __str__(self):
        return str(self.callback)

    def __repr__(self):
        return repr(self.callback)

    def __getattr__(self, name):
        assert name != 'callback'
        return getattr(self.callback, name)
| 31.162393 | 132 | 0.598555 | 1,288 | 10,938 | 4.944099 | 0.189441 | 0.028266 | 0.024026 | 0.020729 | 0.318781 | 0.281721 | 0.24152 | 0.226445 | 0.203832 | 0.17902 | 0 | 0.00329 | 0.305266 | 10,938 | 350 | 133 | 31.251429 | 0.834715 | 0.037484 | 0 | 0.403162 | 0 | 0 | 0.022311 | 0.002491 | 0 | 0 | 0 | 0.002857 | 0.007905 | 0 | null | null | 0.011858 | 0.027668 | null | null | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
9192d6d1ce77aea0159f3db895468368ec72c08a | 592 | py | Python | setup.py | avryhof/ambient_api | 08194b5d8626801f2c2c7369adacb15eace54802 | [
"MIT"
] | 20 | 2018-12-24T15:40:49.000Z | 2022-01-10T18:58:41.000Z | setup.py | avryhof/ambient_api | 08194b5d8626801f2c2c7369adacb15eace54802 | [
"MIT"
] | 10 | 2018-08-17T02:01:45.000Z | 2021-01-08T23:34:59.000Z | setup.py | avryhof/ambient_api | 08194b5d8626801f2c2c7369adacb15eace54802 | [
"MIT"
] | 14 | 2018-06-13T23:40:12.000Z | 2022-01-05T06:34:13.000Z | from setuptools import setup
setup(
name="ambient_api",
version="1.5.6",
packages=["ambient_api"],
url="https://github.com/avryhof/ambient_api",
license="MIT",
author="Amos Vryhof",
author_email="amos@vryhofresearch.com",
description="A Python class for accessing the Ambient Weather API.",
classifiers=[
"Development Status :: 5 - Production/Stable",
"Intended Audience :: Developers",
"License :: OSI Approved :: MIT License",
"Programming Language :: Python :: 3",
],
install_requires=["requests", "urllib3"],
)
| 29.6 | 72 | 0.640203 | 64 | 592 | 5.84375 | 0.765625 | 0.080214 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.012903 | 0.214527 | 592 | 19 | 73 | 31.157895 | 0.791398 | 0 | 0 | 0 | 0 | 0 | 0.535473 | 0.038851 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 0.055556 | 0 | 0.055556 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
9192df0712738e90f6f197873c3a465c79101722 | 585 | py | Python | tests/llvm/static/test_main_is_found/test_main_is_found.py | ganeshutah/FPChecker | 53a471429762ace13f69733cb2f8b7227fc15b9f | [
"Apache-2.0"
] | 19 | 2019-09-28T16:15:45.000Z | 2022-02-15T15:11:28.000Z | tests/llvm/static/test_main_is_found/test_main_is_found.py | tanmaytirpankar/FPChecker | d3fe4bd9489c5705df58a67dbbc388ac1ebf56bf | [
"Apache-2.0"
] | 16 | 2020-02-01T18:43:00.000Z | 2021-12-22T14:47:39.000Z | tests/llvm/static/test_main_is_found/test_main_is_found.py | tanmaytirpankar/FPChecker | d3fe4bd9489c5705df58a67dbbc388ac1ebf56bf | [
"Apache-2.0"
] | 5 | 2020-07-27T18:15:36.000Z | 2021-11-01T18:43:34.000Z | #!/usr/bin/env python
import subprocess
import os
def setup_module(module):
THIS_DIR = os.path.dirname(os.path.abspath(__file__))
os.chdir(THIS_DIR)
def teardown_module(module):
cmd = ["make clean"]
cmdOutput = subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True)
def test_1():
cmd = ["make"]
cmdOutput = subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True)
passed = False
for l in cmdOutput.decode('utf-8').split("\n"):
if "#FPCHECKER: main() found" in l:
passed = True
assert passed == True
| 22.5 | 82 | 0.666667 | 79 | 585 | 4.797468 | 0.56962 | 0.063325 | 0.126649 | 0.158311 | 0.337731 | 0.337731 | 0.337731 | 0.337731 | 0.337731 | 0.337731 | 0 | 0.004274 | 0.2 | 585 | 25 | 83 | 23.4 | 0.805556 | 0.034188 | 0 | 0.125 | 0 | 0 | 0.079929 | 0 | 0 | 0 | 0 | 0 | 0.0625 | 1 | 0.1875 | false | 0.1875 | 0.125 | 0 | 0.3125 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 1 |
91936b7f0195e57ee35ddf84cdb73c2bef559977 | 745 | py | Python | Dynamic_Programming/1259.Integer Replacement/Solution_BFS.py | Zhenye-Na/LxxxCode | afd79d790d0a7495d75e6650f80adaa99bd0ff07 | [
"MIT"
] | 12 | 2019-05-04T04:21:27.000Z | 2022-03-02T07:06:57.000Z | Dynamic_Programming/1259.Integer Replacement/Solution_BFS.py | Zhenye-Na/LxxxCode | afd79d790d0a7495d75e6650f80adaa99bd0ff07 | [
"MIT"
] | 1 | 2019-07-24T18:43:53.000Z | 2019-07-24T18:43:53.000Z | Dynamic_Programming/1259.Integer Replacement/Solution_BFS.py | Zhenye-Na/LxxxCode | afd79d790d0a7495d75e6650f80adaa99bd0ff07 | [
"MIT"
] | 10 | 2019-07-01T04:03:04.000Z | 2022-03-09T03:57:37.000Z | from collections import deque
class Solution:
"""
@param n: a positive integer
@return: the minimum number of replacements
"""
def integerReplacement(self, n):
# Write your code here
steps = 0
if n == 1:
return steps
queue = deque([n])
while queue:
size = len(queue)
print(queue, steps)
for _ in range(size):
num = queue.popleft()
if num == 1:
return steps
if num % 2 == 0:
queue.append(num // 2)
else:
queue.append(num + 1)
queue.append(num - 1)
steps += 1
return 0
| 23.28125 | 47 | 0.436242 | 76 | 745 | 4.263158 | 0.539474 | 0.064815 | 0.12963 | 0.092593 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.02584 | 0.480537 | 745 | 31 | 48 | 24.032258 | 0.81137 | 0.127517 | 0 | 0.095238 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.032258 | 0 | 1 | 0.047619 | false | 0 | 0.047619 | 0 | 0.285714 | 0.047619 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
9197f982af32fc988794515b093dd5bf984c98a5 | 4,132 | py | Python | src/biota_models/vegetation/model/constants_json_create.py | Deltares/NBSDynamics | 4710da529d85b588ea249f6e2b4f4cac132bb34f | [
"MIT"
] | 2 | 2022-01-14T05:02:04.000Z | 2022-03-02T10:42:59.000Z | src/biota_models/vegetation/model/constants_json_create.py | Deltares/NBSDynamics | 4710da529d85b588ea249f6e2b4f4cac132bb34f | [
"MIT"
] | 35 | 2021-11-01T08:59:02.000Z | 2021-11-19T16:47:17.000Z | src/biota_models/vegetation/model/constants_json_create.py | Deltares/NBSDynamics | 4710da529d85b588ea249f6e2b4f4cac132bb34f | [
"MIT"
] | 1 | 2022-03-16T07:11:00.000Z | 2022-03-16T07:11:00.000Z | import json
schema = {
"Spartina": {
"ColStart": "2000-04-01",
"ColEnd": "2000-05-31",
"random": 7,
"mud_colonization": [0.0, 0.0],
"fl_dr": 0.005,
"Maximum age": 20,
"Number LifeStages": 2,
"initial root length": 0.05,
"initial shoot length": 0.015,
"initial diameter": 0.003,
"start growth period": "2000-04-01",
"end growth period": "2000-10-31",
"start winter period": "2000-11-30",
"maximum plant height": [0.8, 1.3],
"maximum diameter": [0.003, 0.005],
"maximum root length": [0.2, 1],
"maximum years in LifeStage": [1, 19],
"numStem": [700, 700], # 3.5. number of stems per m2
"iniCol_frac": 0.6, # 3.6. initial colonization fraction (0-1)
"Cd": [1.1, 1.15], # 3.7. drag coefficient
"desMort_thres": [400, 400], # 3.9. dessication mortality threshold
"desMort_slope": [0.75, 0.75], # 3.10. dessication mortality slope
"floMort_thres": [0.4, 0.4], # 3.11. flooding mortality threshold
"floMort_slope": [0.25, 0.25], # 3.12. flooding mortality slope
"vel_thres": [0.15, 0.25], # 3.13. flow velocity threshold
"vel_slope": [3, 3], # 3.14. flow velocity slope
"maxH_winter": [0.4, 0.4], # 3.15 max height during winter time
},
"Salicornia": {
"ColStart": "2000-02-15",
"ColEnd": "2000-04-30",
"random": 20,
"mud_colonization": [0.0, 0.0],
"fl_dr": 0.005,
"Maximum age": 1,
"Number LifeStages": 1,
"initial root length": 0.15,
"initial shoot length": 0.05,
"initial diameter": 0.01,
"start growth period": "2000-02-15",
"end growth period": "2000-10-15",
"start winter period": "2000-11-01",
"maximum plant height": [0.4, 0],
"maximum diameter": [0.015, 0],
"maximum root length": [0.05, 0],
"maximum years in LifeStage": [1, 0],
"numStem": [190, 0], # 3.5. number of stems per m2
"iniCol_frac": 0.2, # 3.6. initial colonization fraction (0-1)
"Cd": [0.7, 0], # 3.7. drag coefficient
"desMort_thres": [400, 1], # 3.9. dessication mortality threshold
"desMort_slope": [0.75, 1], # 3.10. dessication mortality slope
"floMort_thres": [0.5, 1], # 3.11. flooding mortality threshold
"floMort_slope": [0.12, 1], # 3.12. flooding mortality slope
"vel_thres": [0.15, 1], # 3.13. flow velocity threshold
"vel_slope": [3, 1], # 3.14. flow velocity slope
"maxH_winter": [0.0, 0.0], # 3.15 max height during winter time
},
"Puccinellia": {
"ColStart": "2000-03-01",
"ColEnd": "2000-04-30",
"random": 7,
"mud_colonization": [0.0, 0.0],
"fl_dr": 0.005,
"Maximum age": 20,
"Number LifeStages": 2,
"initial root length": 0.02,
"initial shoot length": 0.05,
"initial diameter": 0.004,
"start growth period": "2000-03-01",
"end growth period": "2000-11-15",
"start winter period": "2000-11-30",
"maximum plant height": [0.2, 0.35],
"maximum diameter": [0.004, 0.005],
"maximum root length": [0.15, 0.15],
"maximum years in LifeStage": [1, 19],
"numStem": [6500, 6500], # 3.5. number of stems per m2
"iniCol_frac": 0.3, # 3.6. initial colonization fraction (0-1)
"Cd": [0.7, 0.7], # 3.7. drag coefficient
"desMort_thres": [400, 400], # 3.9. dessication mortality threshold
"desMort_slope": [0.75, 0.75], # 3.10. dessication mortality slope
"floMort_thres": [0.35, 0.35], # 3.11. flooding mortality threshold
"floMort_slope": [0.4, 0.4], # 3.12. flooding mortality slope
"vel_thres": [0.25, 0.5], # 3.13. flow velocity threshold
"vel_slope": [3, 3], # 3.14. flow velocity slope
"maxH_winter": [0.2, 0.2], # 3.15 max height during winter time
},
}
with open("constants_veg.json", "w") as write_file:
json.dump(schema, write_file, indent=4)
| 43.494737 | 76 | 0.547193 | 580 | 4,132 | 3.841379 | 0.181034 | 0.010772 | 0.010772 | 0.007181 | 0.766158 | 0.706912 | 0.675045 | 0.593357 | 0.426391 | 0.373429 | 0 | 0.153587 | 0.281462 | 4,132 | 94 | 77 | 43.957447 | 0.596834 | 0.232091 | 0 | 0.282609 | 0 | 0 | 0.407702 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.01087 | 0 | 0.01087 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
91993f87e0ff04f74f7a6f31b278e5b76bf7a8ba | 1,376 | py | Python | Stream-3/Full-Stack-Development/10.Custom-User-And-Email-Authentication/2.Custom-User-Model/auth_demo/accounts/models.py | GunnerJnr/_CodeInstitute | efba0984a3dc71558eef97724c85e274a712798c | [
"MIT"
] | 4 | 2017-10-10T14:00:40.000Z | 2021-01-27T14:08:26.000Z | Stream-3/Full-Stack-Development/10.Custom-User-And-Email-Authentication/2.Custom-User-Model/auth_demo/accounts/models.py | GunnerJnr/_CodeInstitute | efba0984a3dc71558eef97724c85e274a712798c | [
"MIT"
] | 115 | 2019-10-24T11:18:33.000Z | 2022-03-11T23:15:42.000Z | Stream-3/Full-Stack-Development/10.Custom-User-And-Email-Authentication/2.Custom-User-Model/auth_demo/accounts/models.py | GunnerJnr/_CodeInstitute | efba0984a3dc71558eef97724c85e274a712798c | [
"MIT"
] | 5 | 2017-09-22T21:42:39.000Z | 2020-02-07T02:18:11.000Z | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.contrib.auth.models import AbstractUser, UserManager
from django.db import models
from django.utils import timezone
# Create your models here.
# Create our new user class
class AccountUserManager(UserManager):
def _create_user(self, username, email, password, is_staff, is_supervisor, **extra_fields):
"""
Creates and saves a User with the given username, email and password.
:param username:
:param email:
:param password:
:param is_staff:
:param is_supervisor:
:param extra_fields:
:return:
"""
now = timezone.now()
if not email:
raise ValueError('The given username must be set')
email = self.normalize_email(email)
user = self.model(username=email, email=email,
is_staff=is_staff, is_active=True,
is_supervisor=is_supervisor,
date_joined=now, **extra_fields)
user.set_password(password)
user.save(using=self.db)
return user
class User(AbstractUser):
# now that we've abstracted this class we can add any
# number of custom attribute to our user class
# in later units we'll be adding things like payment details!
object = AccountUserManager()
| 32 | 95 | 0.641715 | 167 | 1,376 | 5.155689 | 0.497006 | 0.03252 | 0.031359 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.001012 | 0.281977 | 1,376 | 42 | 96 | 32.761905 | 0.870445 | 0.303052 | 0 | 0 | 0 | 0 | 0.034325 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.052632 | false | 0.105263 | 0.210526 | 0 | 0.473684 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 1 |
919b5dc79a4db8bc0c773739c3eacec33d693967 | 11,973 | py | Python | histoGAN.py | mahmoudnafifi/HistoGAN | 50be1482638ace3ec85d733e849dec494ede155b | [
"MIT"
] | 169 | 2020-11-25T07:42:26.000Z | 2022-03-30T03:08:35.000Z | histoGAN.py | mahmoudnafifi/HistoGAN | 50be1482638ace3ec85d733e849dec494ede155b | [
"MIT"
] | 22 | 2020-12-22T13:14:24.000Z | 2022-03-31T08:41:26.000Z | histoGAN.py | mahmoudnafifi/HistoGAN | 50be1482638ace3ec85d733e849dec494ede155b | [
"MIT"
] | 19 | 2020-11-28T17:28:46.000Z | 2022-02-23T06:09:23.000Z | """
If you find this code useful, please cite our paper:
Mahmoud Afifi, Marcus A. Brubaker, and Michael S. Brown. "HistoGAN:
Controlling Colors of GAN-Generated and Real Images via Color Histograms."
In CVPR, 2021.
@inproceedings{afifi2021histogan,
title={Histo{GAN}: Controlling Colors of {GAN}-Generated and Real Images via
Color Histograms},
author={Afifi, Mahmoud and Brubaker, Marcus A. and Brown, Michael S.},
booktitle={CVPR},
year={2021}
}
"""
from tqdm import tqdm
from histoGAN import Trainer, NanException
from histogram_classes.RGBuvHistBlock import RGBuvHistBlock
from datetime import datetime
import torch
import argparse
from retry.api import retry_call
import os
from PIL import Image
from torchvision import transforms
import numpy as np
SCALE = 1 / np.sqrt(2.0)
def train_from_folder(
data='./dataset/',
results_dir='./results',
models_dir='./models',
name='test',
new=False,
load_from=-1,
image_size=128,
network_capacity=16,
transparent=False,
batch_size=2,
gradient_accumulate_every=8,
num_train_steps=150000,
learning_rate=2e-4,
num_workers=None,
save_every=1000,
generate=False,
save_noise_latent=False,
target_noise_file=None,
target_latent_file=None,
num_image_tiles=8,
trunc_psi=0.75,
fp16=False,
fq_layers=[],
fq_dict_size=256,
attn_layers=[],
hist_method='inverse-quadratic',
hist_resizing='sampling',
hist_sigma=0.02,
hist_bin=64,
hist_insz=150,
alpha=2,
target_hist=None,
aug_prob=0.0,
dataset_aug_prob=0.0,
aug_types=None):
model = Trainer(
name,
results_dir,
models_dir,
batch_size=batch_size,
gradient_accumulate_every=gradient_accumulate_every,
image_size=image_size,
network_capacity=network_capacity,
transparent=transparent,
lr=learning_rate,
num_workers=num_workers,
save_every=save_every,
trunc_psi=trunc_psi,
fp16=fp16,
fq_layers=fq_layers,
fq_dict_size=fq_dict_size,
attn_layers=attn_layers,
hist_insz=hist_insz,
hist_bin=hist_bin,
hist_sigma=hist_sigma,
hist_resizing=hist_resizing,
hist_method=hist_method,
aug_prob=aug_prob,
dataset_aug_prob=dataset_aug_prob,
aug_types=aug_types
)
if not new:
model.load(load_from)
else:
model.clear()
if generate:
now = datetime.now()
timestamp = now.strftime("%m-%d-%Y_%H-%M-%S")
if save_noise_latent and not os.path.exists('temp'):
os.mkdir('./temp')
if save_noise_latent and not os.path.exists(f'./temp/{name}'):
os.mkdir(f'./temp/{name}')
if target_hist is None:
raise Exception('No target histogram or image is given')
extension = os.path.splitext(target_hist)[1]
if extension == '.npy':
hist = np.load(target_hist)
h = torch.from_numpy(hist).to(device=torch.cuda.current_device())
if num_image_tiles > 1:
num_image_tiles = num_image_tiles - num_image_tiles % 2
for i in range(int(np.log2(num_image_tiles))):
h = torch.cat((h, h), dim=0)
samples_name = ('generated-' +
f'{os.path.basename(os.path.splitext(target_hist)[0])}'
f'-{timestamp}')
model.evaluate(samples_name, hist_batch=h,
num_image_tiles=num_image_tiles,
save_noise_latent=save_noise_latent,
load_noise_file=target_noise_file,
load_latent_file=target_latent_file)
print(f'sample images generated at {results_dir}/{name}/{samples_name}')
elif str.lower(extension) == '.jpg' or str.lower(extension) == '.png':
histblock = RGBuvHistBlock(insz=hist_insz, h=hist_bin,
resizing=hist_resizing, method=hist_method,
sigma=hist_sigma,
device=torch.cuda.current_device())
transform = transforms.Compose([transforms.ToTensor()])
img = Image.open(target_hist)
img = torch.unsqueeze(transform(img), dim=0).to(
device=torch.cuda.current_device())
h = histblock(img)
if num_image_tiles > 1:
num_image_tiles = num_image_tiles - num_image_tiles % 2
for i in range(int(np.log2(num_image_tiles))):
h = torch.cat((h, h), dim=0)
samples_name = ('generated-' +
f'{os.path.basename(os.path.splitext(target_hist)[0])}'
f'-{timestamp}')
model.evaluate(samples_name, hist_batch=h,
num_image_tiles=num_image_tiles,
save_noise_latent=save_noise_latent,
load_noise_file=target_noise_file,
load_latent_file=target_latent_file)
print(f'sample images generated at {results_dir}/{name}/{samples_name}')
elif extension == '':
files = [os.path.join(target_hist, f) for f in os.listdir(target_hist) if
os.path.isfile(os.path.join(target_hist, f))]
histblock = RGBuvHistBlock(insz=hist_insz, h=hist_bin,
resizing=hist_resizing, method=hist_method,
sigma=hist_sigma,
device=torch.cuda.current_device())
transform = transforms.Compose([transforms.ToTensor()])
for f in files:
extension = os.path.splitext(f)[1]
if extension == '.npy':
hist = np.load(f)
h = torch.from_numpy(hist).to(device=torch.cuda.current_device())
elif (extension == str.lower(extension) == '.jpg' or str.lower(
extension) == '.png'):
img = Image.open(f)
img = torch.unsqueeze(transform(img), dim=0).to(
device=torch.cuda.current_device())
h = histblock(img)
else:
print(f'Warning: File extension of {f} is not supported.')
continue
if num_image_tiles > 1:
num_image_tiles = num_image_tiles - num_image_tiles % 2
for i in range(int(np.log2(num_image_tiles))):
h = torch.cat((h, h), dim=0)
samples_name = ('generated-' +
f'{os.path.basename(os.path.splitext(f)[0])}'
f'-{timestamp}')
model.evaluate(samples_name, hist_batch=h,
num_image_tiles=num_image_tiles,
save_noise_latent=save_noise_latent,
load_noise_file=target_noise_file,
load_latent_file=target_latent_file)
print(f'sample images generated at {results_dir}/{name}/'
f'{samples_name}')
else:
print('The file extension of target image is not supported.')
raise NotImplementedError
return
print('\nStart training....\n')
print(f'Alpha = {alpha}')
model.set_data_src(data)
for _ in tqdm(range(num_train_steps - model.steps), mininterval=10.,
desc=f'{name}<{data}>'):
retry_call(model.train, fargs=[alpha], tries=3, exceptions=NanException)
if _ % 50 == 0:
model.print_log()
def get_args():
parser = argparse.ArgumentParser(description='Train/Test HistoGAN.')
parser.add_argument('--data', dest='data', default='./dataset/')
parser.add_argument('--results_dir', dest='results_dir',
default='./results_HistoGAN')
parser.add_argument('--models_dir', dest='models_dir', default='./models')
parser.add_argument('--target_hist', dest='target_hist', default=None)
parser.add_argument('--name', dest='name', default='histoGAN_model')
parser.add_argument('--new', dest='new', default=False)
parser.add_argument('--load_from', dest='load_from', default=-1)
parser.add_argument('--image_size', dest='image_size', default=256, type=int)
parser.add_argument('--network_capacity', dest='network_capacity', default=16,
type=int)
parser.add_argument('--transparent', dest='transparent', default=False)
parser.add_argument('--batch_size', dest='batch_size', default=2, type=int)
parser.add_argument('--gradient_accumulate_every',
dest='gradient_accumulate_every', default=8, type=int)
parser.add_argument('--num_train_steps', dest='num_train_steps',
default=1500000, type=int)
parser.add_argument('--learning_rate', dest='learning_rate', default=2e-4,
type=float)
parser.add_argument('--num_workers', dest='num_workers', default=None)
parser.add_argument('--save_every', dest='save_every', default=5000,
type=int)
parser.add_argument('--generate', dest='generate', default=False)
parser.add_argument('--save_noise_latent', dest='save_n_l', default=False)
parser.add_argument('--target_noise_file', dest='target_n', default=None)
parser.add_argument('--target_latent_file', dest='target_l', default=None)
parser.add_argument('--num_image_tiles', dest='num_image_tiles',
default=16, type=int)
parser.add_argument('--trunc_psi', dest='trunc_psi', default=0.75,
type=float)
parser.add_argument('--fp 16', dest='fp16', default=False)
parser.add_argument('--fq_layers', dest='fq_layers', default=[])
parser.add_argument('--fq_dict_size', dest='fq_dict_size', default=256,
type=int)
parser.add_argument('--attn_layers', dest='attn_layers', default=[])
parser.add_argument('--gpu', dest='gpu', default=0, type=int)
parser.add_argument('--hist_bin', dest='hist_bin', default=64, type=int)
parser.add_argument('--hist_insz', dest='hist_insz', default=150, type=int)
parser.add_argument('--hist_method', dest='hist_method',
default='inverse-quadratic')
parser.add_argument('--hist_resizing', dest='hist_resizing',
default='interpolation')
parser.add_argument('--hist_sigma', dest='hist_sigma', default=0.02,
type=float)
parser.add_argument('--alpha', dest='alpha', default=2, type=float)
parser.add_argument('--aug_prob', dest='aug_prob', default=0.0, type=float,
help='Probability of discriminator augmentation. It '
'applies operations specified in --aug_types.')
parser.add_argument('--dataset_aug_prob', dest='dataset_aug_prob',
default=0.0, type=float,
help='Probability of dataset augmentation. It applies '
'random cropping')
parser.add_argument('--aug_types', dest='aug_types',
default=['translation', 'cutout'], nargs='+',
help='Options include: translation, cutout, and color')
return parser.parse_args()
if __name__ == "__main__":
args = get_args()
torch.cuda.set_device(args.gpu)
train_from_folder(
data=args.data,
results_dir=args.results_dir,
models_dir=args.models_dir,
name=args.name,
new=args.new,
load_from=args.load_from,
image_size=args.image_size,
network_capacity=args.network_capacity,
transparent=args.transparent,
batch_size=args.batch_size,
gradient_accumulate_every=args.gradient_accumulate_every,
num_train_steps=args.num_train_steps,
learning_rate=args.learning_rate,
num_workers=args.num_workers,
save_every=args.save_every,
generate=args.generate,
save_noise_latent=args.save_n_l,
target_noise_file=args.target_n,
target_latent_file=args.target_l,
num_image_tiles=args.num_image_tiles,
trunc_psi=args.trunc_psi,
fp16=args.fp16,
fq_layers=args.fq_layers,
fq_dict_size=args.fq_dict_size,
attn_layers=args.attn_layers,
hist_method=args.hist_method,
hist_resizing=args.hist_resizing,
hist_sigma=args.hist_sigma,
hist_bin=args.hist_bin,
hist_insz=args.hist_insz,
target_hist=args.target_hist,
alpha=args.alpha,
aug_prob=args.aug_prob,
dataset_aug_prob=args.dataset_aug_prob,
aug_types=args.aug_types
)
| 39.127451 | 80 | 0.647791 | 1,554 | 11,973 | 4.725869 | 0.158301 | 0.044118 | 0.083333 | 0.023965 | 0.430283 | 0.323393 | 0.306236 | 0.290441 | 0.280093 | 0.259123 | 0 | 0.014808 | 0.227261 | 11,973 | 305 | 81 | 39.255738 | 0.778967 | 0.038336 | 0 | 0.241758 | 1 | 0 | 0.161669 | 0.02512 | 0 | 0 | 0 | 0 | 0 | 1 | 0.007326 | false | 0 | 0.040293 | 0 | 0.054945 | 0.029304 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
919b5e557651de3e6e934fa6c4b16a3e517ceea9 | 501 | py | Python | apps/careeropportunity/migrations/0003_careeropportunity_deadline.py | Kpaubert/onlineweb4 | 9ac79f163bc3a816db57ffa8477ea88770d97807 | [
"MIT"
] | 32 | 2017-02-22T13:38:38.000Z | 2022-03-31T23:29:54.000Z | apps/careeropportunity/migrations/0003_careeropportunity_deadline.py | Kpaubert/onlineweb4 | 9ac79f163bc3a816db57ffa8477ea88770d97807 | [
"MIT"
] | 694 | 2017-02-15T23:09:52.000Z | 2022-03-31T23:16:07.000Z | apps/careeropportunity/migrations/0003_careeropportunity_deadline.py | Kpaubert/onlineweb4 | 9ac79f163bc3a816db57ffa8477ea88770d97807 | [
"MIT"
] | 35 | 2017-09-02T21:13:09.000Z | 2022-02-21T11:30:30.000Z | # -*- coding: utf-8 -*-
# Generated by Django 1.9.10 on 2016-10-05 18:52
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: add the optional ``deadline`` date to CareerOpportunity.

    The field is nullable and blankable, so existing rows need no default.
    """

    dependencies = [("careeropportunity", "0002_careeropportunity_job_type")]

    operations = [
        migrations.AddField(
            model_name="careeropportunity",
            name="deadline",
            # verbose_name is Norwegian for "application deadline".
            field=models.DateField(blank=True, null=True, verbose_name="søknadsfrist"),
        )
    ]
| 26.368421 | 87 | 0.670659 | 54 | 501 | 6.037037 | 0.759259 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.053435 | 0.215569 | 501 | 18 | 88 | 27.833333 | 0.776081 | 0.135729 | 0 | 0 | 1 | 0 | 0.197674 | 0.072093 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.181818 | 0 | 0.454545 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
919c72f34a550015e3cadb40b602759ce1ee194d | 14,482 | py | Python | benchmark/python/ffi/benchmark_ffi.py | grygielski/incubator-mxnet | 45952e21a35e32a04b7607b121085973369a42db | [
"BSL-1.0",
"Apache-2.0"
] | 211 | 2016-06-06T08:32:36.000Z | 2021-07-03T16:50:16.000Z | benchmark/python/ffi/benchmark_ffi.py | grygielski/incubator-mxnet | 45952e21a35e32a04b7607b121085973369a42db | [
"BSL-1.0",
"Apache-2.0"
] | 42 | 2017-01-05T02:45:13.000Z | 2020-08-11T23:45:27.000Z | benchmark/python/ffi/benchmark_ffi.py | grygielski/incubator-mxnet | 45952e21a35e32a04b7607b121085973369a42db | [
"BSL-1.0",
"Apache-2.0"
] | 58 | 2016-10-27T07:37:08.000Z | 2021-07-03T16:50:17.000Z | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import timeit
import itertools
import argparse
import os
class OpArgMngr(object):
    """Operator argument manager for storing operator workloads.

    Workloads are registered into the class-level ``args`` dict, keyed by a
    specifier: the function name by default, or an explicit ``_specifier``
    keyword so the same function can be registered more than once.
    """
    args = {}

    @staticmethod
    def add_workload(funcname, *args, **kwargs):
        """Register ``funcname`` with the given call arguments.

        Raises ValueError if the specifier was already registered.
        """
        if "_specifier" not in kwargs:
            _specifier = funcname
        else:
            # Fixed: the lookup and del previously used the misspelled key
            # "_specififer", so passing _specifier= always raised KeyError.
            _specifier = kwargs["_specifier"]
            del kwargs["_specifier"]
        if _specifier in OpArgMngr.args:
            raise ValueError("duplicate {}".format(_specifier))
        OpArgMngr.args[_specifier] = {'args': args, 'kwargs': kwargs, 'funcname': funcname}
def generate_workloads():
    """Build a pool of dnp.ones arrays keyed by 'AxB...'-style shape strings.

    Covers every shape of rank 0 through 3 whose dimensions are in 0..3.
    """
    array_pool = {}
    all_shapes = []
    for rank in range(4):
        all_shapes.extend(itertools.product(range(4), repeat=rank))
    for shape in all_shapes:
        key = 'x'.join(str(dim) for dim in shape)
        if key in array_pool:
            raise ValueError("duplicate array {}".format(key))
        array_pool[key] = dnp.ones(shape)
    return array_pool
def prepare_workloads():
    """Register one representative workload per benchmarked numpy-style op.

    Array arguments come from ``generate_workloads()``'s pool, keyed by shape
    strings such as '2x2'.  Each registration stores the op name plus the
    concrete args/kwargs that ``run_benchmark`` will later time against both
    official numpy (onp) and mxnet.np (dnp).
    """
    pool = generate_workloads()
    # Array creation ops.
    OpArgMngr.add_workload("zeros", (2, 2))
    OpArgMngr.add_workload("full", (2, 2), 10)
    OpArgMngr.add_workload("identity", 3)
    OpArgMngr.add_workload("ones", (2, 2))
    OpArgMngr.add_workload("einsum", "ii", pool['2x2'], optimize=False)
    OpArgMngr.add_workload("unique", pool['1'], return_index=True, return_inverse=True, return_counts=True, axis=-1)
    OpArgMngr.add_workload("dstack", (pool['2x1'], pool['2x1'], pool['2x1'], pool['2x1']))
    OpArgMngr.add_workload("polyval", dnp.arange(10), pool['2x2'])
    OpArgMngr.add_workload("ediff1d", pool['2x2'], pool['2x2'], pool['2x2'])
    OpArgMngr.add_workload("nan_to_num", pool['2x2'])
    OpArgMngr.add_workload("tri", 2, 3, 4)
    OpArgMngr.add_workload("tensordot", pool['2x2'], pool['2x2'], ((1, 0), (0, 1)))
    OpArgMngr.add_workload("cumsum", pool['3x2'], axis=0, out=pool['3x2'])
    OpArgMngr.add_workload("random.shuffle", pool['3'])
    # Elementwise comparison / arithmetic ops.
    OpArgMngr.add_workload("equal", pool['2x2'], pool['2x2'])
    OpArgMngr.add_workload("not_equal", pool['2x2'], pool['2x2'])
    OpArgMngr.add_workload("less", pool['2x2'], pool['2x2'])
    OpArgMngr.add_workload("greater_equal", pool['2x2'], pool['2x2'])
    OpArgMngr.add_workload("less_equal", pool['2x2'], pool['2x2'])
    OpArgMngr.add_workload("maximum", pool['2x2'], pool['2x2'])
    OpArgMngr.add_workload("minimum", pool['2x2'], pool['2x2'])
    # Reductions.
    OpArgMngr.add_workload("sum", pool['2x2'], axis=0, keepdims=True, out=pool['1x2'])
    OpArgMngr.add_workload("std", pool['2x2'], axis=0, ddof=0, keepdims=True, out=pool['1x2'])
    OpArgMngr.add_workload("var", pool['2x2'], axis=0, ddof=1, keepdims=True, out=pool['1x2'])
    OpArgMngr.add_workload("average", pool['2x2'], weights=pool['2'], axis=1, returned=True)
    OpArgMngr.add_workload("histogram", pool['2x2'], bins=10, range=(0.0, 10.0))
    OpArgMngr.add_workload("add", pool['2x2'], pool['2x2'])
    OpArgMngr.add_workload("cross", pool['2'], pool['2'])
    # Linear algebra ops.
    OpArgMngr.add_workload("linalg.eig", pool['3x3'])
    OpArgMngr.add_workload("linalg.eigh", pool['3x3'])
    OpArgMngr.add_workload("linalg.det", pool['3x3'])
    OpArgMngr.add_workload("linalg.slogdet", pool['3x3'])
    OpArgMngr.add_workload("linalg.matrix_rank", pool['3x3'], pool['1'], hermitian=False)
    OpArgMngr.add_workload("linalg.svd", pool['3x3'])
    OpArgMngr.add_workload("linalg.cholesky", pool['1x1'])
    OpArgMngr.add_workload("linalg.qr", pool['3x3'])
    OpArgMngr.add_workload("linalg.lstsq", pool['2x1'], pool['2'], rcond=None)
    OpArgMngr.add_workload("linalg.eigvals", pool['1x1'])
    OpArgMngr.add_workload("linalg.eigvalsh", pool['1x1'], UPLO='L')
    OpArgMngr.add_workload("linalg.inv", pool['1x1'])
    OpArgMngr.add_workload("linalg.pinv", pool['2x3x3'], pool['1'], hermitian=False)
    OpArgMngr.add_workload("linalg.solve", pool['1x1'], pool['1'])
    OpArgMngr.add_workload("linalg.tensorinv", pool['1x1'], ind=2)
    OpArgMngr.add_workload("linalg.norm", pool['3x3'])
    OpArgMngr.add_workload("linalg.tensorsolve", pool['1x1x1'], pool['1x1x1'], (2, 0, 1))
    # Shape manipulation / indexing ops.
    OpArgMngr.add_workload("tile", pool['2x2'], 1)
    OpArgMngr.add_workload("trace", pool['2x2'])
    OpArgMngr.add_workload("transpose", pool['2x2'])
    OpArgMngr.add_workload("split", pool['3x3'], (0, 1, 2), axis=1)
    OpArgMngr.add_workload("vstack", (pool['3x3'], pool['3x3'], pool['3x3']))
    OpArgMngr.add_workload("argmax", pool['3x2'], axis=-1)
    OpArgMngr.add_workload("argmin", pool['3x2'], axis=-1)
    OpArgMngr.add_workload("atleast_1d", pool['2'], pool['2x2'])
    OpArgMngr.add_workload("atleast_2d", pool['2'], pool['2x2'])
    OpArgMngr.add_workload("atleast_3d", pool['2'], pool['2x2'])
    OpArgMngr.add_workload("argsort", pool['3x2'], axis=-1)
    OpArgMngr.add_workload("sort", pool['3x2'], axis=-1)
    OpArgMngr.add_workload("indices", dimensions=(1, 2, 3))
    OpArgMngr.add_workload("subtract", pool['2x2'], pool['2x2'])
    OpArgMngr.add_workload("multiply", pool['2x2'], pool['2x2'])
    OpArgMngr.add_workload("mod", pool['2x2'], pool['2x2'])
    OpArgMngr.add_workload("remainder", pool['2x2'], pool['2x2'])
    OpArgMngr.add_workload("divide", pool['2x2'], pool['2x2'])
    OpArgMngr.add_workload("true_divide", pool['2x2'], pool['2x2'])
    OpArgMngr.add_workload("power", pool['2x2'], pool['2x2'])
    OpArgMngr.add_workload("lcm", pool['2x2'].astype('int32'), pool['2x2'].astype('int32'))
    OpArgMngr.add_workload("diff", pool['2x2'], n=1, axis=-1)
    OpArgMngr.add_workload("inner", pool['2x2'], pool['2x2'])
    # Random sampling ops.
    OpArgMngr.add_workload("random.multinomial", n=2, pvals=[1/6.]*6, size=(2,2))
    OpArgMngr.add_workload("random.rand", 3, 2)
    OpArgMngr.add_workload("random.randn", 2, 2)
    OpArgMngr.add_workload("nonzero", pool['2x2'])
    OpArgMngr.add_workload("tril", pool['2x2'], k=0)
    OpArgMngr.add_workload("random.choice", pool['2'], size=(2, 2))
    OpArgMngr.add_workload("take", pool['2'], dnp.array([1,0], dtype='int64'))
    OpArgMngr.add_workload("clip", pool['2x2'], 0, 1)
    OpArgMngr.add_workload("expand_dims", pool['2x2'], axis=0)
    OpArgMngr.add_workload("broadcast_to", pool['2x2'], (2, 2, 2))
    OpArgMngr.add_workload("full_like", pool['2x2'], 2)
    OpArgMngr.add_workload("zeros_like", pool['2x2'])
    OpArgMngr.add_workload("ones_like", pool['2x2'])
    # Bitwise / logical / floating-point manipulation ops.
    OpArgMngr.add_workload("bitwise_and", pool['2x2'].astype(int), pool['2x2'].astype(int))
    OpArgMngr.add_workload("bitwise_xor", pool['2x2'].astype(int), pool['2x2'].astype(int))
    OpArgMngr.add_workload("bitwise_or", pool['2x2'].astype(int), pool['2x2'].astype(int))
    OpArgMngr.add_workload("copysign", pool['2x2'], pool['2x2'])
    OpArgMngr.add_workload("arctan2", pool['2x2'], pool['2x2'])
    OpArgMngr.add_workload("hypot", pool['2x2'], pool['2x2'])
    OpArgMngr.add_workload("ldexp", pool['2x2'].astype(int), pool['2x2'].astype(int))
    OpArgMngr.add_workload("logical_and", pool['2x2'], pool['2x2'])
    OpArgMngr.add_workload("logical_or", pool['2x2'], pool['2x2'])
    OpArgMngr.add_workload("logical_xor", pool['2x2'], pool['2x2'])
    OpArgMngr.add_workload("random.uniform", low=0, high=1, size=1)
    OpArgMngr.add_workload("random.exponential", scale=2, size=(2,2))
    OpArgMngr.add_workload("random.rayleigh", scale=2, size=(2,2))
    OpArgMngr.add_workload("random.weibull", a=2, size=(2,2))
    OpArgMngr.add_workload("random.pareto", a=2, size=(2,2))
    OpArgMngr.add_workload("random.power", a=2, size=(2,2))
    OpArgMngr.add_workload("random.logistic", loc=2, scale=2, size=(2,2))
    OpArgMngr.add_workload("random.gumbel", loc=2, scale=2, size=(2,2))
    OpArgMngr.add_workload("where", pool['2x3'], pool['2x3'], pool['2x1'])
    OpArgMngr.add_workload("may_share_memory", pool['2x3'][:0], pool['2x3'][:1])
    OpArgMngr.add_workload('squeeze', pool['2x2'], axis=None)
    OpArgMngr.add_workload("pad", pool['2x2'], pad_width=((1,2),(1,2)), mode="constant")
    OpArgMngr.add_workload("prod", pool['2x2'], axis=1, dtype="float64", keepdims=False)
    OpArgMngr.add_workload("around", pool['2x2'], decimals=0)
    OpArgMngr.add_workload("round", pool['2x2'], decimals=1)
    OpArgMngr.add_workload("repeat", pool['2x2'], repeats=1, axis=None)
    OpArgMngr.add_workload("diagflat", pool['2x2'], k=1)
    OpArgMngr.add_workload("diag", pool['2x2'], k=1)
    OpArgMngr.add_workload("diagonal", pool['2x2x2'], offset=-1, axis1=0, axis2=1)
    OpArgMngr.add_workload("diag_indices_from", pool['2x2'])
    OpArgMngr.add_workload("bincount", dnp.arange(3, dtype=int), pool['3'], minlength=4)
    OpArgMngr.add_workload("percentile", pool['2x2x2'], 80, axis=0, out=pool['2x2'],\
                           interpolation='midpoint')
    OpArgMngr.add_workload("quantile", pool['2x2x2'], 0.8, axis=0, out=pool['2x2'],\
                           interpolation='midpoint')
    OpArgMngr.add_workload("all", pool['2x2x2'], axis=(0, 1),\
                           out=dnp.array([False, False], dtype=bool), keepdims=False)
    OpArgMngr.add_workload("any", pool['2x2x2'], axis=(0, 1),\
                           out=dnp.array([False, False], dtype=bool), keepdims=False)
    OpArgMngr.add_workload("roll", pool["2x2"], 1, axis=0)
    OpArgMngr.add_workload("rot90", pool["2x2"], 2)
    OpArgMngr.add_workload("column_stack", (pool['3x3'], pool['3x3'], pool['3x3']))
    OpArgMngr.add_workload("hstack", (pool['3x3'], pool['3x3'], pool['3x3']))
    OpArgMngr.add_workload("triu", pool['3x3'])
    OpArgMngr.add_workload("array_split", pool['2x2'], 2, axis=1)
    OpArgMngr.add_workload("vsplit", pool['2x2'], 2)
    OpArgMngr.add_workload("hsplit", pool['2x2'], 2)
    OpArgMngr.add_workload("dsplit", pool['2x2x2'], 2)
    OpArgMngr.add_workload("arange", 10)
    OpArgMngr.add_workload("concatenate", (pool['1x2'], pool['1x2'], pool['1x2']), axis=0)
    OpArgMngr.add_workload("append", pool['2x2'], pool['1x2'], axis=0)
    OpArgMngr.add_workload("insert", pool['3x2'], 1, pool['1x1'], axis=0)
    OpArgMngr.add_workload("delete", pool['3x2'], 1, axis=0)
    OpArgMngr.add_workload("blackman", 12)
    OpArgMngr.add_workload("eye", 5)
    OpArgMngr.add_workload("hamming", 12)
    OpArgMngr.add_workload("hanning", 12)
    OpArgMngr.add_workload("linspace", 0, 10, 8, endpoint=False)
    OpArgMngr.add_workload("logspace", 2.0, 3.0, num=4, base=2.0, dtype=onp.float32)
    OpArgMngr.add_workload("matmul", pool['2x2'], pool['2x2'])
    OpArgMngr.add_workload("mean", pool['2x2'], axis=0, keepdims=True)
    OpArgMngr.add_workload("random.gamma", 1, size=(2, 3))
    OpArgMngr.add_workload("random.normal", 1, size=(2, 3))
    OpArgMngr.add_workload("max", pool["2x2"], axis=0, out=pool['2'], keepdims=False)
    OpArgMngr.add_workload("min", pool["2x2"], axis=0, out=pool['2'], keepdims=False)
    OpArgMngr.add_workload("amax", pool["2x2"], axis=1, out=pool['2'], keepdims=False)
    OpArgMngr.add_workload("amin", pool["2x2"], axis=1, out=pool['2'], keepdims=False)
    # Unary ops share the same 2x2 input; bitwise_not needs an integer array.
    unary_ops = ['negative', 'reciprocal', 'abs', 'sign', 'rint', 'ceil', 'floor',
                 'bitwise_not', 'trunc', 'fix', 'square', 'sqrt', 'cbrt', 'exp',
                 'log', 'log10', 'log2', 'log1p', 'expm1', 'logical_not', 'isnan',
                 'isinf', 'isposinf', 'isneginf', 'isfinite', 'sin', 'cos', 'tan',
                 'arcsin', 'arccos', 'arctan', 'degrees', 'radians', 'sinh', 'cosh',
                 'tanh', 'arcsinh', 'arccosh', 'arctanh']  # 'rad2deg', 'deg2rad' cannot run without tvm
    for unary_op in unary_ops:
        if unary_op == "bitwise_not":
            OpArgMngr.add_workload(unary_op, dnp.ones((2, 2), dtype=int))
        else:
            OpArgMngr.add_workload(unary_op, pool['2x2'])
def benchmark_helper(f, *args, **kwargs):
    """Return the average wall-clock time (seconds) of one call f(*args, **kwargs)."""
    reps = 10000
    total = timeit.timeit(lambda: f(*args, **kwargs), number=reps)
    return total / reps
def get_op(module, funcname):
    """Resolve a dotted name like 'linalg.norm' to an attribute of module."""
    obj = module
    for part in funcname.split("."):
        obj = getattr(obj, part)
    return obj
def run_benchmark(packages):
    """Time every registered workload against each package.

    Returns {specifier: {package_name: avg_seconds_per_call}}.
    """
    results = {}
    for specifier, workload in OpArgMngr.args.items():
        per_package = {}
        for pkg_name, pkg in packages.items():
            print('{}.{} running...'.format(pkg_name, specifier))
            op = get_op(pkg["module"], workload["funcname"])
            call_args = [pkg["data"](arg) for arg in workload["args"]]
            call_kwargs = {kw: pkg["data"](val)
                           for kw, val in workload["kwargs"].items()}
            per_package[pkg_name] = benchmark_helper(op, *call_args, **call_kwargs)
        results[specifier] = per_package
    return results
def show_results(results):
    """Print the benchmark results as a right-aligned table, times in microseconds."""
    row = "{:>24}{:>24}{:>24}"
    print(row.format("name", "package", "time(us)"))
    for specifier, timings in results.items():
        for package, seconds in timings.items():
            print(row.format(specifier, package, seconds * 10 ** 6))
if __name__ == "__main__":
    # The cython/ctypes FFI selection must be set in the environment before
    # mxnet is imported, which is why the imports happen below.
    parser = argparse.ArgumentParser()
    parser.add_argument('ffi_type')
    parsed = parser.parse_args()
    if parsed.ffi_type == "cython":
        os.environ['MXNET_ENABLE_CYTHON'] = '1'
        os.environ['MXNET_ENFORCE_CYTHON'] = '1'
    elif parsed.ffi_type == "ctypes":
        os.environ['MXNET_ENABLE_CYTHON'] = '0'
    else:
        # Fixed: the original built the message as "str",format(x) — a comma
        # instead of a dot — which passed two positional args to ValueError
        # instead of formatting the string.
        raise ValueError("unknown ffi_type {}".format(parsed.ffi_type))
    os.environ["MXNET_ENGINE_TYPE"] = "NaiveEngine"
    import mxnet as mx
    import numpy as onp
    from mxnet import np as dnp
    mx.npx.set_np(dtype=False)
    # Each entry: the module ops are looked up in, and a converter mapping an
    # mxnet array argument into that package's native array type.
    packages = {
        "onp": {
            "module": onp,
            "data": lambda arr: arr.asnumpy() if isinstance(arr, dnp.ndarray) else arr
        },
        "dnp": {
            "module": dnp,
            "data": lambda arr: arr
        }
    }
    prepare_workloads()
    results = run_benchmark(packages)
    show_results(results)
| 51.90681 | 116 | 0.646596 | 1,922 | 14,482 | 4.74974 | 0.221124 | 0.177128 | 0.31986 | 0.072845 | 0.459634 | 0.367729 | 0.288531 | 0.193121 | 0.132873 | 0.073173 | 0 | 0.048544 | 0.160751 | 14,482 | 278 | 117 | 52.093525 | 0.702567 | 0.059039 | 0 | 0.028807 | 0 | 0 | 0.172216 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.028807 | false | 0 | 0.028807 | 0 | 0.082305 | 0.012346 | 0 | 0 | 0 | null | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
91a824d6a95f0e9a4a572ff289971a58109b3c3c | 3,887 | py | Python | test/present.py | jchampio/apache-websocket | 18ad4ae2fc99381b8d75785f492a479f789b322b | [
"Apache-2.0"
] | 8 | 2015-09-10T21:49:25.000Z | 2022-02-02T04:39:00.000Z | test/present.py | jchampio/apache-websocket | 18ad4ae2fc99381b8d75785f492a479f789b322b | [
"Apache-2.0"
] | 34 | 2015-09-10T21:40:09.000Z | 2020-09-04T22:16:08.000Z | test/present.py | jchampio/apache-websocket | 18ad4ae2fc99381b8d75785f492a479f789b322b | [
"Apache-2.0"
] | 5 | 2016-01-22T05:16:54.000Z | 2017-10-18T12:28:02.000Z | #! /usr/bin/env python
#
# Presents the results of an Autobahn TestSuite run in TAP format.
#
# Copyright 2015 Jacob Champion
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from distutils.version import StrictVersion
import json
import os.path
import sys
import textwrap
import yamlish
def filter_report(report):
    """Filters a test report dict down to only the interesting keys."""
    keep = (
        'behavior',
        'behaviorClose',
        'expected',
        'received',
        'expectedClose',
        'remoteCloseCode',
    )
    return {key: report[key] for key in keep}
def prepare_description(report):
    """Constructs a one-line description (<= 80 chars) from a test report."""
    lines = textwrap.wrap(report['description'], 80)
    # Keep only the first wrapped line; mark truncation with an ellipsis.
    if len(lines) > 1:
        return lines[0] + '...'
    return lines[0]
#
# MAIN
#
# Reads the Autobahn TestSuite JSON index, interprets each test's result, and
# emits a TAP (Test Anything Protocol) report on stdout.  Exit status is 0
# only when no test failed (skips do not count as failures).

# Read the index.
results_dir = 'test-results'
with open(os.path.join(results_dir, 'index.json'), 'r') as index_file:
    index = json.load(index_file)['AutobahnPython']
# Sort the tests by numeric ID so we print them in a sane order.
test_ids = list(index.keys())
test_ids.sort(key=StrictVersion)
# Print the TAP header.
print('TAP version 13')
print('1..{0!s}'.format(len(test_ids)))
count = 0
skipped_count = 0
failed_count = 0
for test_id in test_ids:
    count += 1
    passed = True
    skipped = False
    report = None
    result = index[test_id]
    # Try to get additional information from this test's report file.
    # A missing/corrupt report file is not fatal; the description notes it.
    try:
        path = os.path.join(results_dir, result['reportfile'])
        with open(path, 'r') as f:
            report = json.load(f)
        description = prepare_description(report)
    except Exception as e:
        description = '[could not load report file: {0!s}]'.format(e)
    test_result = result['behavior']
    close_result = result['behaviorClose']
    # Interpret the result for this test: UNIMPLEMENTED maps to a TAP SKIP;
    # anything other than OK/INFORMATIONAL (in either the test behavior or the
    # close behavior) is a failure.
    if test_result != 'OK' and test_result != 'INFORMATIONAL':
        if test_result == 'UNIMPLEMENTED':
            skipped = True
        else:
            passed = False
    elif close_result != 'OK' and close_result != 'INFORMATIONAL':
        passed = False
    # Print the TAP result.
    print(u'{0} {1} - [{2}] {3}{4}'.format('ok' if passed else 'not ok',
                                           count,
                                           test_id,
                                           description,
                                           ' # SKIP unimplemented' if skipped
                                           else ''))
    # Print a YAMLish diagnostic for failed tests.
    if report and not passed:
        output = filter_report(report)
        diagnostic = yamlish.dumps(output)
        for line in diagnostic.splitlines():
            print('  ' + line)
    if not passed:
        failed_count += 1
    if skipped:
        skipped_count += 1
# Print a final result.
print('# Autobahn|TestSuite {0}'.format('PASSED' if not failed_count else 'FAILED'))
print('# total {0}'.format(count))
print('# passed {0}'.format(count - failed_count - skipped_count))
print('# skipped {0}'.format(skipped_count))
print('# failed {0}'.format(failed_count))
exit(0 if not failed_count else 1)
| 28.792593 | 84 | 0.623874 | 496 | 3,887 | 4.818548 | 0.362903 | 0.025105 | 0.010879 | 0.013389 | 0.033473 | 0 | 0 | 0 | 0 | 0 | 0 | 0.013107 | 0.273733 | 3,887 | 134 | 85 | 29.007463 | 0.83351 | 0.289426 | 0 | 0.026316 | 0 | 0 | 0.141492 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.026316 | false | 0.105263 | 0.078947 | 0 | 0.131579 | 0.118421 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 1 |
91a977f44ca6b26789c3c66246a46fa0280ee2a7 | 1,143 | py | Python | softwarecollections/scls/migrations/0004_other_repos_default_values.py | WEBZCC/softwarecollections | efee5c3c276033d526a0cdba504d43deff71581e | [
"BSD-3-Clause"
] | 39 | 2016-12-24T02:57:55.000Z | 2022-02-15T09:29:43.000Z | softwarecollections/scls/migrations/0004_other_repos_default_values.py | WEBZCC/softwarecollections | efee5c3c276033d526a0cdba504d43deff71581e | [
"BSD-3-Clause"
] | 32 | 2016-11-21T15:05:07.000Z | 2021-12-06T11:52:32.000Z | softwarecollections/scls/migrations/0004_other_repos_default_values.py | WEBZCC/softwarecollections | efee5c3c276033d526a0cdba504d43deff71581e | [
"BSD-3-Clause"
] | 13 | 2016-12-14T10:42:22.000Z | 2022-01-01T20:35:15.000Z | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: give OtherRepo's optional fields an explicit '' default.

    All four fields become blankable with an empty-string default; icon is
    additionally restricted to a fixed set of distribution choices.
    """

    dependencies = [
        ('scls', '0003_other_repos'),
    ]

    operations = [
        migrations.AlterField(
            model_name='otherrepo',
            name='arch',
            field=models.CharField(default='', blank=True, verbose_name='Architecture', max_length=20),
        ),
        migrations.AlterField(
            model_name='otherrepo',
            name='command',
            field=models.TextField(default='', blank=True, verbose_name='Command'),
        ),
        migrations.AlterField(
            model_name='otherrepo',
            name='icon',
            field=models.CharField(default='', blank=True, verbose_name='Icon', choices=[('centos', 'centos'), ('epel', 'epel'), ('fedora', 'fedora'), ('rhel', 'rhel')], max_length=20),
        ),
        migrations.AlterField(
            model_name='otherrepo',
            name='version',
            field=models.CharField(default='', blank=True, verbose_name='Distribution version', max_length=20),
        ),
    ]
| 32.657143 | 185 | 0.582677 | 107 | 1,143 | 6.056075 | 0.420561 | 0.123457 | 0.154321 | 0.179012 | 0.552469 | 0.510802 | 0.381173 | 0.381173 | 0.16358 | 0 | 0 | 0.013095 | 0.265092 | 1,143 | 34 | 186 | 33.617647 | 0.758333 | 0.018373 | 0 | 0.428571 | 0 | 0 | 0.14375 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.071429 | 0 | 0.178571 | 0 | 0 | 0 | 0 | null | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
91ab6aa12f229c7b9ddab5414461949479dfe028 | 787 | py | Python | plugins/polio/migrations/0029_campaign_country.py | BLSQ/iaso-copy | 85fb17f408c15e8c2d730416d1312f58f8db39b7 | [
"MIT"
] | 29 | 2020-12-26T07:22:19.000Z | 2022-03-07T13:40:09.000Z | plugins/polio/migrations/0029_campaign_country.py | BLSQ/iaso-copy | 85fb17f408c15e8c2d730416d1312f58f8db39b7 | [
"MIT"
] | 150 | 2020-11-09T15:03:27.000Z | 2022-03-07T15:36:07.000Z | plugins/polio/migrations/0029_campaign_country.py | BLSQ/iaso | 95c8087c0182bdd576598eb8cd39c440e58e15d7 | [
"MIT"
] | 4 | 2020-11-09T10:38:13.000Z | 2021-10-04T09:42:47.000Z | # Generated by Django 3.1.13 on 2021-10-04 11:44
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated: add a nullable ``country`` FK from Campaign to OrgUnit.

    SET_NULL on delete keeps campaigns when the referenced org unit goes away.
    """

    dependencies = [
        ("iaso", "0107_auto_20211001_1845"),
        ("polio", "0028_remove_campaign_budget_first_draft_submitted_at"),
    ]

    operations = [
        migrations.AddField(
            model_name="campaign",
            name="country",
            field=models.ForeignKey(
                blank=True,
                help_text="Country for campaign, set automatically from initial_org_unit",
                null=True,
                on_delete=django.db.models.deletion.SET_NULL,
                related_name="campaigns_country",
                to="iaso.orgunit",
            ),
        ),
    ]
| 28.107143 | 90 | 0.590851 | 83 | 787 | 5.385542 | 0.698795 | 0.053691 | 0.06264 | 0.098434 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.066298 | 0.310038 | 787 | 27 | 91 | 29.148148 | 0.756906 | 0.05845 | 0 | 0.095238 | 1 | 0 | 0.255751 | 0.101489 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.095238 | 0 | 0.238095 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
91ad7c273462430b62373174e1161a8ff1416f63 | 715 | py | Python | atcoder/corp/codethxfes2014a_e.py | knuu/competitive-programming | 16bc68fdaedd6f96ae24310d697585ca8836ab6e | [
"MIT"
] | 1 | 2018-11-12T15:18:55.000Z | 2018-11-12T15:18:55.000Z | atcoder/corp/codethxfes2014a_e.py | knuu/competitive-programming | 16bc68fdaedd6f96ae24310d697585ca8836ab6e | [
"MIT"
] | null | null | null | atcoder/corp/codethxfes2014a_e.py | knuu/competitive-programming | 16bc68fdaedd6f96ae24310d697585ca8836ab6e | [
"MIT"
] | null | null | null | r, c, m = map(int, input().split())
n = int(input())
op = [list(map(lambda x: int(x) - 1, input().split())) for _ in range(n)]
board = [[0 for _ in range(c)] for _ in range(r)]
for ra, rb, ca, cb in op:
for j in range(ra, rb + 1):
for k in range(ca, cb + 1):
board[j][k] += 1
cnt = 0
for i in range(r):
for j in range(c):
board[i][j] %= 4
if board[i][j] == 0:
cnt += 1
for i in range(n):
ra, rb, ca, cb = op[i]
cnti = cnt
for j in range(ra, rb + 1):
for k in range(ca, cb + 1):
if board[j][k] == 0:
cnti -= 1
elif board[j][k] == 1:
cnti += 1
if cnti == m:
print(i + 1)
| 25.535714 | 73 | 0.439161 | 129 | 715 | 2.410853 | 0.24031 | 0.22508 | 0.096463 | 0.106109 | 0.205788 | 0.205788 | 0.205788 | 0.205788 | 0.205788 | 0.205788 | 0 | 0.036036 | 0.379021 | 715 | 27 | 74 | 26.481481 | 0.664414 | 0 | 0 | 0.16 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0.04 | 0 | 0 | 0 | null | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
91ae1121ab522c5ec74869736cdca27ee08ca053 | 3,080 | py | Python | halmodule.py | richteer/pyfatafl | 1faddcf5d9eb36cbc6952b9a8e8bb899989f7112 | [
"MIT"
] | null | null | null | halmodule.py | richteer/pyfatafl | 1faddcf5d9eb36cbc6952b9a8e8bb899989f7112 | [
"MIT"
] | null | null | null | halmodule.py | richteer/pyfatafl | 1faddcf5d9eb36cbc6952b9a8e8bb899989f7112 | [
"MIT"
] | null | null | null | from module import XMPPModule
import halutils
import pyfatafl
class Game():
    """One Hnefatafl match between two players, driven over XMPP messages.

    players[0] is the challenger; ``turn`` indexes ``players`` (False selects
    the challenger, True the challengee).
    """

    def __init__(self, mod, p1, p2):
        # Fixed: instance attributes were previously assigned at class body
        # level with ``self.`` (a NameError at import time); they belong here.
        self.players = [p1, p2]   # [challenger JID, challengee JID]
        self.mod = mod            # owning module (holds the sessions dict)
        self.xmpp = mod.xmpp
        self.b = None             # board object, created in begin()
        self.turn = False         # index into self.players
        self.xmpp.sendMsg(p2, "You have been challenged to play Hnefatafl by {}, reply with '!hnefatafl accept' to begin!".format(p1))

    def begin(self):
        # Send initial board state.
        # Fixed: the module is imported as ``pyfatafl`` at the top of this
        # file; the code referenced an undefined ``hnefatafl`` name.
        self.b = pyfatafl.Board()
        self.turn = False  # For now, make the challenger be first
        self._sendBoard()

    def _sendBoard(self):
        # Fixed: missing ':' on the def line, and the color string was passed
        # as an extra argument to sendMsg instead of to format().
        for i in self.players:
            self.xmpp.sendMsg(i, self.b.getPtBoard() + "\n\n" + "It is '{}''s ({}) turn".format(
                self.players[self.turn], "white" if self.turn else "black"))

    def msg(self, player, string):
        """Handle a move message from ``player``; ``string`` is the move text."""
        if player != self.players[self.turn]:
            self.xmpp.sendMsg(player, "Sorry, it is not your turn!")
            return  # fixed: previously fell through and processed the move anyway
        m = pyfatafl.Move()
        string = "{} {}".format("w" if self.turn else "b", string)
        try:
            m.parse(string, self.b)
        except Exception:
            self.xmpp.sendMsg(player, "Invalid move format, see !help hnefatafl")
            return  # fixed: do not attempt to play an unparsed move
        try:
            self.b.move(m)
            self.turn = not self.turn  # fixed: turns never alternated before
            self._sendBoard()
        except Exception as e:  # TODO: Have been errors
            self.xmpp.sendMsg(player, str(e))
        if self.b.over:  # fixed: 'over' is a board attribute, not Game's
            for i in self.players:
                self.xmpp.sendMsg(i, "Game over! {} wins!".format(self.b.over))
                del self.mod.sessions[i]
# Commented to avoid loading before its ready
class Hnefatafl(XMPPModule):
    """XMPP front-end for Hnefatafl: challenge, accept, surrender, and moves."""

    # Maps a player's bare JID to the Game session they are in.
    sessions = {}

    def recvMsg(self, msg):
        cmd, args = halutils.splitArgList(msg)
        if cmd == "!hnefatafl":
            if not args:
                # fixed: args[0] previously raised IndexError on a bare command
                self.xmpp.reply(msg, "See !help hnefatafl for usage")
                return
            if args[0] == "challenge":
                if len(args) != 2:
                    self.xmpp.reply(msg, "Need the JID of a target")
                    return
                elif args[1] == msg['from'].bare:
                    # fixed: was ``arg[1] == msg['body'].bare`` — an undefined
                    # name compared against the message body — and it fell
                    # through to create the game anyway.
                    self.xmpp.reply(msg, "You can't challenge yourself...")
                    return
                # TODO: Validate JID here
                g = Game(self, msg['from'].bare, args[1])
                # fixed: was ``self.sessions[msg['from']].bare = g``, which
                # set an attribute on a missing entry instead of keying by
                # the sender's bare JID.
                self.sessions[msg['from'].bare] = g
                self.sessions[args[1]] = g
                self.xmpp.reply(msg, "Challenge sent!")
            elif args[0] == "accept":
                if msg['from'].bare not in self.sessions:
                    self.xmpp.reply(msg, "You have not been challenged!")
                    return
                self.sessions[msg['from'].bare].begin()
            elif args[0] == "surrender":
                if msg['from'].bare not in self.sessions:
                    self.xmpp.reply(msg, "You aren't currently in a session")
                    return
                # Drop both players' session entries.
                for p in [p for p in self.sessions[msg['from'].bare].players]:
                    del self.sessions[p]
        elif msg['from'].bare in self.sessions:
            # fixed: referenced bare ``sessions`` instead of self.sessions.
            # Any non-command message from a player in a game is a move.
            self.sessions[msg['from'].bare].msg(msg['from'].bare, msg['body'])

    def help(self, string):
        if string in ["!hnefatafl", "hnefatafl"]:
            return '''
usage: !hnefatafl <command> [arg]
Commands:
challenge <jid> - Send a challenge to JID
accept - Accept a challenge from JID, and begin game
surrender - Surrender the game
'''
        return '''
Hnefatafl by XMPP! Play a game against someone through this bot.
Features:
!hnefatafl - Command to challenge, accept, and surrender games
Note: This module will ignore any MUC messages, or other indirect messages
Another Note: This will likely be unplayable if not using a monospace font :)
'''
| 29.615385 | 147 | 0.656494 | 461 | 3,080 | 4.370933 | 0.314534 | 0.051613 | 0.049132 | 0.039702 | 0.126551 | 0.048635 | 0.048635 | 0.048635 | 0.048635 | 0.048635 | 0 | 0.005255 | 0.196753 | 3,080 | 103 | 148 | 29.902913 | 0.809216 | 0.049675 | 0 | 0.156627 | 0 | 0 | 0.321465 | 0 | 0 | 0 | 0 | 0.009709 | 0 | 0 | null | null | 0 | 0.036145 | null | null | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
91b495763107bc2ceb225b3984a8b4ffae309299 | 2,914 | py | Python | data_converter/data_converter.py | jkchen2/JshBot-plugins | b5999fecf0df067e34673ff193dcfbf8c7e2fde2 | [
"MIT"
] | 1 | 2021-08-09T19:28:49.000Z | 2021-08-09T19:28:49.000Z | data_converter/data_converter.py | jkchen2/JshBot-plugins | b5999fecf0df067e34673ff193dcfbf8c7e2fde2 | [
"MIT"
] | null | null | null | data_converter/data_converter.py | jkchen2/JshBot-plugins | b5999fecf0df067e34673ff193dcfbf8c7e2fde2 | [
"MIT"
] | 2 | 2017-07-14T00:15:54.000Z | 2019-03-02T09:46:21.000Z | import discord
from jshbot import utilities, data, configurations, plugins, logger
from jshbot.exceptions import BotException, ConfiguredBotException
from jshbot.commands import (
Command, SubCommand, Shortcut, ArgTypes, Attachment, Arg, Opt, MessageTypes, Response)
__version__ = '0.1.0'
CBException = ConfiguredBotException('0.3 to 0.4 plugin')
@plugins.command_spawner
def get_commands(bot):
    """Register the hidden !convertdata command (elevated_level=3 — presumably
    restricted to the bot owner; confirm against the jshbot permission tiers)."""
    return [Command('convertdata', hidden=True, elevated_level=3)]
async def get_response(bot, context):
    """Run the 0.3 -> 0.4 data conversion over every guild, then report."""
    tags_loaded = 'tags.py' in bot.plugins
    for guild in bot.guilds:
        convert_core(bot, guild)
        if tags_loaded:
            convert_tags(bot, guild)
    return Response("Converted.")
def convert_core(bot, guild):
    """Move a guild's legacy 'base' plugin data into the 'core' namespace.

    Skips guilds that already have 'core' data.  ID-like string entries
    ('blocked', 'muted_channels') are converted to ints, and the obsolete
    'moderators' key is dropped before the copy.
    """
    if data.get(bot, 'core', None, guild_id=guild.id):
        logger.warn("Guild %s (%s) already had core converted", guild.name, guild.id)
        return
    base_data = data.get(bot, 'base', None, guild_id=guild.id, default={})
    if 'disabled' in base_data:
        # TODO: Iterate through toggled commands
        pass
    if 'blocked' in base_data:
        # Blocked user IDs were stored as strings; convert to ints.
        replacement = []
        for entry in base_data['blocked']:
            replacement.append(int(entry))
        base_data['blocked'] = replacement
    if 'muted_channels' in base_data:
        # Same string -> int conversion for muted channel IDs.
        replacement = []
        for entry in base_data['muted_channels']:
            replacement.append(int(entry))
        base_data['muted_channels'] = replacement
    if 'moderators' in base_data:
        # Moderators are no longer stored in plugin data in 0.4.
        del base_data['moderators']
    if base_data:
        # Copy every remaining key into the new 'core' namespace, then drop
        # the old 'base' store.
        for key, value in base_data.items():
            data.add(bot, 'core', key, value, guild_id=guild.id)
        data.remove(bot, 'base', None, guild_id=guild.id)
def convert_tags(bot, guild):
    """Convert a guild's legacy 0.3 tag dict into 0.4's tag table rows.

    Skips guilds with no legacy tag data, then hands each converted row to the
    tags plugin's private _add_tag and removes the old store.
    """
    if not data.get(bot, 'tags.py', 'tags', guild_id=guild.id):
        logger.warn("Guild %s (%s) already had tags converted", guild.name, guild.id)
        return
    tags = data.get(bot, 'tags.py', 'tags', guild_id=guild.id, default={})
    add_tag = bot.plugins['tags.py']._add_tag
    # Row layout expected by _add_tag:
    # key,value,length,volume,name,flags,author,hits,created,last_used,last_used_by,complex,extra
    for key, tag in tags.items():
        to_insert = [
            key,  # key
            tag['value'],  # value
            tag['length'],  # length
            tag['volume'],  # volume
            tag['name'],  # name
            tag['flags'],  # flags
            int(tag['author']),  # author (stored as a string in 0.3)
            tag['hits'],  # hits
            int(tag['created']),  # created (timestamp truncated to int)
            int(tag['last_used']),  # last_used
            None,  # last_used_by (not tracked in 0.3)
            {},  # complex
            {}  # extra
        ]
        add_tag(bot, to_insert, guild.id)
    data.remove(bot, 'tags.py', 'tags', guild_id=guild.id, safe=True)
| 38.853333 | 96 | 0.576527 | 351 | 2,914 | 4.652422 | 0.273504 | 0.072872 | 0.051439 | 0.060012 | 0.363748 | 0.256583 | 0.1782 | 0.147581 | 0.131047 | 0.083282 | 0 | 0.003923 | 0.300275 | 2,914 | 74 | 97 | 39.378378 | 0.796959 | 0.075841 | 0 | 0.095238 | 0 | 0 | 0.122899 | 0 | 0 | 0 | 0 | 0.013514 | 0 | 1 | 0.047619 | false | 0.015873 | 0.063492 | 0.015873 | 0.174603 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
91b880c2b2d9577a02c8519251133c3cee61564c | 14,894 | py | Python | codes/elastoplasticity_spectralAnalysis/planeStress/slowWavePlaneStressSigDriven.py | adRenaud/research | 2f0062a1800d7a17577bbfc2393b084253d567f4 | [
"MIT"
] | 1 | 2021-06-18T14:52:03.000Z | 2021-06-18T14:52:03.000Z | codes/elastoplasticity_spectralAnalysis/planeStress/slowWavePlaneStressSigDriven.py | adRenaud/research | 2f0062a1800d7a17577bbfc2393b084253d567f4 | [
"MIT"
] | 1 | 2019-01-07T13:11:11.000Z | 2019-01-07T13:11:11.000Z | codes/elastoplasticity_spectralAnalysis/planeStress/slowWavePlaneStressSigDriven.py | adRenaud/research | 2f0062a1800d7a17577bbfc2393b084253d567f4 | [
"MIT"
] | null | null | null | # !\usr\bin\python
import numpy as np
from mpl_toolkits import mplot3d
import matplotlib.pyplot as plt
import scipy.optimize
from matplotlib import animation
from scipy.integrate import ode
import pdb
# Material parameters (SI units; steel-like elastoplastic material)
rho = 7800.                      # density [kg/m^3]
E = 2.e11                        # Young's modulus [Pa]
nu = 0.3                         # Poisson's ratio
mu = 0.5*E/(1.+nu)               # shear modulus
kappa = E/(3.*(1.-2.*nu))        # bulk modulus
lamb = kappa-2.*mu/3.            # Lame's first parameter
sigy = 100.0e6                   # initial yield stress [Pa]
H = 100.08e6                     # linear hardening modulus [Pa]
beta=(6.*mu**2)/(3.*mu+H)        # plastic factor used by the tangent modulus
def tangentModulus(sigma,lamb,mu,beta,tangent):
    """Elastoplastic tangent modulus condensed to a 3x3 in-plane operator.

    Parameters
    ----------
    sigma : array-like, ordered [sig11, sig12, sig22, sig33].
    lamb, mu : Lame coefficients.
    beta : plastic parameter, 6*mu**2/(3*mu + H).
    tangent : 'planeStress' (statically condense the 33 direction),
        'thinWalled' (alternative condensation), or any other value for the
        raw in-plane operator.

    Returns
    -------
    (3,3) ndarray laid out as
        |H1111 H1112 H1122|
        |H1211 H1212 H1222|
        |H2211 H2212 H2222|
    """
    H=np.zeros((3,3))
    sigDev = computeDeviatoricPart(sigma)
    sigdnorm2=np.dot(sigDev,sigDev)
    BETA=beta/sigdnorm2
    # computeDeviatoricPart stores sqrt(2)*s12 in slot 1: undo that factor
    s11=sigDev[0];s12=sigDev[1]/np.sqrt(2.);s22=sigDev[2];s33=sigDev[3]
    ## Plane stress condensation: Hijkl = Hijkl - Hij33*H33kl/H3333
    H1133=(lamb -BETA*s11*s33)
    H1233=(-BETA*s12*s33)
    H1122=(lamb -BETA*s11*s22)
    H2222=(lamb+2.*mu -BETA*s22**2)
    H1222=(-BETA*s12*s22)
    H2233=(lamb-BETA*s22*s33)
    H3333=(lamb+2.*mu-BETA*s33*s33)
    if tangent=='planeStress':
        H[0,0]=lamb+2.*mu - BETA*s11**2 -H1133*H1133/H3333
        H[0,1]=-BETA*s11*s12 -H1133*H1233/H3333
        H[0,2]=lamb-BETA*s11*s22 -H1133*H2233/H3333
        H[1,0]=-BETA*s12*s11-H1233*H1133/H3333
        H[1,1]=mu-BETA*s12**2 -H1233*H1233/H3333
        H[1,2]=-BETA*s12*s22-H1233*H2233/H3333
        H[2,0]=lamb - BETA*s11*s22 -H2233*H1133/H3333
        H[2,1]=-BETA*s22*s12 -H2233*H1233/H3333
        H[2,2]=lamb+2.*mu-BETA*s22**2 -H2233*H2233/H3333
    elif tangent=='thinWalled':
        H[0,0]=lamb+2.*mu - BETA*s11**2 -H1122*(H1122+H1133)/(H2233+H2222)
        H[0,1]=-BETA*s11*s12 -H1222*(H1122+H1133)/(H2233+H2222)
        H[0,2]=lamb-BETA*s11*s22
        H[1,0]=-BETA*s12*s11-H1122*(H1222+H1233)/(H2233+H2222)
        H[1,1]=mu-BETA*s12**2-H1222*(H1222+H1233)/(H2233+H2222)
        H[1,2]=-BETA*s12*s22
        H[2,0]=lamb - BETA*s11*s22
        H[2,1]=-BETA*s22*s12
        H[2,2]=lamb+2.*mu-BETA*s22**2
    else :
        H[0,0]=lamb+2.*mu - BETA*s11**2
        H[0,1]=-BETA*s11*s12
        # fix: was lamb-BETA*s11*s12, which broke the symmetry H1122 == H2211
        # satisfied by H[2,0] below and by both other branches
        H[0,2]=lamb-BETA*s11*s22
        H[1,0]=-BETA*s12*s11
        H[1,1]=mu-BETA*s12**2
        H[1,2]=-BETA*s12*s22
        H[2,0]=lamb-BETA*s11*s22
        H[2,1]=-BETA*s12*s22
        H[2,2]=lamb+2.*mu-BETA*s22**2
    return H
def acousticTensor(H, n):
    """Return [C11, C12, C22] of the acoustic tensor C = n.H.n.

    H is the 3x3 condensed tangent modulus and n = [n1, n2] the propagation
    direction in the plane.
    """
    n1, n2 = n[0], n[1]
    c11 = H[0, 0] * n1 ** 2 + 2. * H[0, 1] * n1 * n2 + H[1, 1] * n2 ** 2
    c12 = H[0, 1] * n1 ** 2 + (H[0, 2] + H[1, 1]) * n1 * n2 + H[1, 2] * n2 ** 2
    c22 = H[1, 1] * n1 ** 2 + 2. * H[2, 1] * n1 * n2 + H[2, 2] * n2 ** 2
    return np.array([c11, c12, c22])
def acousticEigenStructure(C):
    """Eigenpairs of the symmetric 2x2 acoustic tensor [[C11,C12],[C12,C22]].

    Returns [omega1, w1], [omega2, w2] where omega1 >= omega2; omega1 is
    associated with the fast wave (cf) and omega2 with the slow wave (cs).
    """
    c11, c12, c22 = C[0], C[1], C[2]
    half_trace = 0.5 * (c11 + c22)
    gap = 0.5 * np.sqrt((c11 - c22) ** 2 + 4. * c12 ** 2)
    omega1 = half_trace + gap
    omega2 = half_trace - gap
    w1 = np.array([-c12, c11 - omega1])
    w2 = np.array([-c12, c11 - omega2])
    return [omega1, w1], [omega2, w2]
def vonMisesYieldSurface(sigy):
    """Sample the von Mises cylinder (50 points of one cross-section circle)
    rotated into the principal-stress frame for 3D plotting.

    Returns a (3, 50) array of principal-stress coordinates.
    """
    radius = np.sqrt((2. / 3.) * sigy ** 2)
    theta = np.linspace(0, 2 * np.pi, 50)
    # Circle of radius ||s|| drawn in the plane s1 = 0 before rotation
    circle = np.array([np.zeros_like(theta),
                       radius * np.cos(theta),
                       radius * np.sin(theta)])
    half = np.sqrt(2.) / 2.
    rot_z = np.array([[half, -half, 0.], [half, half, 0.], [0., 0., 1.]])
    elev = np.arctan(1. / np.sqrt(2.0))
    ce, se = np.cos(elev), np.sin(elev)
    rot_y = np.array([[ce, 0., -se], [0., 1., 0.], [se, 0., ce]])
    cylindre = np.zeros((3, len(theta)))
    for i in range(len(theta)):
        cylindre[:, i] = np.dot(rot_z, np.dot(rot_y, circle[:, i]))
    return cylindre
def computeDeviatoricPart(T):
    """Deviatoric part of the stress T = [T11, T21, T22, T33].

    Returns [s11, sqrt(2)*s12, s22, s33]; the sqrt(2) factor on the shear
    component makes np.dot(result, result) equal the full tensor norm s:s.
    """
    mean = (T[0] + T[2] + T[3]) / 3.
    return np.array([T[0] - mean, np.sqrt(2.) * T[1], T[2] - mean, T[3] - mean])
def computeCriterion(sig11, sig22, sig12, sig33, sigy):
    """Von Mises yield function f = sqrt(3/2)*||s|| - sigy (f < 0 is elastic)."""
    deviator = computeDeviatoricPart(np.array([sig11, sig12, sig22, sig33]))
    equivalent = np.sqrt(3. / 2.) * np.sqrt(np.dot(deviator, deviator))
    return equivalent - sigy
def computePsiSlow(sig11,sigma,sig33,lamb,mu,beta,tangent,rho):
    """Slopes [dsig12/dsig11, dsig22/dsig11] along the slow-wave loading path.

    sig11 is the driving (integration) variable; sigma packs the driven
    components [sig12, sig22].  The remaining arguments are forwarded to
    tangentModulus.  Signature is shaped for use as the right-hand side of
    scipy.integrate.ode (sig11 plays the role of "t").
    """
    # sig11 driven
    n1=1.;n2=0.   # propagation direction along x1
    sig12=sigma[0];sig22=sigma[1]
    H=tangentModulus(np.array([sig11,sig12,sig22,sig33]),lamb,mu,beta,tangent)
    C=acousticTensor(H,np.array([n1,n2]))
    eigenf,eigens=acousticEigenStructure(C)
    # Cofactor-style combinations of the tangent operator entering the path
    # slopes (derivation not shown here -- presumably from the characteristic
    # analysis of the quasi-linear system; confirm against the companion notes)
    alpha11=H[0,1]*H[1,2]- H[1,1]*H[0,2]
    alpha12=-H[0,1]*H[0,2]-H[0,0]*H[2,1]
    alpha22=H[0,0]*H[1,1]-H[0,1]**2
    # Components of the eigenvector paired with the fast characteristic speed
    w1=eigenf[1][0];w2=eigenf[1][1]
    psi12=-2.*w1/w2
    psi22=(2.*w1*alpha12/w2-alpha11)/alpha22
    # Disabled alternative based on left eigenvectors of the full 5x5
    # quasi-linear system; kept for reference.
    """
    n1=1.;n2=0.
    JN=-np.array([[0.,0.,n1/rho,n2/rho,0.],[0.,0.,0.,n1/rho,n2/rho],[H[0,0]*n1+H[0,1]*n2,H[0,1]*n1+H[0,2]*n2,0.,0.,0.],[H[0,1]*n1+H[1,1]*n2,H[1,1]*n1+H[1,2]*n2,0,0,0],[H[2,0]*n1+H[2,1]*n2,H[2,1]*n1+H[2,2]*n2,0,0,0]])
    eigenStructure=np.linalg.eig(JN.T)
    contact=np.where(eigenStructure[0]==0)[0][0]
    cfplus=np.where(eigenStructure[0]==np.max(eigenStructure[0]))[0][0]
    cfminus=np.where(eigenStructure[0]==np.min(eigenStructure[0]))[0][0]
    index=np.ones(5);index[[contact,cfminus,cfplus]]-=1
    cs=np.where(index!=0.)[0]
    csminus=np.where(eigenStructure[0]==np.min(eigenStructure[0][cs]))[0][0]
    csplus=np.where(eigenStructure[0]==np.max(eigenStructure[0][cs]))[0][0]
    lcfminus=eigenStructure[1][:,cfminus];lcfplus=eigenStructure[1][:,cfplus]
    lcontact=eigenStructure[1][:,contact]
    dl=lcfminus-lcfplus
    if not (dl[4]!=0. and dl[0]!=0. and dl[1]!=0.):
        psi12=-dl[2]/dl[3]
    if not (lcontact[0]>1.e-6 and lcontact[1]>1.e-6):
        psi22=(lcontact[3]*(dl[2]/dl[3])-lcontact[2])/lcontact[4]
    """
    return np.array([psi12,psi22])
def computeLodeAngle(sig11, sig22, sig12, sig33):
    """Lode angle of the stress deviator, in degrees."""
    sDev = computeDeviatoricPart(np.array([sig11, sig12, sig22, sig33]))
    s11, s22, s33 = sDev[0], sDev[2], sDev[3]
    s12 = sDev[1] / np.sqrt(2.)  # undo the sqrt(2) storage factor
    # Second and third deviatoric invariants
    sqrtJ2 = np.sqrt(0.5 * np.dot(sDev, sDev))
    J3 = s33 * (s11 * s22 - s12 ** 2)
    cos3theta = (3. / 2.) * np.sqrt(3.) * J3 / sqrtJ2 ** 3
    return np.degrees(np.arccos(cos3theta) / 3.)
def updateEquivalentPlasticStrain(sig, sign, H):
    """Increment of equivalent plastic strain between two stress states.

    Both sig (old) and sign (new) are packed [sig11, sqrt(2)*sig12, sig22,
    sig33]; H is the hardening modulus.  The flow direction is evaluated at
    the new state.
    """
    sigDev = computeDeviatoricPart(
        np.array([sign[0], sign[1] / np.sqrt(2.), sign[2], sign[3]]))
    flow = sigDev / np.sqrt(np.dot(sigDev, sigDev))  # unit flow direction
    return np.sqrt(3. / 2.) * np.dot(flow, sign - sig) / H
def plasticResidual(sig, sign, p, pn, H):
    """Residual pn - p - dp for the plastic-consistency root solve.

    sig/sign are packed [sig11, sqrt(2)*sig12, sig22, sig33]; p and pn are
    the old and trial equivalent plastic strains, H the hardening modulus.
    """
    sigDev = computeDeviatoricPart(
        np.array([sign[0], sign[1] / np.sqrt(2.), sign[2], sign[3]]))
    flow = sigDev / np.sqrt(np.dot(sigDev, sigDev))  # unit flow direction
    dp = np.sqrt(3. / 2.) * np.dot(flow, sign - sig) / H
    return pn - p - dp
def computeEigenStresses(sig):
    """Principal stresses of a plane-stress state.

    sig is the 3x3 matrix
        | sig11 sig12   0   |
        | sig12 sig22   0   |
        |   0     0   sig33 |
    so sig33 is already principal and the in-plane pair follows in closed form.
    """
    in_plane_sum = sig[0, 0] + sig[1, 1]
    root = np.sqrt((sig[0, 0] - sig[1, 1]) ** 2 + 4. * sig[0, 1] ** 2)
    return np.array([0.5 * (in_plane_sum - root),
                     0.5 * (in_plane_sum + root),
                     sig[2, 2]])
from mpl_toolkits.mplot3d import proj3d
def orthogonal_proj(zfront, zback):
    """Orthographic replacement for matplotlib's perspective transform.

    Returns the 4x4 homogeneous projection matrix used by mpl_toolkits'
    proj3d machinery.
    """
    depth = zfront - zback
    a = (zfront + zback) / depth
    b = -2. * zfront * zback / depth
    return np.array([[1, 0, 0, 0],
                     [0, 1, 0, 0],
                     [0, 0, a, b],
                     [0, 0, 0, zback]])
# Force an orthographic projection for the 3D deviatoric-plane plots
proj3d.persp_transformation = orthogonal_proj

Samples=5
# Sample constant stress component sig22
sig22=np.linspace(0.,sigy,Samples)
#sig22=np.linspace(-sigy/np.sqrt(1-nu+nu**2),sigy/np.sqrt(1-nu+nu**2),Samples)
Samples*=10
# Column k of sig/tau holds, for the k-th sig22 sample, the sig11 abscissae
# and the shear stress putting (sig11, sig12, sig22) on the initial surface
sig=np.zeros((Samples,Samples))
tau=np.zeros((Samples,Samples))
frames=[10,20,40]
frames=[5,10,15,20]   # offsets (from the end) of the path starting points
col=["r","g","b","y","c","m","k","p"]
tauM=1.5*sigy/np.sqrt(3.)
sigM=1.5*sigy/np.sqrt(1-nu+nu**2)
tauM=sigM
Niter=1000            # integration steps per loading path
# Path histories indexed [step, frame, sig22-sample]
TAU=np.zeros((Niter,len(frames),len(sig22)))
SIG11=np.zeros((Niter,len(frames),len(sig22)))
SIG22=np.zeros((Niter,len(frames),len(sig22)))
eigsigS=np.zeros((Niter,len(frames),len(sig22),3))   # principal stresses
criterionS=np.zeros((Niter,len(frames)))             # yield function values
PsiS=np.zeros((Samples,len(sig22)))
plast_S=np.zeros((Niter,len(frames)))                # equivalent plastic strain
LodeAngle_S=np.zeros((Niter,len(frames)))
# Boolean to plot the updated yield surface
updated_criterion=False
for k in range(len(sig22)-1):
    s22=sig22[k]
    Delta=(4.*sigy**2- 3.*s22**2)
    sigMax=(s22+np.sqrt(Delta))/2.
    sigMin=(s22-np.sqrt(Delta))/2.
    # Sample stress component sig11
    sig[:,k]=np.linspace(sigMin,sigMax,Samples)
    sig[:,k]=np.linspace(0.,sigMax,Samples)
    # Compute shear stress satisfying the criterion given sig11 and sig22
    for i in range(Samples):
        s11=sig[i,k]
        delta=(s11*s22 -s11**2-s22**2 + sigy**2)/3.
        if np.abs(delta)<10. : delta=np.abs(delta)   # clamp tiny negative round-off
        tauMax=np.sqrt(delta)
        f_vm=lambda x:computeCriterion(s11,s22,x,0.,sigy)
        tau[i,k]=np.sqrt(delta)
## LOADING PATHS PLOTS
# For each sig22 sample, integrate sig11-driven stress paths starting from
# several points (frames) on the initial yield surface.
for k in range(len(sig22)-1)[1:]:
    s22=sig22[k]
    sigM=1.25*np.max(sig[:,k])
    tauM=1.25*np.max(tau[:,k])
    ## For each value of sig22 trace the loading paths given by psis from yield surface to an arbitrary shear stress level
    approx=np.zeros((len(frames),2))
    ordonnees=np.zeros((len(frames),Samples))
    abscisses=np.zeros((len(frames),Samples))
    radius_S=np.zeros(len(frames))
    for s,i in enumerate(frames):
        if i==0:
            continue
        # Starting point on the initial yield surface
        sig0=sig[-1-i,k]
        tau0=tau[-1-i,k]
        dsig=(sigM-sig0)/Niter
        SIG11[:,s,k]=np.linspace(sig0,sigM,Niter)
        TAU[0,s,k]=tau0
        SIG22[0,s,k]=s22
        #rSlow = ode(computePsiSlow).set_integrator('vode',method='bdf')
        rSlow = ode(computePsiSlow).set_integrator('vode',method='adams',order=12)
        rSlow.set_initial_value(np.array([TAU[0,s,k],SIG22[0,s,k]]),SIG11[0,s,k]).set_f_params(0.,lamb,mu,beta,'planeStress',rho)
        sigma = np.matrix([[SIG11[0,s,k],TAU[0,s,k],0.],[TAU[0,s,k],SIG22[0,s,k],0.],[0.,0.,0.]])
        eigsig=np.linalg.eig(sigma)[0]
        eigsigS[0,s,k,:]=eigsig
        LodeAngle_S[0,s]=computeLodeAngle(sigma[0,0],SIG22[0,s,k],sigma[0,1],0.)
        p=0.       # equivalent plastic strain along the path
        epsp33=0.
        for j in range(Niter-1):
            rSlow.set_f_params(np.array([TAU[j,s,k],SIG22[j,s,k]]),0.,lamb,mu,beta,'planeStress',rho)
            if not rSlow.successful():
                print "Integration issues in slow wave path"
                break
            rSlow.integrate(rSlow.t+dsig)
            TAU[j+1,s,k],SIG22[j+1,s,k]=rSlow.y
            sigma = np.array([SIG11[j,s,k],np.sqrt(2.)*TAU[j,s,k],SIG22[j,s,k],0.])
            sigman = np.array([SIG11[j+1,s,k],np.sqrt(2.)*TAU[j+1,s,k],SIG22[j+1,s,k],0.])
            f_vm=computeCriterion(SIG11[j+1,s,k],SIG22[j+1,s,k],TAU[j+1,s,k],0.,sigy+H*p)
            # Consistency enforced by root-finding the hardening state p that
            # puts the new stress back on the hardened yield surface
            #if f_vm>0. :
            #p+=updateEquivalentPlasticStrain(sigma,sigman,H)
            #residual=lambda x: plasticResidual(sigma,sigman,p,x,H)
            residual=lambda x: computeCriterion(SIG11[j+1,s,k],SIG22[j+1,s,k],TAU[j+1,s,k],0.,sigy+H*x)
            p=scipy.optimize.root(residual,p,method='hybr',options={'xtol':1.e-12}).x[0]
            criterionS[j+1,s]=computeCriterion(SIG11[j+1,s,k],SIG22[j+1,s,k],TAU[j+1,s,k],0.,sigy+H*p)
            plast_S[j+1,s]=p
            LodeAngle_S[j+1,s]=computeLodeAngle(sigman[0],sigman[2],sigman[1]/np.sqrt(2.),0.)
            # Eigenvalues of sigma (for deviatoric plane plots)
            sigma = np.matrix([[SIG11[j+1,s,k],TAU[j+1,s,k],0.],[TAU[j+1,s,k],SIG22[j+1,s,k],0.],[0.,0.,0.]])
            eigsigS[j+1,s,k,:]=computeEigenStresses(sigma)
        print "Final equivalent plastic strain after slow wave : ",p
        radius_S[s]=sigy+H*p
TAU_MAX_S=np.max(ordonnees)
SIG_MAX_S=np.max(abscisses)
### SUBPLOTS SETTINGS
# Left: plastic strain and yield function against Lode angle.
# Right: 3D loading paths in the deviatoric (principal-stress) plane.
fig = plt.figure()
ax2=plt.subplot2grid((1,2),(0,1),projection='3d')
ax1d1=plt.subplot2grid((1,2),(0,0))
ax1d1.grid()
ax1d1.set_xlabel(r'$\Theta$', fontsize=24)
ax1d1.set_ylabel('p', fontsize=24)
fvm1=ax1d1.twinx()   # secondary axis for the yield function value
fvm1.set_ylabel('f',fontsize=18.)
fvm1.ticklabel_format(style='sci', axis='y', scilimits=(0,0))
# Initial von Mises cylinder in the principal frame
cylindre=vonMisesYieldSurface(sigy)
ax2.plot_wireframe(cylindre[0,:],cylindre[1,:],cylindre[2,:], color="k")
# View down the hydrostatic axis sigma1=sigma2=sigma3
elevation_Angle_radian=np.arctan(1./np.sqrt(2.0))
angle_degree= 180.*elevation_Angle_radian/np.pi
radius=1.*np.sqrt((2./3.)*sigy**2)
ax2.set_xlim(-1.*radius,1.*radius)
ax2.set_ylim(-1.*radius,1.*radius)
ax2.set_zlim(-1.*radius,1.*radius)
ax2.view_init(angle_degree,45.)
ax2.plot([0.,sigy],[0.,sigy],[0.,sigy],color="k")
ax2.set_xlabel(r'$\sigma_1$',size=24.)
ax2.set_ylabel(r'$\sigma_2$',size=24.)
ax2.set_zlabel(r'$\sigma_3$',size=24.)
for p in range(len(frames)):
    if updated_criterion :
        # Hardened surface reached at the end of path p
        cylindre=vonMisesYieldSurface(radius_S[p])
        ax2.plot_wireframe(cylindre[0,:],cylindre[1,:],cylindre[2,:], color=col[p],linestyle='--')
    ## 2D plot of equivalent plastic strain evolution
    ax1d1.plot(LodeAngle_S[:Niter/5,p],plast_S[:Niter/5,p],col[p])
    #ax1d1_2.plot(LodeAngle_S[:Niter/5,p],SIG33_S[:Niter/5,p,k],col[p],marker='o')
    fvm1.plot(LodeAngle_S[:,p],criterionS[:,p],col[p],linestyle='--')
    ## 3D plots of loading paths (deviatoric plane)
    ax2.plot(eigsigS[:,p,k,0],eigsigS[:,p,k,1],eigsigS[:,p,k,2],color=col[p],marker="o")
# Projected principal axes drawn as dashed guide lines
ax2.plot([-sigy,sigy],[0.,0.],[0.,0.],color="k",linestyle="--",lw=1.)
ax2.plot([0.,0.],[-sigy,sigy],[0.,0.],color="k",linestyle="--",lw=1.)
ax2.plot([-radius,radius],[radius,-radius],[0.,0.],color="k",linestyle="--",lw=1.)
#plt.show()
# Second figure: the same paths in the (sig11, sig12) and (sig22, sig12)
# stress planes, with the initial yield-surface cross-sections overlaid.
fig = plt.figure()
ax1=plt.subplot2grid((1,2),(0,0))
ax2=plt.subplot2grid((1,2),(0,1))
ax1.set_xlabel(r'$\sigma_{11}$',size=28.)
ax1.set_ylabel(r'$\sigma_{12}$',size=28.)
#ax1.set_zlabel(r'$\sigma_{22}$',size=28.)
ax2.set_xlabel(r'$\sigma_{22}$',size=28.)
ax2.set_ylabel(r'$\sigma_{12}$',size=28.)
#ax2.set_zlabel(r'$\sigma_{11}$',size=28.)
ax1.grid()
ax2.grid()
#ax2.view_init(-90.,-0.)
#ax1.view_init(-90.,0.)
for s,i in enumerate(frames):
    # Yield ellipse in the (sig22, sig12) plane at the path's fixed sig11
    sig0=sig[-1-i,k]
    s22max=(sig0+np.sqrt(4*sigy**2-3.*sig0**2))/2.
    s22min=(sig0-np.sqrt(4*sigy**2-3.*sig0**2))/2.
    s22=np.linspace(s22min,s22max,Samples)
    s12=np.sqrt((sigy**2- sig0**2-s22**2+sig0*s22)/3.)
    ax2.plot(s22,s12,color=col[s])
ax1.plot(sig[:,k],tau[:,k],'k')
#ax2.plot(sig[:,k],tau[:,k],sig22[k],'k')
for p in range(len(frames)):
    ax1.plot(SIG11[:,p,k],TAU[:,p,k],color=col[p])
    ax2.plot(SIG22[:,p,k],TAU[:,p,k],color=col[p])
plt.show()
| 37.422111 | 216 | 0.589701 | 2,644 | 14,894 | 3.294629 | 0.131619 | 0.013776 | 0.007577 | 0.008725 | 0.362645 | 0.316841 | 0.239353 | 0.170474 | 0.1443 | 0.122259 | 0 | 0.127085 | 0.178998 | 14,894 | 397 | 217 | 37.516373 | 0.585296 | 0.110044 | 0 | 0.09894 | 0 | 0 | 0.022156 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0 | 0.028269 | null | null | 0.007067 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
91c2124933101c4997c3e85497e979cf423b2846 | 10,418 | py | Python | Tests/test_ironmath.py | btddg28/ironpython | 8006238c19d08db5db9bada39d765143e631059e | [
"Apache-2.0"
] | null | null | null | Tests/test_ironmath.py | btddg28/ironpython | 8006238c19d08db5db9bada39d765143e631059e | [
"Apache-2.0"
] | null | null | null | Tests/test_ironmath.py | btddg28/ironpython | 8006238c19d08db5db9bada39d765143e631059e | [
"Apache-2.0"
] | null | null | null | #####################################################################################
#
# Copyright (c) Microsoft Corporation. All rights reserved.
#
# This source code is subject to terms and conditions of the Apache License, Version 2.0. A
# copy of the license can be found in the License.html file at the root of this distribution. If
# you cannot locate the Apache License, Version 2.0, please send an email to
# ironpy@microsoft.com. By using this source code in any fashion, you are agreeing to be bound
# by the terms of the Apache License, Version 2.0.
#
# You must not remove this notice, or any other, from this software.
#
#
#####################################################################################
#
# test Microsoft.Scripting.Math
#
from iptest.assert_util import *
skiptest("win32")
from System import *
import clr
#silverlight already has this
if is_cli:
math_assembly = (1).GetType().Assembly
clr.AddReference(math_assembly)
load_iron_python_test()
import IronPythonTest
if is_net40:
from System.Numerics import BigInteger, Complex
else:
from Microsoft.Scripting.Math import BigInteger
from Microsoft.Scripting.Math import Complex64 as Complex
class myFormatProvider(IFormatProvider):
    """Minimal .NET IFormatProvider stub passed to the To*(provider) calls."""
    # NOTE(review): 'self' is missing, so p.ToString() would fail if invoked;
    # the tests only pass the instance as a provider -- confirm intentional
    def ToString():pass

# Shared provider instance used by the conversion tests below
p = myFormatProvider()
def test_bigint():
    """Arithmetic, shifts, bit ops, and IConvertible conversions on BigInteger."""
    AreEqual(BigInteger.Add(1,99999999999999999999999999999999999999999999999999999999999) ,BigInteger.Subtract(100000000000000000000000000000000000000000000000000000000001,1))
    AreEqual(BigInteger.Multiply(400,500) , BigInteger.Divide(1000000,5))
    AreEqual(BigInteger.Multiply(400,8) , BigInteger.LeftShift(400,3))
    AreEqual(BigInteger.Divide(400,8) , BigInteger.RightShift(400,3))
    AreEqual(BigInteger.RightShift(BigInteger.LeftShift(400,100),100) , 400)
    AreEqual(BigInteger.RightShift(BigInteger.LeftShift(-12345678987654321,100),100) , -12345678987654321)
    if is_net40:
        # System.Numerics.BigInteger (.NET 4) rejects negative shift counts...
        AssertError(ValueError, BigInteger.RightShift, 400, -100)
        AssertError(ValueError, BigInteger.LeftShift, 400, -100)
        AssertError(ValueError, BigInteger.RightShift, -12345678987654321, -100)
        AssertError(ValueError, BigInteger.LeftShift, -12345678987654321, -100)
    else:
        # ...while the legacy implementation treats them as opposite shifts
        AreEqual(BigInteger.LeftShift(BigInteger.RightShift(400,-100),-100) , 400)
        AreEqual(BigInteger.LeftShift(BigInteger.RightShift(-12345678987654321,-100),-100) , -12345678987654321)
    # Ones' complement is an involution and equals -(x+1)
    AreEqual(BigInteger(-123456781234567812345678123456781234567812345678123456781234567812345678).OnesComplement().OnesComplement() , -123456781234567812345678123456781234567812345678123456781234567812345678)
    AreEqual(BigInteger(-1234567812345678123456781234567812345678123456781234567812345678123456781234567812345678).OnesComplement() , -(-1234567812345678123456781234567812345678123456781234567812345678123456781234567812345678 + 1 ))
    Assert(BigInteger.Xor(-1234567812345678123456781234567812345678123456781234567812345678123456781234567812345678,BigInteger(-1234567812345678123456781234567812345678123456781234567812345678123456781234567812345678).OnesComplement()) , -1)
    AreEqual(BigInteger.BitwiseAnd(0xff00ff00,BigInteger.BitwiseOr(0x00ff00ff,0xaabbaabb)) , BigInteger(0xaa00aa00))
    # Mod keeps the sign of the dividend
    AreEqual(BigInteger.Mod(BigInteger(-9999999999999999999999999999999999999999),1000000000000000000) , -BigInteger.Mod(9999999999999999999999999999999999999999,BigInteger(-1000000000000000000)))
    AreEqual(BigInteger.ToInt64(0x7fffffffffffffff) , 9223372036854775807)
    AssertError(OverflowError, BigInteger.ToInt64, 0x8000000000000000)
    # IConvertible conversions through the shared format provider p
    AreEqual(BigInteger(-0).ToBoolean(p) , False )
    AreEqual(BigInteger(-1212321.3213).ToBoolean(p) , True )
    AreEqual(BigInteger(1212321384892342394723947).ToBoolean(p) , True )
    AreEqual(BigInteger(0).ToChar(p) , Char.MinValue)
    AreEqual(BigInteger(65).ToChar(p) , IConvertible.ToChar('A', p))
    AreEqual(BigInteger(0xffff).ToChar(p) , Char.MaxValue)
    AssertError(OverflowError, BigInteger(-1).ToChar, p)
    AreEqual(BigInteger(100).ToDouble(p) , 100.0)
    AreEqual(BigInteger(BigInteger(100).ToDouble(p)).ToSingle(p) , BigInteger(100.1213123).ToFloat())
    # Mixed-type comparisons against Python floats
    Assert(BigInteger(100) != 100.32)
    AreEqual(BigInteger(100) , 100.0)
    Assert( 100.32 != BigInteger(100))
    AreEqual(100.0 , BigInteger(100) )
def test_big_1():
    """Boundary checks for the provider-taking To* conversions of each
    integral CLR type (signed types use base 2, unsigned use base 0)."""
    cases = [
        (7, "ToSByte", SByte, 2),
        (8, "ToByte", Byte, 0),
        (15, "ToInt16", Int16, 2),
        (16, "ToUInt16", UInt16, 0),
        (31, "ToInt32", Int32, 2),
        (32, "ToUInt32", UInt32, 0),
        (63, "ToInt64", Int64, 2),
        (64, "ToUInt64", UInt64, 0),
    ]
    for bits, method, clr_type, base in cases:
        convert = lambda value: getattr(BigInteger(value), method)(p)
        # Lowest representable value (-2**bits for signed, 0 for unsigned)
        AreEqual(convert(-base ** bits), clr_type.MinValue)
        # Highest representable value
        AreEqual(convert(2 ** bits - 1), clr_type.MaxValue)
        # Zero converts to zero
        AreEqual(convert(0), 0)
        # One past either end must overflow
        AssertError(OverflowError, getattr(BigInteger(2 ** bits), method), p)
        AssertError(OverflowError, getattr(BigInteger(-1 - base ** bits), method), p)
def test_big_2():
    """Same boundary checks as test_big_1 but through the no-argument To*
    overloads (only the 32- and 64-bit conversions exist without a provider)."""
    for (a, m, t,x) in [
        (31, "ToInt32",Int32,2),
        (32, "ToUInt32",UInt32,0),
        (63, "ToInt64",Int64,2),
        (64, "ToUInt64",UInt64,0)
        ]:
        # Minimum representable value round-trips
        b = BigInteger(-x ** a )
        left = getattr(b, m)()
        right = t.MinValue
        AreEqual(left, right)
        # Maximum representable value round-trips
        b = BigInteger(2 ** a -1)
        left = getattr(b, m)()
        right = t.MaxValue
        AreEqual(left, right)
        # Zero converts to zero
        b = BigInteger(0)
        left = getattr(b, m)()
        right = t.MaxValue - t.MaxValue
        AreEqual(left, right)
        # One past either end overflows
        AssertError(OverflowError,getattr(BigInteger(2 ** a ), m))
        AssertError(OverflowError,getattr(BigInteger(-1 - x ** a ), m))
# Complex number API
def test_complex():
    """Basic algebraic identities on the CLR Complex type."""
    # z + conj(z) == 2 * Re(z)
    AreEqual(
        Complex.Add(
            Complex(BigInteger(9999), -1234),
            Complex.Conjugate(Complex(9999, -1234)) ),
        Complex.Multiply(BigInteger(9999), 2) )
    # z + (-z) == z - z
    AreEqual(
        Complex.Add(
            Complex(99999.99e-200, 12345.88e+100),
            Complex.Negate(Complex(99999.99e-200, 12345.88e+100)) ),
        Complex.Subtract(
            Complex(99999.99e-200, 12345.88e+100),
            Complex(99999.99e-200, 12345.88e+100) ))
    # Division interoperates with Python complex literals
    AreEqual(
        Complex.Divide(4+2j,2),
        (2 + 1j) )
    Assert(not hasattr(Complex, "Mod")) #IP 1.x had limited support for modulo which has been removed
def test_bool_misc():
    """Sign / zero / negative / positive predicates, covering the .NET 2
    (IsZero is a method) vs .NET 4 (IsZero is a property) API split."""
    if is_net40:
        def is_zero(bigint):
            return bigint.IsZero
    else:
        def is_zero(bigint):
            return bigint.IsZero()
    expectations = [
        (-1234, -1, True, False),
        (0, 0, False, False),
        (1234, 1, False, True),
    ]
    for value, sign, negative, positive in expectations:
        big = BigInteger(value)
        AreEqual(big.Sign, sign)
        AreEqual(is_zero(big), value == 0)
        AreEqual(big.IsNegative(), negative)
        AreEqual(big.IsPositive(), positive)
def test_byte_conversions():
    """Round-trip BigInteger <-> little-endian two's-complement byte arrays."""
    def CheckByteConversions(bigint, bytes):
        # ToByteArray and Create(Array[Byte]) must be exact inverses
        SequencesAreEqual(bigint.ToByteArray(), bytes)
        AreEqual(BigInteger.Create(Array[Byte](bytes)), bigint)
    # Negative values: the most significant byte keeps the sign bit set
    CheckByteConversions(BigInteger(0x00), [0x00])
    CheckByteConversions(BigInteger(-0x01), [0xff])
    CheckByteConversions(BigInteger(-0x81), [0x7f, 0xff])
    CheckByteConversions(BigInteger(-0x100), [0x00, 0xff])
    CheckByteConversions(BigInteger(-0x1000), [0x00, 0xf0])
    CheckByteConversions(BigInteger(-0x10000), [0x00, 0x00, 0xff])
    CheckByteConversions(BigInteger(-0x100000), [0x00, 0x00, 0xf0])
    CheckByteConversions(BigInteger(-0x10000000), [0x00, 0x00, 0x00, 0xf0])
    CheckByteConversions(BigInteger(-0x100000000), [0x00, 0x00, 0x00, 0x00, 0xff])
    # Positive values: a 0x00 byte is appended when the high bit would be set
    CheckByteConversions(BigInteger(0x7f), [0x7f])
    CheckByteConversions(BigInteger(0xff), [0xff, 0x00])
    CheckByteConversions(BigInteger(0x0201), [0x01, 0x02])
    CheckByteConversions(BigInteger(0xf2f1), [0xf1, 0xf2, 0x00])
    CheckByteConversions(BigInteger(0x03020100), [0x00, 0x01, 0x02, 0x03])
    CheckByteConversions(BigInteger(0x0403020100), [0x00, 0x01, 0x02, 0x03, 0x04])
    CheckByteConversions(BigInteger(0x0706050403020100), [0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07])
    CheckByteConversions(BigInteger(0x080706050403020100), [0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08])
def test_dword_conversions():
    """GetWords / CreateBigInteger round trips via unsigned 32-bit digit arrays."""
    def CheckDwordConversions(bigint, dwords):
        SequencesAreEqual(bigint.GetWords(), dwords)
        if bigint == BigInteger.Zero:
            # Zero is only constructible with sign 0
            AreEqual(
                IronPythonTest.System_Scripting_Math.CreateBigInteger(
                    0,
                    Array[UInt32](dwords),),
                bigint)
        else:
            # The same digits with sign +1 / -1 give the value and its negation
            AreEqual(
                IronPythonTest.System_Scripting_Math.CreateBigInteger(
                    1,
                    Array[UInt32](dwords)),
                bigint)
            AreEqual(
                IronPythonTest.System_Scripting_Math.CreateBigInteger(
                    -1,
                    Array[UInt32](dwords)),
                BigInteger.Negate(bigint))
    CheckDwordConversions(BigInteger(0), [0x00000000])
    CheckDwordConversions(BigInteger(1), [0x00000001])
    CheckDwordConversions(BigInteger((1<<31)), [0x80000000])
    CheckDwordConversions(BigInteger(((1<<31) + 9)), [0x80000009])
    CheckDwordConversions(BigInteger((1<<32)), [0x00000000, 0x00000001])
def test_misc():
    """Argument validation for CreateBigInteger plus CompareTo corner cases."""
    create = IronPythonTest.System_Scripting_Math.CreateBigInteger
    # A plain tuple is not a UInt32 array; None is rejected outright
    AssertError(ArgumentException, create, 0, (1, 2, 3))
    AssertError(ArgumentNullException, create, 0, None)
    # None always compares below any BigInteger
    AreEqual(BigInteger(1).CompareTo(None), 1)
    if is_net40:
        # .NET 4 BigInteger accepts booleans (True compares as 1)...
        AreEqual(BigInteger(1).CompareTo(True), 0)
    else:
        # ...the legacy implementation rejects them
        AssertError(ArgumentException, BigInteger(1).CompareTo, True)

run_test(__name__)
| 41.015748 | 241 | 0.656748 | 1,016 | 10,418 | 6.690945 | 0.25689 | 0.090026 | 0.023683 | 0.011474 | 0.31877 | 0.230656 | 0.192262 | 0.150044 | 0.10915 | 0.10915 | 0 | 0.203911 | 0.209637 | 10,418 | 253 | 242 | 41.177866 | 0.621691 | 0.062584 | 0 | 0.333333 | 0 | 0 | 0.010129 | 0 | 0 | 0 | 0.053154 | 0 | 0.095238 | 1 | 0.068783 | false | 0.005291 | 0.037037 | 0.010582 | 0.121693 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
91c59190736d04c98947f42fd90af017204111ac | 505 | py | Python | ndscheduler/server/handlers/index.py | symphonyrm/ndscheduler | e9a56ef345b25916a2b53d1ea3349efb532d63ce | [
"BSD-2-Clause"
] | null | null | null | ndscheduler/server/handlers/index.py | symphonyrm/ndscheduler | e9a56ef345b25916a2b53d1ea3349efb532d63ce | [
"BSD-2-Clause"
] | null | null | null | ndscheduler/server/handlers/index.py | symphonyrm/ndscheduler | e9a56ef345b25916a2b53d1ea3349efb532d63ce | [
"BSD-2-Clause"
] | null | null | null | """Serves the single page app web ui."""
import json
import tornado.gen
from ndscheduler import settings
from ndscheduler import utils
from ndscheduler.server.handlers import base
class Handler(base.BaseHandler):
"""Index page request handler."""
@tornado.gen.coroutine
def get(self):
"""Serve up the single page app for scheduler dashboard."""
meta_info = utils.get_all_available_jobs()
self.render(settings.APP_INDEX_PAGE, jobs_meta_info=json.dumps(meta_info))
| 25.25 | 82 | 0.732673 | 69 | 505 | 5.231884 | 0.565217 | 0.124654 | 0.072022 | 0.088643 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.174257 | 505 | 19 | 83 | 26.578947 | 0.865707 | 0.229703 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.1 | false | 0 | 0.5 | 0 | 0.7 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 1 |
91c6b0a778c821558e257de0d52e71c5f953c2bf | 801 | py | Python | Scripts/xbbtools/xbb_io.py | eoc21/biopython | c0f8db8f55a506837c320459957a0ce99b0618b6 | [
"PostgreSQL"
] | 3 | 2017-10-23T21:53:57.000Z | 2019-09-23T05:14:12.000Z | Scripts/xbbtools/xbb_io.py | eoc21/biopython | c0f8db8f55a506837c320459957a0ce99b0618b6 | [
"PostgreSQL"
] | null | null | null | Scripts/xbbtools/xbb_io.py | eoc21/biopython | c0f8db8f55a506837c320459957a0ce99b0618b6 | [
"PostgreSQL"
] | 6 | 2020-02-26T16:34:20.000Z | 2020-03-04T15:34:00.000Z | #!/usr/bin/env python
# Created: Wed Jun 21 13:46:35 2000
# Last changed: Time-stamp: <00/12/02 14:18:23 thomas>
# Thomas.Sicheritz@molbio.uu.se, http://evolution.bmc.uu.se/~thomas
# File: xbb_io.py
import os, sys # os.system, sys.argv
sys.path.insert(0, '.')
sys.path.insert(0, os.path.expanduser('~thomas/cbs/python/biopython'))
from Bio.ParserSupport import *
from Bio import Fasta
class xbb_io:
    """File I/O helper for the xbbtools scripts (FASTA reading via Bio.Fasta)."""
    def __init__(self):
        """No state is kept; the class only groups the I/O helpers."""
    def error(self, str):
        # Reporting hook kept trivial so GUI callers can override it.
        # (parameter name 'str' shadows the builtin but is part of the API)
        print(str)
    def read_fasta_file(self, file):
        """Parse *file* as FASTA and return a list of (sequence, title) pairs."""
        genes = []
        handle = open(file)
        try:
            iter = Fasta.Iterator(handle = handle, parser = Fasta.RecordParser())
            while 1:
                rec = iter.next()
                if not rec: break
                genes.append((rec.sequence, rec.title))
        finally:
            handle.close()  # fix: the file handle was previously never closed
        return genes
| 23.558824 | 81 | 0.604245 | 113 | 801 | 4.212389 | 0.663717 | 0.016807 | 0.054622 | 0.058824 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.045531 | 0.259675 | 801 | 33 | 82 | 24.272727 | 0.757167 | 0.260924 | 0 | 0 | 0 | 0 | 0.049658 | 0.047945 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0 | 0.166667 | null | null | 0.055556 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
91c92b40c4f1e26399a0ff522ec30f406f0ff98d | 934 | py | Python | nlp_annotator_api/server/app.py | IBM/deepsearch-nlp-annotator-api-example | 76c2c8fd83c1e6d51c51c7b581a8c3f273b23c40 | [
"Apache-2.0"
] | 3 | 2022-01-04T12:15:22.000Z | 2022-03-25T21:19:20.000Z | nlp_annotator_api/server/app.py | IBM/deepsearch-nlp-annotator-api-example | 76c2c8fd83c1e6d51c51c7b581a8c3f273b23c40 | [
"Apache-2.0"
] | null | null | null | nlp_annotator_api/server/app.py | IBM/deepsearch-nlp-annotator-api-example | 76c2c8fd83c1e6d51c51c7b581a8c3f273b23c40 | [
"Apache-2.0"
] | 5 | 2021-09-27T08:26:09.000Z | 2022-03-10T11:41:35.000Z | import logging
import os
import aiohttp.web
from connexion import AioHttpApp
from nlp_annotator_api.config.config import conf
from nlp_annotator_api.config.logging import setup_logging
from nlp_annotator_api.server.middleware.statsd_middleware import StatsdMiddleware
from nlp_annotator_api.server.signals.statsd_client import statsd_client_factory
# Configure logging before anything else emits records
setup_logging()

# Dedicated access logger handed to aiohttp's run loop below
access_log = logging.getLogger("nlp_annotator_api.access")

_file_dir = os.path.dirname(__file__)

# Connexion wraps aiohttp and wires handlers from the OpenAPI spec directory
app = AioHttpApp(
    __name__, specification_dir=os.path.join(_file_dir, "..", "resources", "schemas"),
    server_args=dict(
        client_max_size=8 * 1024**2  # allow request bodies up to 8 MiB
    )
)

app.add_api("openapi.yaml", pass_context_arg_name="request")

# Underlying aiohttp application, exposed for middleware/lifecycle wiring
aiohttp_app: aiohttp.web.Application = app.app

# Statsd client lifecycle is tied to the app; the middleware times requests
aiohttp_app.cleanup_ctx.append(statsd_client_factory(conf.statsd))
aiohttp_app.middlewares.append(StatsdMiddleware())

if __name__ == "__main__":
    app.run(access_log=access_log)
| 26.685714 | 86 | 0.799786 | 128 | 934 | 5.421875 | 0.4375 | 0.086455 | 0.108069 | 0.10951 | 0.144092 | 0 | 0 | 0 | 0 | 0 | 0 | 0.007168 | 0.103854 | 934 | 34 | 87 | 27.470588 | 0.821983 | 0 | 0 | 0 | 0 | 0 | 0.073876 | 0.025696 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0.043478 | 0.347826 | 0 | 0.347826 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 1 |
91c9ae32ffd6100ceb2a8fceee2c2c30ae4e7dc4 | 3,518 | py | Python | dataactcore/migrations/versions/8692ab1298e1_replace_filerequest_with_filegeneration.py | brianherman/data-act-broker-backend | 80eb055b9d245046192f7ad4fd0be7d0e11d2dec | [
"CC0-1.0"
] | 1 | 2019-06-22T21:53:16.000Z | 2019-06-22T21:53:16.000Z | dataactcore/migrations/versions/8692ab1298e1_replace_filerequest_with_filegeneration.py | brianherman/data-act-broker-backend | 80eb055b9d245046192f7ad4fd0be7d0e11d2dec | [
"CC0-1.0"
] | 3 | 2021-08-22T11:47:45.000Z | 2022-03-29T22:06:49.000Z | dataactcore/migrations/versions/8692ab1298e1_replace_filerequest_with_filegeneration.py | brianherman/data-act-broker-backend | 80eb055b9d245046192f7ad4fd0be7d0e11d2dec | [
"CC0-1.0"
] | 1 | 2020-07-17T23:50:56.000Z | 2020-07-17T23:50:56.000Z | """replace FileRequest with FileGeneration
Revision ID: 8692ab1298e1
Revises: 4bbc47f2b48d
Create Date: 2018-10-24 14:54:39.278159
"""
# revision identifiers, used by Alembic.
revision = '8692ab1298e1'
down_revision = '4bbc47f2b48d'
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
def upgrade(engine_name):
globals()["upgrade_%s" % engine_name]()
def downgrade(engine_name):
globals()["downgrade_%s" % engine_name]()
def upgrade_data_broker():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('file_generation',
sa.Column('created_at', sa.DateTime(), nullable=True),
sa.Column('updated_at', sa.DateTime(), nullable=True),
sa.Column('file_generation_id', sa.Integer(), nullable=False),
sa.Column('request_date', sa.Date(), nullable=False),
sa.Column('start_date', sa.Date(), nullable=False),
sa.Column('end_date', sa.Date(), nullable=False),
sa.Column('agency_code', sa.Text(), nullable=False),
sa.Column('agency_type', sa.Enum('awarding', 'funding', name='generation_agency_types'), server_default='awarding', nullable=False),
sa.Column('file_type', sa.Enum('D1', 'D2', name='generation_file_types'), server_default='D1', nullable=False),
sa.Column('file_path', sa.Text(), nullable=True),
sa.Column('is_cached_file', sa.Boolean(), nullable=False),
sa.PrimaryKeyConstraint('file_generation_id')
)
op.create_index(op.f('ix_file_generation_agency_code'), 'file_generation', ['agency_code'], unique=False)
op.create_index(op.f('ix_file_generation_agency_type'), 'file_generation', ['agency_type'], unique=False)
op.create_index(op.f('ix_file_generation_end_date'), 'file_generation', ['end_date'], unique=False)
op.create_index(op.f('ix_file_generation_file_type'), 'file_generation', ['file_type'], unique=False)
op.create_index(op.f('ix_file_generation_request_date'), 'file_generation', ['request_date'], unique=False)
op.create_index(op.f('ix_file_generation_start_date'), 'file_generation', ['start_date'], unique=False)
op.add_column('job', sa.Column('file_generation_id', sa.Integer(), nullable=True))
op.create_foreign_key('fk_file_request_file_generation_id', 'job', 'file_generation', ['file_generation_id'], ['file_generation_id'], ondelete='SET NULL')
op.drop_column('job', 'from_cached')
# ### end Alembic commands ###
def downgrade_data_broker():
    """Reverse upgrade_data_broker: restore job.from_cached, drop the FK and
    column from job, then drop the file_generation table and its enum types."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.add_column('job', sa.Column('from_cached', sa.BOOLEAN(), server_default=sa.text('false'), autoincrement=False, nullable=False))
    op.drop_constraint('fk_file_request_file_generation_id', 'job', type_='foreignkey')
    op.drop_column('job', 'file_generation_id')
    # Drop indexes before the table (mirror of the upgrade order, reversed).
    op.drop_index(op.f('ix_file_generation_start_date'), table_name='file_generation')
    op.drop_index(op.f('ix_file_generation_request_date'), table_name='file_generation')
    op.drop_index(op.f('ix_file_generation_file_type'), table_name='file_generation')
    op.drop_index(op.f('ix_file_generation_end_date'), table_name='file_generation')
    op.drop_index(op.f('ix_file_generation_agency_type'), table_name='file_generation')
    op.drop_index(op.f('ix_file_generation_agency_code'), table_name='file_generation')
    op.drop_table('file_generation')
    # drop_table does not remove PostgreSQL enum types, so drop them explicitly.
    op.execute("""
        DROP TYPE generation_agency_types
    """)
    op.execute("""
        DROP TYPE generation_file_types
    """)
    # ### end Alembic commands ###
| 45.102564 | 158 | 0.726549 | 484 | 3,518 | 4.958678 | 0.200413 | 0.204167 | 0.04 | 0.05 | 0.529583 | 0.4725 | 0.44625 | 0.3575 | 0.283333 | 0.265 | 0 | 0.017025 | 0.115122 | 3,518 | 77 | 159 | 45.688312 | 0.753935 | 0.091245 | 0 | 0.078431 | 0 | 0 | 0.376701 | 0.160177 | 0 | 0 | 0 | 0 | 0 | 1 | 0.078431 | false | 0 | 0.039216 | 0 | 0.117647 | 0 | 0 | 0 | 0 | null | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
91d4aad729e6a3ae80ef7ec7692d7daf662bb479 | 1,127 | py | Python | setup.py | garnaat/details | 07f2fc7f27b29a6ddcda918abf6ae0882450319e | [
"Apache-2.0"
] | 27 | 2015-03-01T10:54:32.000Z | 2021-09-08T14:52:30.000Z | setup.py | garnaat/details | 07f2fc7f27b29a6ddcda918abf6ae0882450319e | [
"Apache-2.0"
] | 3 | 2015-01-29T08:26:13.000Z | 2017-02-14T09:35:06.000Z | setup.py | garnaat/details | 07f2fc7f27b29a6ddcda918abf6ae0882450319e | [
"Apache-2.0"
] | 7 | 2015-03-26T13:53:34.000Z | 2017-05-23T20:58:28.000Z | #!/usr/bin/env python
from setuptools import setup, find_packages
import os
# No pinned runtime dependencies; install_requires stays empty.
requires = [
]

setup(
    name='details',
    # Single source of truth for the version: the details/_version file.
    version=open(os.path.join('details', '_version')).read(),
    description='Tools for processing AWS detailed billing reports',
    # NOTE(review): README.md is Markdown but no long_description_content_type
    # is set; PyPI may render it as plain text -- confirm before release.
    long_description=open('README.md').read(),
    author='Mitch Garnaat',
    author_email='mitch@scopely.com',
    url='https://github.com/scopely-devops/details',
    packages=find_packages(exclude=['tests*']),
    package_dir={'details': 'details'},
    install_requires=requires,
    license=open("LICENSE").read(),
    classifiers=(
        'Development Status :: 3 - Alpha',
        'Intended Audience :: Developers',
        'Intended Audience :: System Administrators',
        'Natural Language :: English',
        'License :: OSI Approved :: Apache Software License',
        'Programming Language :: Python',
        'Programming Language :: Python :: 2.6',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.3',
        'Programming Language :: Python :: 3.4'
    ),
)
| 30.459459 | 68 | 0.632653 | 117 | 1,127 | 6.034188 | 0.581197 | 0.161473 | 0.212465 | 0.110482 | 0.076487 | 0 | 0 | 0 | 0 | 0 | 0 | 0.011312 | 0.215617 | 1,127 | 36 | 69 | 31.305556 | 0.78733 | 0.017746 | 0 | 0 | 0 | 0 | 0.517179 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.066667 | 0 | 0.066667 | 0 | 0 | 0 | 0 | null | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
91d7cad5b4e7e6fe780b392c22b198941b8e6380 | 10,434 | py | Python | server/splunkdj/views.py | splunk/splunk-webframework | a4179558616f5f4fcbfa2b54e9179f30e6395264 | [
"Apache-2.0"
] | 31 | 2015-01-20T12:49:17.000Z | 2022-02-21T05:21:44.000Z | server/splunkdj/views.py | splunk/splunk-webframework | a4179558616f5f4fcbfa2b54e9179f30e6395264 | [
"Apache-2.0"
] | 2 | 2015-07-08T19:40:41.000Z | 2018-04-26T21:34:35.000Z | server/splunkdj/views.py | splunk/splunk-webframework | a4179558616f5f4fcbfa2b54e9179f30e6395264 | [
"Apache-2.0"
] | 8 | 2015-02-26T13:19:45.000Z | 2022-03-27T08:34:20.000Z | import sys
import pprint
import json
import datetime
import uuid
import urllib
import types
import traceback
from django.core.urlresolvers import reverse, resolve
from django.http import HttpResponseRedirect, Http404, HttpResponseServerError, HttpResponseNotFound
from django.conf import settings
from django.contrib.auth.decorators import login_required
from django.views.decorators.cache import never_cache
from django.views.debug import ExceptionReporter, get_safe_settings
from django.template import TemplateDoesNotExist, Context
from django.template.loader import render_to_string
from django.utils.encoding import force_bytes
from django.shortcuts import render
from splunkdj.decorators.render import render_to
from splunkdj.utility import make_splunkweb_url
from urlparse import urlparse
import logging
logger = logging.getLogger('spl.django.service')
error_logger = logging.getLogger('spl.django.request_error')
def format(value):
    """
    Format values appropriately for json.dumps:
    - Basic types will remain the same
    - Unicode will be converted to str
    - Everything else will be formatted using pprint
    """
    # NOTE: intentionally shadows the ``format`` builtin (kept for
    # backward compatibility with existing callers in this module).
    if value is None:
        return value
    # Python 2 scalar/container types that json.dumps can serialize natively.
    if isinstance(value, (int, long, str, float, list, dict, tuple, bool, unicode)):
        return value
    # Anything else: fall back to a pretty-printed string representation.
    return str(pprint.pformat(value))
def get_exception_info(request):
    """Collect diagnostic information about the exception currently being handled.

    Uses Django's debug ``ExceptionReporter`` to gather traceback, request,
    and settings data, then flattens it into JSON-serializable structures.

    Returns a tuple ``(errorid, ctx, full_info)``:
      - ``errorid``: fresh uuid4 hex identifying this error occurrence,
      - ``ctx``: the reporter's template context (with ``errorid`` added),
      - ``full_info``: a flat dict of everything we log, safe for json.dumps.
    """
    # We use Django's debug reporter, even though we are doing our own template.
    # This is because it has a great way of collecting all the useful info we
    # need, so no reason not to leverage it
    exc_info = sys.exc_info()
    reporter = ExceptionReporter(request, *exc_info)
    ctx = reporter.get_traceback_data()

    # This is a refactor of what the technical_500_template contains, just
    # doing the logic in Python rather than in a template. We collect all this
    # information so that we can log it.
    exception_type = ctx['exception_type'] if 'exception_type' in ctx else "No exception supplied"
    exception_value = ctx['exception_value'] if 'exception_value' in ctx else "No exception supplied"
    django_version = ctx["django_version_info"]
    python_executable = ctx['sys_executable']
    python_version = ctx['sys_version_info']
    python_path = ctx['sys_path']
    server_time = str(ctx['server_time'])

    # BUG FIX: the original assigned to a misspelled name ("unicdoe_hint"),
    # which raised a NameError below whenever 'unicode_hint' was absent
    # from ctx.
    unicode_hint = None
    if 'unicode_hint' in ctx:
        unicode_hint = ctx['unicode_hint']

    last_frame = None
    if 'lastframe' in ctx:
        frame_info = ctx['lastframe']
        last_frame = "%s in %s, line %s" % (frame_info['filename'], frame_info['function'], frame_info['lineno'])

    # Template loader debug info is only present for TemplateDoesNotExist.
    loaders = []
    if 'template_does_not_exist' in ctx and 'loader_debug_info' in ctx and ctx['loader_debug_info']:
        for loader in ctx['loader_debug_info']:
            loader_info = {"name": loader['loader'], "templates": []}
            for tmpl in loader['templates']:
                loader_info['templates'].append({"file": tmpl['name'], "exists": tmpl['exists']})
            loaders.append(loader_info)

    template_errors = None
    if 'template_info' in ctx and ctx['template_info']:
        template_info = ctx['template_info']
        template_errors = {
            "name": template_info['name'],
            "line": template_info['line'],
            "message": template_info['message']
        }

    # Flatten the stack frames (and their local variables) for logging.
    exception_info = []
    if 'frames' in ctx:
        frames = ctx['frames']
        for frame in frames:
            frame_info = {
                "filename": frame['filename'],
                "function": frame['function'],
                "line": frame['lineno'],
                "context_line": frame['context_line'],
                "vars": []
            }

            if 'vars' in frame:
                for var in frame['vars']:
                    frame_info['vars'].append({
                        "variable": str(var[0]),
                        "value": format(var[1])
                    })

            exception_info.append(frame_info)

    request_info = {
        "path_info": request.path_info,
        "method": request.META['REQUEST_METHOD'],
        "url": request.build_absolute_uri(),
        "GET": {},
        "POST": {},
        "FILES": {},
        "COOKIES": {},
        "META": {}
    }

    if hasattr(request, "GET"):
        for key, value in request.GET.iteritems():
            request_info['GET'][key] = format(value)

    # POST data comes pre-filtered (sensitive fields masked) from the reporter.
    if "filtered_POST" in ctx:
        for key, value in ctx['filtered_POST'].iteritems():
            request_info['POST'][key] = format(value)

    if hasattr(request, "FILES"):
        for key, value in request.FILES.iteritems():
            request_info['FILES'][key] = format(value)

    if hasattr(request, "COOKIES"):
        for key, value in request.COOKIES.iteritems():
            request_info['COOKIES'][key] = format(value)

    if hasattr(request, "META"):
        for key, value in request.META.iteritems():
            request_info['META'][key] = format(value)

    settings_info = {}
    for key, value in ctx['settings'].iteritems():
        settings_info[key] = format(value)

    ctx['errorid'] = errorid = uuid.uuid4().hex

    full_info = dict(
        __time=datetime.datetime.now().isoformat(),
        __uuid=errorid,
        settings=settings_info,
        request=request_info,
        # NOTE(review): traceback.format_exc takes a *limit* argument, not a
        # traceback object; exc_info[2] is passed here as in the original --
        # confirm the intent (likely format_exception(*exc_info) was meant).
        traceback=exception_info,
        stack=traceback.format_exc(exc_info[2]),
        last_frame=last_frame,
        template_loaders=loaders,
        template_errors=template_errors,
        unicode_hint=unicode_hint,
        exception_type=exception_type,
        exception_value=exception_value,
        django_version=django_version,
        python_version=python_version,
        python_executable=python_executable,
        python_path=python_path,
        server_time=server_time
    )

    return (errorid, ctx, full_info)
def redirector(request, app, view):
    """Redirect to the named view ``app:view``, forwarding all query-string
    parameters as URL kwargs.

    Raises ValueError if either ``app`` or ``view`` is missing/empty.
    """
    params = {}
    for (key, val) in request.GET.iteritems():
        params[key] = val

    full_name = "%s:%s" % (app, view)

    if not view or not app:
        logger.error("Redirector requires both 'app' and 'view' to be set, received: app='%s' view='%s'" % (app, view))
        # BUG FIX: the original raised the undefined name ``Error``, which
        # itself produced a NameError; raise a concrete exception instead.
        raise ValueError("Redirector requires both 'app' and 'view' to be set, received: app='%s' view='%s'" % (app, view))
    return HttpResponseRedirect(reverse(full_name, kwargs=params))
def default_search(request):
    """Redirect to the Splunkweb search view for the requesting app."""
    return HttpResponseRedirect(
        make_splunkweb_url(
            "/%s/app/%s/search" % (request.LANGUAGE_CODE, request.app_name)
        )
    )
def default_flashtimeline(request):
    """Redirect to the Splunkweb flashtimeline view for the requesting app."""
    return HttpResponseRedirect(
        make_splunkweb_url(
            "/%s/app/%s/flashtimeline" % (request.LANGUAGE_CODE, request.app_name)
        )
    )
@render_to()
@login_required
def default_template_render(request, template_name):
    """Render the template ``<app>:<template_name>.html`` for the current app."""
    return {"TEMPLATE": "%s:%s.html" % (request.app_name, template_name)}
@never_cache
def handle404(request):
    """Custom 404 handler: render a debug-style page listing the URL patterns
    Django tried while resolving the request path."""
    # This code is modified from views/debug.py in Django, as we want to display
    # a debug style view, just modified slightly.
    exc_info = sys.exc_info()
    exception = exc_info[1]
    try:
        # Resolver404 exceptions carry the list of patterns that were tried.
        tried = exception.args[0]['tried']
    except (IndexError, TypeError, KeyError):
        tried = []

    urlconf = getattr(request, 'urlconf', settings.ROOT_URLCONF)
    if isinstance(urlconf, types.ModuleType):
        urlconf = urlconf.__name__

    c = Context({
        'urlconf': urlconf,
        'root_urlconf': settings.ROOT_URLCONF,
        'request_path': request.path_info[1:],  # Trim leading slash
        'urlpatterns': tried,
        # errors='replace' keeps rendering safe for non-ASCII exception text.
        'reason': force_bytes(exception, errors='replace'),
        'request': request,
        'settings': get_safe_settings(),
    })
    return HttpResponseNotFound(render_to_string('splunkdj:404.html', context_instance=c))
@never_cache
def handle500(request):
    """Custom 500 handler: log the full exception as JSON (so Splunk can index
    it) and render an error page linking to a search for this error id."""
    errorid, ctx, exception = get_exception_info(request)

    # Emit the raw error as JSON so Splunk can ingest it from the log file.
    error_logger.error(json.dumps(exception, sort_keys=True))

    # Build a Splunkweb search URL that locates this exact error event.
    query_string = urllib.urlencode({
        "q": 'search index=_internal sourcetype=django_error "%s" | head 1 | spath' % errorid,
        "display.events.maxlines": 0,
        "display.general.type": "events",
        "earliest": 0,
        "latest": "",
    })
    ctx['search_url'] = make_splunkweb_url(
        "/%s/app/search/search?%s" % (request.LANGUAGE_CODE, query_string)
    )

    return HttpResponseServerError(
        render_to_string('splunkdj:500.html', context_instance=Context(ctx))
    )
@never_cache
@render_to('splunkdj:page_config.html', mimetype="application/javascript")
@login_required
def get_page_config(request):
    """Emit the JavaScript page-configuration blob consumed by the client.

    The active app name/label are inferred from the HTTP Referer header so
    this shared endpoint can serve any app's pages.
    """
    referer = request.META.get("HTTP_REFERER", "")
    app = ""
    app_label = ""
    if referer:
        try:
            # Strip the Django mount prefix so the referer path resolves
            # against our urlconf, then look up the app's display label.
            parsed = urlparse(referer)
            parsed_path = parsed.path.replace("/%s/" % settings.MOUNT, "/")
            resolved = resolve(parsed_path)
            app = resolved.app_name
            if app:
                app_label = request.service.apps[app]["label"]
        except Exception, e:
            # If there was an error here, don't kill the entire page
            # just return some default info
            app = app or ""
            app_label = app_label or app

    # Timezone descriptor from splunkd, passed through verbatim to the client.
    zone_info = request.service.get('/services/search/timeparser/tz').body.read()

    return {
        "autoload": "1" == request.GET.get("autoload", "0"),
        "config": json.dumps({
            "SPLUNKD_FREE_LICENSE": request.user.is_free,
            "MRSPARKLE_ROOT_PATH": "/%s" % str(settings.SPLUNK_WEB_MOUNT).strip("/"),
            "DJANGO_ROOT_PATH": "/%s" % str(settings.RAW_MOUNT),
            "MRSPARKLE_PORT_NUMBER": str(settings.SPLUNK_WEB_PORT),
            "DJANGO_PORT_NUMBER": str(settings.DJANGO_PORT),
            "LOCALE": str(request.LANGUAGE_CODE),
            "JS_LOGGER_MODE": "None",
            "USERNAME": str(request.user.username),
            "USER_DISPLAYNAME": str(request.user.realname),
            "APP": str(app),
            "APP_DISPLAYNAME": str(app_label),
            "SERVER_ZONEINFO": str(zone_info),
        })
    }
| 37.804348 | 119 | 0.637531 | 1,256 | 10,434 | 5.11465 | 0.246019 | 0.00934 | 0.010274 | 0.012142 | 0.132783 | 0.074408 | 0.051681 | 0.051681 | 0.051681 | 0.051681 | 0 | 0.00382 | 0.247364 | 10,434 | 275 | 120 | 37.941818 | 0.814211 | 0.072168 | 0 | 0.096916 | 0 | 0.008811 | 0.16369 | 0.025256 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0 | 0.096916 | null | null | 0.008811 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
91d867e70ec797fb77cf3fedd501ea6a1aca218d | 8,301 | py | Python | wbia/plottool/interact_keypoints.py | mmulich/wildbook-ia | 81b405e2bfaa3f6c30a546fb6dc6e6488e9b2663 | [
"Apache-2.0"
] | null | null | null | wbia/plottool/interact_keypoints.py | mmulich/wildbook-ia | 81b405e2bfaa3f6c30a546fb6dc6e6488e9b2663 | [
"Apache-2.0"
] | null | null | null | wbia/plottool/interact_keypoints.py | mmulich/wildbook-ia | 81b405e2bfaa3f6c30a546fb6dc6e6488e9b2663 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
import logging
import utool as ut
import six
from . import draw_func2 as df2
from wbia.plottool import plot_helpers as ph
from wbia.plottool import interact_helpers as ih
from wbia.plottool.viz_featrow import draw_feat_row
from wbia.plottool.viz_keypoints import show_keypoints
from wbia.plottool import abstract_interaction
(print, rrr, profile) = ut.inject2(__name__)
logger = logging.getLogger('wbia')
class KeypointInteraction(abstract_interaction.AbstractInteraction):
    r"""
    Interactive matplotlib viewer for image keypoints.

    Clicking inside the keypoint axes selects the nearest keypoint and shows
    its feature row; clicking outside cycles the overlay mode
    (0 = plain, 1 = ellipses, 2 = points).

    CommandLine:
        python -m wbia.plottool.interact_keypoints --exec-KeypointInteraction --show
        python -m wbia.plottool.interact_keypoints --exec-KeypointInteraction --show --fname=lena.png

    Example:
        >>> # DISABLE_DOCTEST
        >>> from wbia.plottool.interact_keypoints import *  # NOQA
        >>> import numpy as np
        >>> import wbia.plottool as pt
        >>> import utool as ut
        >>> import pyhesaff
        >>> import vtool as vt
        >>> kpts, vecs, imgBGR = pt.viz_keypoints.testdata_kpts()
        >>> ut.quit_if_noshow()
        >>> #pt.interact_keypoints.ishow_keypoints(imgBGR, kpts, vecs, ori=True, ell_alpha=.4, color='distinct')
        >>> pt.interact_keypoints.KeypointInteraction(imgBGR, kpts, vecs, ori=True, ell_alpha=.4, autostart=True)
        >>> pt.show_if_requested()
    """

    def __init__(self, chip, kpts, vecs, fnum=0, figtitle=None, **kwargs):
        # chip: the image to display; kpts/vecs: keypoints and descriptors.
        self.chip = chip
        self.kpts = kpts
        self.vecs = vecs
        self.figtitle = figtitle
        # Overlay mode cycled by clicks outside the axes (0/1/2).
        self.mode = 0
        super(KeypointInteraction, self).__init__(**kwargs)

    def plot(self, fnum=None, pnum=(1, 1, 1), **kwargs):
        """Draw the chip with its keypoints on figure *fnum*."""
        import wbia.plottool as pt

        fnum = pt.ensure_fnum(fnum)
        pt.figure(fnum=fnum, docla=True, doclf=True)
        show_keypoints(self.chip, self.kpts, fnum=fnum, pnum=pnum, **kwargs)
        if self.figtitle is not None:
            pt.set_figtitle(self.figtitle)

    def _select_ith_kpt(self, fx):
        """Highlight keypoint *fx* on the chip and draw its feature row."""
        logger.info('[interact] viewing ith=%r keypoint' % fx)
        # Get the fx-th keypiont
        kp, sift = self.kpts[fx], self.vecs[fx]
        # Draw the image with keypoint fx highlighted
        self.plot(self.fnum, (2, 1, 1), sel_fx=fx)
        # Draw the selected feature
        nRows, nCols, px = (2, 3, 3)
        draw_feat_row(self.chip, fx, kp, sift, self.fnum, nRows, nCols, px, None)

    def on_click_outside(self, event):
        """Cycle the keypoint overlay mode (plain -> ellipses -> points)."""
        self.mode = (self.mode + 1) % 3
        ell = self.mode == 1
        pts = self.mode == 2
        logger.info('... default kpts view mode=%r' % self.mode)
        self.plot(self.fnum, ell=ell, pts=pts)
        self.draw()

    def on_click_inside(self, event, ax):
        """Handle a click inside an axes: select the nearest keypoint, or show
        gradient orientations when the warped-patch axes is clicked."""
        import wbia.plottool as pt

        viztype = ph.get_plotdat(ax, 'viztype', None)
        logger.info('[ik] viztype=%r' % viztype)
        if viztype is None:
            pass
        elif viztype == 'keypoints':
            kpts = ph.get_plotdat(ax, 'kpts', [])
            if len(kpts) == 0:
                logger.info('...nokpts')
            else:
                # Select the keypoint closest to the click location.
                logger.info('...nearest')
                x, y = event.xdata, event.ydata
                import vtool as vt

                fx = vt.nearest_point(x, y, kpts)[0]
                self._select_ith_kpt(fx)
        elif viztype == 'warped':
            hs_fx = ph.get_plotdat(ax, 'fx', None)
            if hs_fx is not None:
                kp = self.kpts[hs_fx]  # FIXME
                sift = self.vecs[hs_fx]
                df2.draw_keypoint_gradient_orientations(
                    self.chip, kp, sift=sift, mode='vec', fnum=pt.next_fnum()
                )
                pt.draw()
        elif viztype.startswith('colorbar'):
            pass
        else:
            logger.info('...unhandled')
        self.draw()
def ishow_keypoints(chip, kpts, desc, fnum=0, figtitle=None, nodraw=False, **kwargs):
    """
    Interactively display keypoints on *chip*: clicking a keypoint shows its
    feature row, clicking outside the axes cycles the overlay mode.

    TODO: Depricate in favor of the class

    CommandLine:
        python -m wbia.plottool.interact_keypoints --test-ishow_keypoints --show
        python -m wbia.plottool.interact_keypoints --test-ishow_keypoints --show --fname zebra.png

    Example:
        >>> # DISABLE_DOCTEST
        >>> from wbia.plottool.interact_keypoints import *  # NOQA
        >>> import numpy as np
        >>> import wbia.plottool as pt
        >>> import utool as ut
        >>> import pyhesaff
        >>> import vtool as vt
        >>> kpts, vecs, imgBGR = pt.viz_keypoints.testdata_kpts()
        >>> ut.quit_if_noshow()
        >>> #pt.interact_keypoints.ishow_keypoints(imgBGR, kpts, vecs, ori=True, ell_alpha=.4, color='distinct')
        >>> pt.interact_keypoints.ishow_keypoints(imgBGR, kpts, vecs, ori=True, ell_alpha=.4)
        >>> pt.show_if_requested()
    """
    # Accept either an image path or an already-loaded image.
    if isinstance(chip, six.string_types):
        import vtool as vt

        chip = vt.imread(chip)
    fig = ih.begin_interaction('keypoint', fnum)
    # Mutable cell holding the current overlay mode so the nested click
    # handler can update it.
    annote_ptr = [1]

    self = ut.DynStruct()  # MOVE TO A CLASS INTERACTION
    self.kpts = kpts
    vecs = desc
    self.vecs = vecs

    def _select_ith_kpt(fx):
        """Highlight keypoint *fx* and draw its feature row below the chip."""
        logger.info('[interact] viewing ith=%r keypoint' % fx)
        # Get the fx-th keypiont
        kp, sift = kpts[fx], vecs[fx]
        # Draw the image with keypoint fx highlighted
        _viz_keypoints(fnum, (2, 1, 1), sel_fx=fx, **kwargs)  # MAYBE: remove kwargs
        # Draw the selected feature
        nRows, nCols, px = (2, 3, 3)
        draw_feat_row(chip, fx, kp, sift, fnum, nRows, nCols, px, None)

    def _viz_keypoints(fnum, pnum=(1, 1, 1), **kwargs):
        """(Re)draw the chip and its keypoints on figure *fnum*."""
        df2.figure(fnum=fnum, docla=True, doclf=True)
        show_keypoints(chip, kpts, fnum=fnum, pnum=pnum, **kwargs)
        if figtitle is not None:
            df2.set_figtitle(figtitle)

    def _on_keypoints_click(event):
        """Mouse handler: cycle mode, select a keypoint, or show gradients."""
        logger.info('[viz] clicked keypoint view')
        if event is None or event.xdata is None or event.inaxes is None:
            # Click outside any axes: cycle the keypoint overlay mode.
            annote_ptr[0] = (annote_ptr[0] + 1) % 3
            mode = annote_ptr[0]
            ell = mode == 1
            pts = mode == 2
            logger.info('... default kpts view mode=%r' % mode)
            _viz_keypoints(fnum, ell=ell, pts=pts, **kwargs)  # MAYBE: remove kwargs
        else:
            ax = event.inaxes
            viztype = ph.get_plotdat(ax, 'viztype', None)
            logger.info('[ik] viztype=%r' % viztype)
            if viztype == 'keypoints':
                kpts = ph.get_plotdat(ax, 'kpts', [])
                if len(kpts) == 0:
                    logger.info('...nokpts')
                else:
                    # Select the keypoint nearest to the click location.
                    logger.info('...nearest')
                    x, y = event.xdata, event.ydata
                    import vtool as vt

                    fx = vt.nearest_point(x, y, kpts)[0]
                    _select_ith_kpt(fx)
            elif viztype == 'warped':
                hs_fx = ph.get_plotdat(ax, 'fx', None)
                # kpts = ph.get_plotdat(ax, 'kpts', [])
                if hs_fx is not None:
                    # Ugly. Interactions should be changed to classes.
                    kp = self.kpts[hs_fx]  # FIXME
                    sift = self.vecs[hs_fx]
                    df2.draw_keypoint_gradient_orientations(
                        chip, kp, sift=sift, mode='vec', fnum=df2.next_fnum()
                    )
            elif viztype.startswith('colorbar'):
                pass
                # Hack to get a specific scoring feature
                # sortx = self.fs.argsort()
                # idx = np.clip(int(np.round(y * len(sortx))), 0, len(sortx) - 1)
                # mx = sortx[idx]
                # (fx1, fx2) = self.fm[mx]
                # (fx1, fx2) = self.fm[mx]
                # logger.info('... selected score at rank idx=%r' % (idx,))
                # logger.info('... selected score with fs=%r' % (self.fs[mx],))
                # logger.info('... resolved to mx=%r' % mx)
                # logger.info('... fx1, fx2 = %r, %r' % (fx1, fx2,))
                # self.select_ith_match(mx)
            else:
                logger.info('...unhandled')
        ph.draw()

    # Draw without keypoints the first time
    _viz_keypoints(fnum, **kwargs)  # MAYBE: remove kwargs
    ih.connect_callback(fig, 'button_press_event', _on_keypoints_click)
    if not nodraw:
        ph.draw()
| 39.15566 | 113 | 0.563787 | 1,035 | 8,301 | 4.393237 | 0.201932 | 0.037387 | 0.024632 | 0.021553 | 0.554651 | 0.506928 | 0.490653 | 0.451067 | 0.444469 | 0.362657 | 0 | 0.010508 | 0.312131 | 8,301 | 211 | 114 | 39.341232 | 0.785814 | 0.292013 | 0 | 0.435115 | 0 | 0 | 0.061973 | 0 | 0 | 0 | 0 | 0.014218 | 0 | 1 | 0.068702 | false | 0.022901 | 0.10687 | 0 | 0.183206 | 0.007634 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
91da549f96f9ccca48e20a796a48546be83febae | 206 | py | Python | exercises/ja/exc_03_16_01.py | Jette16/spacy-course | 32df0c8f6192de6c9daba89740a28c0537e4d6a0 | [
"MIT"
] | 2,085 | 2019-04-17T13:10:40.000Z | 2022-03-30T21:51:46.000Z | exercises/ja/exc_03_16_01.py | Jette16/spacy-course | 32df0c8f6192de6c9daba89740a28c0537e4d6a0 | [
"MIT"
] | 79 | 2019-04-18T14:42:55.000Z | 2022-03-07T08:15:43.000Z | exercises/ja/exc_03_16_01.py | Jette16/spacy-course | 32df0c8f6192de6c9daba89740a28c0537e4d6a0 | [
"MIT"
] | 361 | 2019-04-17T13:34:32.000Z | 2022-03-28T04:42:45.000Z | import spacy
# Load the small Japanese spaCy pipeline.
nlp = spacy.load("ja_core_news_sm")

text = (
    "チックフィレイはジョージア州カレッジパークに本社を置く、"
    "チキンサンドを専門とするアメリカのファストフードレストランチェーンです。"
)

# Tokenize only (original comment: トークナイズのみ行う)
doc = nlp(text)
print([token.text for token in doc])
| 17.166667 | 42 | 0.73301 | 23 | 206 | 6.434783 | 0.73913 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.150485 | 206 | 11 | 43 | 18.727273 | 0.845714 | 0.048544 | 0 | 0 | 0 | 0 | 0.407216 | 0.329897 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.125 | 0 | 0.125 | 0.125 | 1 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
91db99963a9d2cafd0fa8e863ed2ec3e7df55f3e | 1,320 | py | Python | opendatatools/common/ui_util.py | harveywwu/OpenData | cf421465dd9b11fdbb2fbf4d00512e3aaf09d070 | [
"Apache-2.0"
] | null | null | null | opendatatools/common/ui_util.py | harveywwu/OpenData | cf421465dd9b11fdbb2fbf4d00512e3aaf09d070 | [
"Apache-2.0"
] | null | null | null | opendatatools/common/ui_util.py | harveywwu/OpenData | cf421465dd9b11fdbb2fbf4d00512e3aaf09d070 | [
"Apache-2.0"
] | 1 | 2020-05-29T00:26:59.000Z | 2020-05-29T00:26:59.000Z | # -*- coding: UTF-8 -*-
import sys, time
class ShowProcess():
    """
    Console progress-bar helper.

    Construct with the total number of steps, then call show_process() once
    per processed item; the bar redraws in place on a single line and prints
    ``infoDone`` when the last step is reached.
    """
    i = 0            # current step
    max_steps = 0    # total number of steps to process
    max_arrow = 50   # bar width in characters
    infoDone = 'done'

    def __init__(self, max_steps, infoDone='Done'):
        self.max_steps = max_steps
        self.i = 0
        self.infoDone = infoDone

    def show_process(self, i=None):
        """Render the bar for step *i* (or advance by one if *i* is None).

        Output looks like: [>>>>>>>>--------------------------------]16.00%
        """
        if i is not None:
            self.i = i
        else:
            self.i += 1
        num_arrow = int(self.i * self.max_arrow / self.max_steps)  # '>' count
        num_line = self.max_arrow - num_arrow                      # '-' count
        percent = self.i * 100.0 / self.max_steps                  # completion %
        process_bar = '[' + '>' * num_arrow + '-' * num_line + ']' \
                      + '%.2f' % percent + '%' + '\r'
        # BUG FIX: the original used a Python 2 ``print`` statement, which
        # appended a newline and defeated the trailing '\r' carriage return.
        # Write directly to stdout so the bar redraws in place (this is also
        # valid Python 3).
        sys.stdout.write(process_bar)
        sys.stdout.flush()
        if self.i >= self.max_steps:
            self.close()

    def close(self):
        """Finish the bar: print ``infoDone`` and reset the step counter."""
        print('')
        print(self.infoDone)
        self.i = 0
if __name__ == '__main__':
    # Quick manual demo: walk the bar through 100 steps.
    total_steps = 100
    bar = ShowProcess(total_steps, 'OK')
    for _ in range(total_steps):
        bar.show_process()
        time.sleep(0.1)
91e4401665d568cd4d6102a4a69c6d2f7668744f | 602 | py | Python | backend/api/v1/dialogs/urls.py | donicrazy/ChatApp | ab129a9c0706bbb972cbce43283ba6e06d144635 | [
"MIT"
] | null | null | null | backend/api/v1/dialogs/urls.py | donicrazy/ChatApp | ab129a9c0706bbb972cbce43283ba6e06d144635 | [
"MIT"
] | 7 | 2021-03-19T04:47:13.000Z | 2022-01-13T02:02:46.000Z | backend/api/v1/dialogs/urls.py | donicrazy/ChatApp | ab129a9c0706bbb972cbce43283ba6e06d144635 | [
"MIT"
] | null | null | null | from django.urls import path
from backend.api.v1.dialogs.views import (
DialogListCreateView,
DialogRetrieveUpdateDestroyAPIView,
DialogMembershipListCreateView,
DialogMessageListCreateView,
DialogMessageRetrieveUpdateDestroyAPIView,
)
# URL routes for the dialogs API (v1).
urlpatterns = [
    # Dialog collection: list existing dialogs / create a new one.
    path('', DialogListCreateView.as_view()),
    # Single dialog by primary key: retrieve / update / delete.
    path('<int:pk>', DialogRetrieveUpdateDestroyAPIView.as_view()),
    # Dialog memberships: list / add members.
    path('membership/', DialogMembershipListCreateView.as_view()),
    # Dialog messages: list / create.
    path('messages/', DialogMessageListCreateView.as_view()),
    # Single message by primary key: retrieve / update / delete.
    path('messages/<int:pk>', DialogMessageRetrieveUpdateDestroyAPIView.as_view()),
]
| 35.411765 | 83 | 0.770764 | 45 | 602 | 10.2 | 0.488889 | 0.065359 | 0.087146 | 0.078431 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.001876 | 0.114618 | 602 | 16 | 84 | 37.625 | 0.859287 | 0 | 0 | 0 | 0 | 0 | 0.074751 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.133333 | 0 | 0.133333 | 0 | 0 | 0 | 1 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
91e4f118680c4b4128c740a76beaad48599ab626 | 848 | py | Python | datamart/tests/test_Dimension.py | josemrsantos/zoopla_datamart | f3a3af8071199deeb712d1814aecb6cc3cd88d57 | [
"MIT"
] | 1 | 2016-02-01T20:27:25.000Z | 2016-02-01T20:27:25.000Z | datamart/tests/test_Dimension.py | josemrsantos/zoopla_datamart | f3a3af8071199deeb712d1814aecb6cc3cd88d57 | [
"MIT"
] | null | null | null | datamart/tests/test_Dimension.py | josemrsantos/zoopla_datamart | f3a3af8071199deeb712d1814aecb6cc3cd88d57 | [
"MIT"
] | null | null | null | from ..datamart import *
def test_create_dimension():
    """A freshly created Dimension must not be flagged as degenerate."""
    dim = Dimension("test_dimension")
    assert dim.is_degenerate == False
def test_create_dimension_insert_2_identical_lines():
    """Inserting the same line twice must store it only once."""
    dim = Dimension("test_dimension")
    for _ in range(2):
        dim.addDimensionLine('test')
    assert dim.id_value == 1
    assert len(list(dim.values)) == 1
def test_create_dimension_insert_2_identical_lines_and_1_different():
    """Two identical lines plus one distinct line must store exactly two."""
    dim = Dimension("test_dimension")
    for line in ('test', 'test2', 'test'):
        dim.addDimensionLine(line)
    assert dim.id_value == 2
    assert len(list(dim.values)) == 2
| 33.92 | 69 | 0.732311 | 99 | 848 | 6.040404 | 0.313131 | 0.180602 | 0.100334 | 0.110368 | 0.670569 | 0.576923 | 0.576923 | 0.448161 | 0.304348 | 0.304348 | 0 | 0.015493 | 0.162736 | 848 | 24 | 70 | 35.333333 | 0.826761 | 0.128538 | 0 | 0.411765 | 1 | 0 | 0.087379 | 0 | 0 | 0 | 0 | 0 | 0.294118 | 1 | 0.176471 | false | 0 | 0.058824 | 0 | 0.235294 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
91e82476dc55d0591c20d0a5e9975a53641bca72 | 6,711 | py | Python | examples/Word2Vec_AverageVectorsTuto.py | noiseux1523/Deep-Belief-Network | 6eb364a85fb128a33c539e5e414ef451f24e499d | [
"MIT"
] | 1 | 2019-08-20T12:13:34.000Z | 2019-08-20T12:13:34.000Z | examples/Word2Vec_AverageVectorsTuto.py | noiseux1523/Deep-Belief-Network | 6eb364a85fb128a33c539e5e414ef451f24e499d | [
"MIT"
] | null | null | null | examples/Word2Vec_AverageVectorsTuto.py | noiseux1523/Deep-Belief-Network | 6eb364a85fb128a33c539e5e414ef451f24e499d | [
"MIT"
] | null | null | null | # Author: Angela Chapman
# Date: 8/6/2014
#
# This file contains code to accompany the Kaggle tutorial
# "Deep learning goes to the movies". The code in this file
# is for Parts 2 and 3 of the tutorial, which cover how to
# train a model using Word2Vec.
#
# *************************************** #
# ****** Read the two training sets and the test set
#
import pandas as pd
import os
from nltk.corpus import stopwords
import nltk.data
import logging
import numpy as np # Make sure that numpy is imported
from gensim.models import Word2Vec
from sklearn.ensemble import RandomForestClassifier
from KaggleWord2VecUtility import KaggleWord2VecUtility
# ****** Define functions to create average word vectors
#
def makeFeatureVec(words, model, num_features):
    """Average the Word2Vec vectors of all in-vocabulary words in one review.

    Parameters
    ----------
    words : list of str
        Tokenized review.
    model : gensim Word2Vec model
        Must expose ``model.wv.index2word`` and item lookup ``model[word]``.
    num_features : int
        Dimensionality of the word vectors.

    Returns
    -------
    numpy.ndarray, shape (num_features,), dtype float32
        The mean vector; all zeros if no review word was in the vocabulary.
    """
    # Pre-initialize an empty numpy array (for speed)
    featureVec = np.zeros((num_features,), dtype="float32")
    nwords = 0.

    # Index2word is a list that contains the names of the words in
    # the model's vocabulary. Convert it to a set, for speed
    index2word_set = set(model.wv.index2word)

    # Sum the vectors of every review word found in the vocabulary.
    for word in words:
        if word in index2word_set:
            nwords = nwords + 1.
            featureVec = np.add(featureVec, model[word])

    # BUG FIX: guard against reviews with no in-vocabulary words; the
    # original divided by zero here, yielding a vector of NaNs.
    if nwords == 0.:
        return featureVec

    # Divide the result by the number of words to get the average
    featureVec = np.divide(featureVec, nwords)
    return featureVec
def getAvgFeatureVecs(reviews, model, num_features):
    """Compute the average Word2Vec feature vector for each review.

    Parameters
    ----------
    reviews : list of list of str
        Each review as a list of word tokens.
    model : gensim Word2Vec model
    num_features : int
        Dimensionality of the word vectors.

    Returns
    -------
    numpy.ndarray, shape (len(reviews), num_features), dtype float32
    """
    # Initialize a counter
    counter = 0.

    # Preallocate a 2D numpy array, for speed
    reviewFeatureVecs = np.zeros((len(reviews), num_features), dtype="float32")

    for review in reviews:
        # Print a status message every 1000th review.
        if counter % 1000. == 0.:
            # FIX: parenthesized so this is valid under both Python 2 and
            # Python 3 (the original used a Python 2 print statement).
            print("Review %d of %d" % (counter, len(reviews)))

        # Call the function (defined above) that makes average feature vectors
        reviewFeatureVecs[int(counter)] = makeFeatureVec(review, model,
                                                         num_features)
        # Increment the counter
        counter = counter + 1.
    return reviewFeatureVecs
def getCleanReviews(reviews):
    """Return one cleaned word list per entry of reviews["review"],
    with stop words removed."""
    return [
        KaggleWord2VecUtility.review_to_wordlist(review, remove_stopwords=True)
        for review in reviews["review"]
    ]
if __name__ == '__main__':
    # ---- Read the two training sets and the test set -------------------
    # quoting=3 (csv.QUOTE_NONE): the review text contains stray quotes.
    train = pd.read_csv(os.path.join(os.path.dirname(__file__), 'data', 'labeledTrainData.tsv'),
                        header=0, delimiter="\t", quoting=3)
    test = pd.read_csv(os.path.join(os.path.dirname(__file__), 'data', 'testData.tsv'),
                       header=0, delimiter="\t", quoting=3)
    unlabeled_train = pd.read_csv(os.path.join(os.path.dirname(__file__), 'data', "unlabeledTrainData.tsv"),
                                  header=0, delimiter="\t", quoting=3)

    # Verify the number of reviews that were read (100,000 in total).
    # print() with a single argument works in both Python 2 and 3 (the
    # original used the Python-2-only print statement throughout).
    print("Read %d labeled train reviews, %d labeled test reviews, "
          "and %d unlabeled reviews\n" % (train["review"].size,
                                          test["review"].size,
                                          unlabeled_train["review"].size))

    # Load the punkt tokenizer used to split reviews into sentences.
    tokenizer = nltk.data.load('tokenizers/punkt/english.pickle')

    # ---- Split the labeled and unlabeled training sets into sentences --
    sentences = []  # list of tokenized sentences for Word2Vec training
    print("Parsing sentences from training set")
    for review in train["review"]:
        sentences += KaggleWord2VecUtility.review_to_sentences(review, tokenizer)
    print("Parsing sentences from unlabeled set")
    for review in unlabeled_train["review"]:
        sentences += KaggleWord2VecUtility.review_to_sentences(review, tokenizer)

    # ---- Set parameters and train the word2vec model -------------------
    # Configure logging so gensim reports training progress.
    logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s',
                        level=logging.INFO)

    num_features = 300    # Word vector dimensionality
    min_word_count = 40   # Minimum word count
    num_workers = 4       # Number of threads to run in parallel
    context = 10          # Context window size
    downsampling = 1e-3   # Downsample setting for frequent words

    print("Training Word2Vec model...")
    model = Word2Vec(sentences, workers=num_workers,
                     size=num_features, min_count=min_word_count,
                     window=context, sample=downsampling, seed=1)

    # If no further training is planned, init_sims makes the model much
    # more memory-efficient.
    model.init_sims(replace=True)

    # Save under a meaningful name; reload later with Word2Vec.load().
    model_name = "300features_40minwords_10context"
    model.save(model_name)

    # Sanity probes on the learned vectors; return values are discarded
    # (originally meant for interactive inspection).
    model.doesnt_match("man woman child kitchen".split())
    model.doesnt_match("france england germany berlin".split())
    model.doesnt_match("paris berlin london austria".split())
    model.most_similar("man")
    model.most_similar("queen")
    model.most_similar("awful")

    # ---- Create average vectors for the training and test sets ---------
    print("Creating average feature vecs for training reviews")
    trainDataVecs = getAvgFeatureVecs(getCleanReviews(train), model, num_features)
    print("Creating average feature vecs for test reviews")
    testDataVecs = getAvgFeatureVecs(getCleanReviews(test), model, num_features)

    # ---- Fit a random forest to the training set, then predict ---------
    forest = RandomForestClassifier(n_estimators=100)
    print("Fitting a random forest to labeled training data...")
    forest = forest.fit(trainDataVecs, train["sentiment"])
    result = forest.predict(testDataVecs)

    # Write the predictions for submission.
    output = pd.DataFrame(data={"id": test["id"], "sentiment": result})
    output.to_csv("Word2Vec_AverageVectors.csv", index=False, quoting=3)
print "Wrote Word2Vec_AverageVectors.csv" | 37.915254 | 118 | 0.670094 | 848 | 6,711 | 5.220519 | 0.336085 | 0.022363 | 0.018071 | 0.007454 | 0.121075 | 0.109781 | 0.094421 | 0.062345 | 0.062345 | 0.029365 | 0 | 0.016003 | 0.236477 | 6,711 | 177 | 119 | 37.915254 | 0.84797 | 0.336463 | 0 | 0.048193 | 0 | 0 | 0.170927 | 0.031721 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0 | 0.108434 | null | null | 0.108434 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
91eebd9cfe8ecc166ed16501e2c6d724f724535d | 4,110 | py | Python | theory/model/form.py | ralfonso/theory | 41684969313cfc545d74b306e409fd5bf21387b3 | [
"MIT"
] | 4 | 2015-07-03T19:53:59.000Z | 2016-04-25T03:03:56.000Z | theory/model/form.py | ralfonso/theory | 41684969313cfc545d74b306e409fd5bf21387b3 | [
"MIT"
] | null | null | null | theory/model/form.py | ralfonso/theory | 41684969313cfc545d74b306e409fd5bf21387b3 | [
"MIT"
] | 2 | 2020-03-29T22:02:29.000Z | 2021-07-13T07:17:19.000Z | import formencode
import pylons
from pylons import app_globals as g
class OutputSchema(formencode.Schema):
    """Schema for one audio-output entry (used repeated via ConfigForm.outputs)."""
    allow_extra_fields = False
    # Output toggle as an integer flag; presumably 0/1 from a checkbox -
    # confirm against the template that submits it.
    enabled = formencode.validators.Int()
class ConfigForm(formencode.Schema):
    """Validation schema for the application's configuration form."""
    allow_extra_fields = True
    filter_extra_fields = True
    #pre_validators = [formencode.NestedVariables()]
    # Submit/cancel button values; both optional.
    action = formencode.validators.String(not_empty=False,if_missing=None)
    cancel = formencode.validators.String(not_empty=False,if_missing=None)
    # Integer flag, defaults to 0; presumably marks the first-run setup
    # flow - confirm against the controller that reads it.
    firsttime = formencode.validators.Int(not_empty=False, if_missing=0)
    # MPD connection settings; the error messages note 6600 as MPD's default port.
    server = formencode.validators.String(strip=True,not_empty=True,messages={'empty':'please enter a server host name'})
    port = formencode.validators.Int(strip=True,not_empty=True,messages={'empty':'please enter a port, MPD default is 6600',
                                                                         'integer':'please enter an integer value for port, MPD default is 6600'
                                                                         })
    # Optional MPD password and web-interface password.
    password = formencode.validators.String(not_empty=False,if_missing=None)
    webpassword = formencode.validators.String(not_empty=False,if_missing=None)
    timeout = formencode.validators.Bool()
    default_search = formencode.validators.String(not_empty=True)
    # Optional AWS credentials.
    awskey = formencode.validators.String(strip=True,not_empty=False,if_missing=None)
    aws_secret = formencode.validators.String(strip=True,not_empty=False,if_missing=None)
    # Zero or more audio-output entries, each validated by OutputSchema.
    outputs = formencode.ForEach(OutputSchema(), if_missing=[])
class StreamNameInUse(formencode.validators.FancyValidator):
    """Chained validator that rejects a stream name that is already in use."""
    def validate_python(self, values, state):
        # If old name is set we are renaming an existing stream, so the
        # uniqueness check is skipped.
        if values['oldname']:
            return
        # g.tc.streams entries carry the stream name as their first element.
        if values['name'] in [name[0] for name in g.tc.streams]:
            raise formencode.Invalid({'stream_name_taken':"that stream name has already been used"}, values, state)
class StreamForm(formencode.Schema):
    """Validation schema for adding or renaming a radio stream."""
    allow_extra_fields = False
    name = formencode.validators.String(not_empty=True,strip=True,messages={'empty':'please enter a name for this stream'})
    url = formencode.validators.URL(not_empty=True,require_tld=False,strip=True,check_exists=False,messages={'empty':'please enter a URL'})
    # Previous name when editing an existing stream; empty for new streams.
    oldname = formencode.validators.String(not_empty=False)
    # Enforces stream-name uniqueness across the whole submitted form.
    chained_validators = [StreamNameInUse()]
class State(object):
    """Trivial attribute bag used to carry information into formencode
    validators: every keyword argument becomes an instance attribute."""

    def __init__(self, **kw):
        # Expose all keyword arguments directly as attributes.
        self.__dict__.update(kw)

    def __repr__(self):
        # Render as ClassName(attr1=value1, attr2=value2, ...).
        body = ', '.join('%s=%r' % (name, value)
                         for name, value in self.__dict__.items())
        return self.__class__.__name__ + '(' + body + ')'
def validate_custom(schema, **state_kwargs):
    """Validate a formencode schema against pylons.request.params.

    Works similarly to the @validate decorator.  On success returns the
    dictionary of validated parameters; on failure a formencode.Invalid
    exception propagates to the caller.

    Args:
        schema: formencode.Schema to validate the request parameters against.
        **state_kwargs: optional context wrapped into a State object that is
            passed to the validators.  The special key 'variable_decode'
            additionally triggers formencode variable decoding.
            NOTE(review): when 'variable_decode' is passed it also becomes
            an attribute on the State object - confirm this is intended.
    """
    # Create a state object only if extra context was requested.
    if state_kwargs:
        state = State(**state_kwargs)
    else:
        state = None
    if state_kwargs.get('variable_decode', False):
        params = formencode.variabledecode.variable_decode(pylons.request.params)
        # Debug output kept from the original implementation, converted from
        # the Python-2-only print statement to the portable call form.
        print(pylons.request.params)
        print(params)
    else:
        params = pylons.request.params
    # Raises formencode.Invalid on validation errors.
    return schema.to_python(params, state)
def htmlfill(html, exception_error=None):
    """Render formencode error messages into an HTML form.

    'html' is the page containing the form (e.g. produced by render());
    'exception_error' is the formencode.Invalid exception raised during
    validation, or None when there are no errors.
    """
    # unpack_errors() is only evaluated when an exception was supplied.
    errors = exception_error and exception_error.unpack_errors()
    return formencode.htmlfill.render(
        form=html,
        defaults=pylons.request.params,
        errors=errors,
        encoding=pylons.response.determine_charset(),
    )
| 42.8125 | 139 | 0.682968 | 503 | 4,110 | 5.435388 | 0.324056 | 0.12436 | 0.095099 | 0.07425 | 0.287125 | 0.243599 | 0.163862 | 0.154353 | 0.154353 | 0.078274 | 0 | 0.003728 | 0.216788 | 4,110 | 95 | 140 | 43.263158 | 0.845604 | 0.051095 | 0 | 0.063492 | 0 | 0 | 0.087419 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0.031746 | 0.047619 | null | null | 0.031746 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
37d29492156d47c44672b00f04cedb7fbbdcf78e | 5,880 | py | Python | networks/mobilenet.py | softsys4ai/FlexiBO | 1406d67e5bd14d6b7210e724e6b239889f210db6 | [
"MIT"
] | 8 | 2020-06-23T07:05:18.000Z | 2021-10-24T02:38:14.000Z | networks/mobilenet.py | softsys4ai/FlexiBO | 1406d67e5bd14d6b7210e724e6b239889f210db6 | [
"MIT"
] | null | null | null | networks/mobilenet.py | softsys4ai/FlexiBO | 1406d67e5bd14d6b7210e724e6b239889f210db6 | [
"MIT"
] | 3 | 2020-01-06T10:49:12.000Z | 2020-04-20T03:26:33.000Z | # Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# MobileNet 224 (2017)
# Paper: https://arxiv.org/pdf/1704.04861.pdf
import os
import tensorflow as tf
from tensorflow.keras import layers, Input, Model
def stem(inputs, alpha, n_filters, filter_size):
    """Construct the stem group: a strided convolution block followed by
    one depthwise-separable block.

    inputs      : input tensor
    alpha       : width multiplier
    n_filters   : number of filters of the stem convolution
    filter_size : spatial size of the stem convolution kernel
    """
    # Strided 2x2-stride convolution with explicit asymmetric zero padding.
    net = layers.ZeroPadding2D(padding=((0, 1), (0, 1)))(inputs)
    net = layers.Conv2D(n_filters, (filter_size, filter_size),
                        strides=(2, 2), padding='valid')(net)
    net = layers.BatchNormalization()(net)
    net = layers.ReLU()(net)
    # First depthwise-separable convolution block (64 filters, unit stride).
    return depthwise_block(net, 64, alpha, (1, 1))
def classifier(x, alpha, dropout, n_classes):
    """Construct the classifier head.

    x         : feature tensor coming out of the backbone
    alpha     : width multiplier
    dropout   : dropout percentage
    n_classes : number of output classes
    """
    # Collapse the spatial dimensions, then reshape to (?, 1, 1, 1024*alpha).
    net = layers.GlobalAveragePooling2D()(x)
    net = layers.Reshape((1, 1, int(1024 * alpha)))(net)
    # Dropout for preventing overfitting.
    net = layers.Dropout(dropout)(net)
    # A 1x1 convolution emulates a fully connected layer over the classes.
    net = layers.Conv2D(n_classes, (1, 1), padding='same')(net)
    net = layers.Activation('softmax')(net)
    # Flatten the result to a 1D vector of class probabilities.
    return layers.Reshape((n_classes, ))(net)
def depthwise_block(x, n_filters, alpha, strides):
    """Construct one depthwise-separable convolution block.

    x         : input tensor
    n_filters : base number of pointwise filters (scaled by alpha)
    alpha     : width multiplier
    strides   : strides of the depthwise convolution
    """
    # Apply the width multiplier to the number of output feature maps.
    filters = int(n_filters * alpha)
    # Strided blocks pad explicitly and convolve 'valid'; unit-stride
    # blocks convolve 'same'.
    if strides == (2, 2):
        net = layers.ZeroPadding2D(padding=((0, 1), (0, 1)))(x)
        padding = 'valid'
    else:
        net = x
        padding = 'same'
    # Depthwise 3x3 convolution.
    net = layers.DepthwiseConv2D((3, 3), strides, padding=padding)(net)
    net = layers.BatchNormalization()(net)
    net = layers.ReLU()(net)
    # Pointwise 1x1 convolution.
    net = layers.Conv2D(filters, (1, 1), strides=(1, 1), padding='same')(net)
    net = layers.BatchNormalization()(net)
    net = layers.ReLU()(net)
    return net
def get_configurable_hyperparams():
    """Read the tunable hyperparameters from cur_config.yaml.

    Returns a 6-tuple:
        (stem_n_filters, stem_filter_size,
         depthwise_block1_n_filters, depthwise_block2_n_filters,
         depthwise_block3_n_filters, depthwise_block4_n_filters)

    Bug fixed: the original returned only the first five entries of
    cur_conf while the __main__ caller unpacks six values.
    """
    import yaml
    with open("cur_config.yaml") as fp:
        # safe_load: yaml.load without an explicit Loader is deprecated and
        # allows arbitrary object construction from the YAML file.
        cur_cfg = yaml.safe_load(fp)
    return tuple(cur_cfg["cur_conf"][i] for i in range(6))
def get_data():
    """Load the CIFAR-10 dataset with pixel values scaled to [0, 1] float32.

    Returns (x_train, y_train, x_test, y_test).
    """
    from tensorflow.keras.datasets import cifar10
    import numpy as np
    (train_x, train_y), (test_x, test_y) = cifar10.load_data()
    # Normalize the 8-bit pixel values into [0, 1] and cast to float32.
    train_x = (train_x / 255.0).astype(np.float32)
    test_x = (test_x / 255.0).astype(np.float32)
    return train_x, train_y, test_x, test_y
if __name__ == "__main__":
    # Load the configurable hyperparameters.  Fixed: the original was
    # missing the comma after stem_filter_size (a syntax error) and
    # unpacked six names from a five-element tuple.
    (stem_n_filters,
     stem_filter_size,
     depthwise_block1_n_filters,
     depthwise_block2_n_filters,
     depthwise_block3_n_filters,
     depthwise_block4_n_filters) = get_configurable_hyperparams()

    alpha = 1          # width multiplier
    dropout = 0.5      # dropout percentage
    n_classes = 1000   # number of classes
    # NOTE(review): CIFAR-10 has 10 classes and 32x32 images, while the head
    # uses 1000 classes and the input is 224x224 - confirm the intended
    # dataset/resolution before training.

    inputs = Input(shape=(224, 224, 3))

    # Create the stem group.
    x = stem(inputs, alpha, stem_n_filters, stem_filter_size)

    # First depthwise-separable group: strided block reduces the feature
    # map size, followed by one unit-stride block.
    x = depthwise_block(x, depthwise_block1_n_filters, alpha, strides=(2, 2))
    x = depthwise_block(x, depthwise_block1_n_filters, alpha, strides=(1, 1))

    # Second depthwise-separable group.
    x = depthwise_block(x, depthwise_block2_n_filters, alpha, strides=(2, 2))
    x = depthwise_block(x, depthwise_block2_n_filters, alpha, strides=(1, 1))

    # Third group: one strided block followed by five unit-stride blocks.
    x = depthwise_block(x, depthwise_block3_n_filters, alpha, strides=(2, 2))
    for _ in range(5):
        x = depthwise_block(x, depthwise_block3_n_filters, alpha, strides=(1, 1))

    # Fourth depthwise-separable group.
    x = depthwise_block(x, depthwise_block4_n_filters, alpha, strides=(2, 2))
    x = depthwise_block(x, depthwise_block4_n_filters, alpha, strides=(1, 1))

    # Create the classifier head and instantiate the model.
    outputs = classifier(x, alpha, dropout, n_classes)
    model = Model(inputs, outputs)
    model.compile(loss='sparse_categorical_crossentropy', optimizer='adam',
                  metrics=['acc'])
    model.summary()

    # Fixed: the original unpacked into xtrain/ytrain but then trained on
    # the undefined names x_train/y_train.
    x_train, y_train, x_test, y_test = get_data()

    # Train the model.
    model.fit(x_train, y_train, epochs=10,
              batch_size=32, validation_split=0.1, verbose=1)

    # Save the trained model next to the working directory.
    fmodel = os.path.join(os.getcwd(), "model.h5")
    model.save(fmodel)
| 35.209581 | 96 | 0.671769 | 799 | 5,880 | 4.798498 | 0.274093 | 0.039645 | 0.039124 | 0.037559 | 0.321857 | 0.283255 | 0.250913 | 0.242045 | 0.214398 | 0.183881 | 0 | 0.030847 | 0.222619 | 5,880 | 166 | 97 | 35.421687 | 0.807919 | 0.277551 | 0 | 0.115385 | 0 | 0 | 0.038731 | 0.008701 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0 | 0.076923 | null | null | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
37d2de39d6a42eafed34788e36c34749e153b301 | 500 | py | Python | info.py | altfool/mri_face_detection | 3117f7f00c98efe2260936146ce6b5454b059672 | [
"MIT"
] | 1 | 2021-11-13T02:42:49.000Z | 2021-11-13T02:42:49.000Z | info.py | altfool/mri_face_detection | 3117f7f00c98efe2260936146ce6b5454b059672 | [
"MIT"
] | null | null | null | info.py | altfool/mri_face_detection | 3117f7f00c98efe2260936146ce6b5454b059672 | [
"MIT"
] | null | null | null | import numpy as np
# Image volume dtype used throughout the project.
img_dtype = np.float32

# Full-resolution volume dimensions and dataset locations.
imgX, imgY, imgZ = (256, 256, 150)
imgs_path_withfaces = '../dataset/withfaces'
imgs_path_nofaces = '../dataset/nofaces'

# Dimensions/locations after one level of DWT: each spatial axis is halved.
imgX_dwt1, imgY_dwt1, imgZ_dwt1 = (128, 128, 75)
imgs_path_withfaces_dwt = './dataset/withfaces'
imgs_path_nofaces_dwt = './dataset/nofaces'

# Select DWT-reduced data (True) or raw full-resolution data (False).
# Fixed idiom: originally written as the obfuscated `(True, False)[0]`.
dwt_flag = True
if dwt_flag:
    imgX, imgY, imgZ = imgX_dwt1, imgY_dwt1, imgZ_dwt1
    imgs_path_withfaces = imgs_path_withfaces_dwt
    imgs_path_nofaces = imgs_path_nofaces_dwt
37d53dc9e4eafc3370db20f7342e6ffdb10aeb9f | 24,609 | py | Python | src/pretalx/orga/urls.py | martinheidegger/pretalx | d812e665c1c5ce29df3eafc1985af08e4d986fef | [
"Apache-2.0"
] | null | null | null | src/pretalx/orga/urls.py | martinheidegger/pretalx | d812e665c1c5ce29df3eafc1985af08e4d986fef | [
"Apache-2.0"
] | null | null | null | src/pretalx/orga/urls.py | martinheidegger/pretalx | d812e665c1c5ce29df3eafc1985af08e4d986fef | [
"Apache-2.0"
] | null | null | null | from django.conf.urls import include, url
from django.views.generic.base import RedirectView
from pretalx.event.models.event import SLUG_CHARS
from pretalx.orga.views import cards
from .views import (
admin,
auth,
cfp,
dashboard,
event,
mails,
organiser,
person,
plugins,
review,
schedule,
speaker,
submission,
)
app_name = "orga"
urlpatterns = [
url("^login/$", auth.LoginView.as_view(), name="login"),
url("^logout/$", auth.logout_view, name="logout"),
url("^reset/$", auth.ResetView.as_view(), name="auth.reset"),
url(r"^reset/(?P<token>\w+)$", auth.RecoverView.as_view(), name="auth.recover"),
url("^$", RedirectView.as_view(url="event", permanent=False)),
url("^admin/$", admin.AdminDashboard.as_view(), name="admin.dashboard"),
url("^admin/update/$", admin.UpdateCheckView.as_view(), name="admin.update"),
url("^me$", event.UserSettings.as_view(), name="user.view"),
url("^me/subuser$", person.SubuserView.as_view(), name="user.subuser"),
url(
r"^invitation/(?P<code>\w+)$",
event.InvitationView.as_view(),
name="invitation.view",
),
url(
"^organiser/$",
dashboard.DashboardOrganiserListView.as_view(),
name="organiser.list",
),
url(
"^organiser/new$", organiser.OrganiserDetail.as_view(), name="organiser.create"
),
url(
f"^organiser/(?P<organiser>[{SLUG_CHARS}]+)/",
include(
[
url("^$", organiser.OrganiserDetail.as_view(), name="organiser.view"),
url(
"^delete$",
organiser.OrganiserDelete.as_view(),
name="organiser.delete",
),
url("^teams/$", organiser.TeamDetail.as_view(), name="organiser.teams"),
url(
"^teams/new$",
organiser.TeamDetail.as_view(),
name="organiser.teams.create",
),
url(
"^teams/(?P<pk>[0-9]+)/$",
organiser.TeamDetail.as_view(),
name="organiser.teams.view",
),
url(
"^teams/(?P<pk>[0-9]+)/delete$",
organiser.TeamDelete.as_view(),
name="organiser.teams.delete",
),
url(
"^teams/(?P<pk>[0-9]+)/tracks$",
organiser.TeamTracks.as_view(),
name="organiser.teams.tracks",
),
url(
"^teams/(?P<pk>[0-9]+)/delete/(?P<user_pk>[0-9]+)$",
organiser.TeamDelete.as_view(),
name="organiser.teams.delete_member",
),
url(
"^teams/(?P<pk>[0-9]+)/reset/(?P<user_pk>[0-9]+)$",
organiser.TeamResetPassword.as_view(),
name="organiser.team.password_reset",
),
url(
"^teams/(?P<pk>[0-9]+)/uninvite$",
organiser.TeamUninvite.as_view(),
name="organiser.teams.uninvite",
),
url(
"^teams/(?P<pk>[0-9]+)/resend$",
organiser.TeamResend.as_view(),
name="organiser.teams.resend",
),
]
),
),
url("^event/new/$", event.EventWizard.as_view(), name="event.create"),
url("^event/typeahead/$", event.event_list, name="event.typeahead"),
url("^event/$", dashboard.DashboardEventListView.as_view(), name="event.list"),
url(
f"^event/(?P<event>[{SLUG_CHARS}]+)/",
include(
[
url(
"^$", dashboard.EventDashboardView.as_view(), name="event.dashboard"
),
url("^login/$", auth.LoginView.as_view(), name="event.login"),
url("^reset/$", auth.ResetView.as_view(), name="event.auth.reset"),
url(
r"^reset/(?P<token>\w+)$",
auth.RecoverView.as_view(),
name="event.auth.recover",
),
url("^delete$", event.EventDelete.as_view(), name="event.delete"),
url("^live$", event.EventLive.as_view(), name="event.live"),
url("^api/users$", person.UserList.as_view(), name="event.user_list"),
url(
"^cfp/$",
RedirectView.as_view(pattern_name="orga:cfp.text.view"),
name="cfp",
),
url("^cfp/flow/$", cfp.CfPFlowEditor.as_view(), name="cfp.flow"),
url(
"^cfp/questions/$",
cfp.CfPQuestionList.as_view(),
name="cfp.questions.view",
),
url(
"^cfp/questions/new$",
cfp.CfPQuestionDetail.as_view(),
name="cfp.questions.create",
),
url(
"^cfp/questions/remind$",
cfp.CfPQuestionRemind.as_view(),
name="cfp.questions.remind",
),
url(
"^cfp/questions/(?P<pk>[0-9]+)/$",
cfp.CfPQuestionDetail.as_view(),
name="cfp.question.view",
),
url(
"^cfp/questions/(?P<pk>[0-9]+)/up$",
cfp.question_move_up,
name="cfp.questions.up",
),
url(
"^cfp/questions/(?P<pk>[0-9]+)/down$",
cfp.question_move_down,
name="cfp.questions.down",
),
url(
"^cfp/questions/(?P<pk>[0-9]+)/delete$",
cfp.CfPQuestionDelete.as_view(),
name="cfp.question.delete",
),
url(
"^cfp/questions/(?P<pk>[0-9]+)/edit$",
cfp.CfPQuestionDetail.as_view(),
name="cfp.question.edit",
),
url(
"^cfp/questions/(?P<pk>[0-9]+)/toggle$",
cfp.CfPQuestionToggle.as_view(),
name="cfp.question.toggle",
),
url("^cfp/text$", cfp.CfPTextDetail.as_view(), name="cfp.text.view"),
url(
"^cfp/types/$",
cfp.SubmissionTypeList.as_view(),
name="cfp.types.view",
),
url(
"^cfp/types/new$",
cfp.SubmissionTypeDetail.as_view(),
name="cfp.types.create",
),
url(
"^cfp/types/(?P<pk>[0-9]+)/$",
cfp.SubmissionTypeDetail.as_view(),
name="cfp.type.view",
),
url(
"^cfp/types/(?P<pk>[0-9]+)/delete$",
cfp.SubmissionTypeDelete.as_view(),
name="cfp.type.delete",
),
url(
"^cfp/types/(?P<pk>[0-9]+)/default$",
cfp.SubmissionTypeDefault.as_view(),
name="cfp.type.default",
),
url("^cfp/tracks/$", cfp.TrackList.as_view(), name="cfp.tracks.view"),
url(
"^cfp/tracks/new$",
cfp.TrackDetail.as_view(),
name="cfp.track.create",
),
url(
"^cfp/tracks/(?P<pk>[0-9]+)/$",
cfp.TrackDetail.as_view(),
name="cfp.track.view",
),
url(
"^cfp/tracks/(?P<pk>[0-9]+)/delete$",
cfp.TrackDelete.as_view(),
name="cfp.track.delete",
),
url(
"^cfp/access-codes/$",
cfp.AccessCodeList.as_view(),
name="cfp.access_code.view",
),
url(
"^cfp/access-codes/new$",
cfp.AccessCodeDetail.as_view(),
name="cfp.access_code.create",
),
url(
"^cfp/access-codes/(?P<code>[A-z0-9]+)/$",
cfp.AccessCodeDetail.as_view(),
name="cfp.access_code.view",
),
url(
"^cfp/access-codes/(?P<code>[A-z0-9]+)/send$",
cfp.AccessCodeSend.as_view(),
name="cfp.access_code.send",
),
url(
"^cfp/access-codes/(?P<code>[A-z0-9]+)/delete$",
cfp.AccessCodeDelete.as_view(),
name="cfp.access_code.delete",
),
url(
"^mails/",
include(
[
url(
"^(?P<pk>[0-9]+)/$",
mails.MailDetail.as_view(),
name="mails.outbox.mail.view",
),
url(
"^(?P<pk>[0-9]+)/copy$",
mails.MailCopy.as_view(),
name="mails.outbox.mail.copy",
),
url(
"^(?P<pk>[0-9]+)/delete$",
mails.OutboxPurge.as_view(),
name="mails.outbox.mail.delete",
),
url(
"^(?P<pk>[0-9]+)/send$",
mails.OutboxSend.as_view(),
name="mails.outbox.mail.send",
),
url(
"^templates/$",
mails.TemplateList.as_view(),
name="mails.templates.list",
),
url(
"^templates/new$",
mails.TemplateDetail.as_view(),
name="mails.templates.create",
),
url(
"^templates/(?P<pk>[0-9]+)/$",
mails.TemplateDetail.as_view(),
name="mails.templates.view",
),
url(
"^templates/(?P<pk>[0-9]+)/delete$",
mails.TemplateDelete.as_view(),
name="mails.templates.delete",
),
url(
"^compose$",
mails.ComposeMail.as_view(),
name="mails.compose",
),
url("^sent$", mails.SentMail.as_view(), name="mails.sent"),
url(
"^outbox/$",
mails.OutboxList.as_view(),
name="mails.outbox.list",
),
url(
"^outbox/send$",
mails.OutboxSend.as_view(),
name="mails.outbox.send",
),
url(
"^outbox/purge$",
mails.OutboxPurge.as_view(),
name="mails.outbox.purge",
),
]
),
),
url(
"^submissions/$",
submission.SubmissionList.as_view(),
name="submissions.list",
),
url(
"^submissions/new$",
submission.SubmissionContent.as_view(),
name="submissions.create",
),
url(
"^submissions/cards/$",
cards.SubmissionCards.as_view(),
name="submissions.cards",
),
url(
"^submissions/feed/$",
submission.SubmissionFeed(),
name="submissions.feed",
),
url(
"^submissions/statistics/$",
submission.SubmissionStats.as_view(),
name="submissions.statistics",
),
url(
"^submissions/feedback/$",
submission.AllFeedbacksList.as_view(),
name="submissions.feedback",
),
url(
r"^submissions/(?P<code>[\w-]+)/",
include(
[
url(
"^$",
submission.SubmissionContent.as_view(),
name="submissions.content.view",
),
url(
"^submit$",
submission.SubmissionStateChange.as_view(),
name="submissions.submit",
),
url(
"^accept$",
submission.SubmissionStateChange.as_view(),
name="submissions.accept",
),
url(
"^reject$",
submission.SubmissionStateChange.as_view(),
name="submissions.reject",
),
url(
"^confirm",
submission.SubmissionStateChange.as_view(),
name="submissions.confirm",
),
url(
"^withdraw$",
submission.SubmissionStateChange.as_view(),
name="submissions.withdraw",
),
url(
"^delete",
submission.SubmissionStateChange.as_view(),
name="submissions.delete",
),
url(
"^cancel",
submission.SubmissionStateChange.as_view(),
name="submissions.cancel",
),
url(
"^speakers/$",
submission.SubmissionSpeakers.as_view(),
name="submissions.speakers.view",
),
url(
"^speakers/add$",
submission.SubmissionSpeakersAdd.as_view(),
name="submissions.speakers.add",
),
url(
"^speakers/delete$",
submission.SubmissionSpeakersDelete.as_view(),
name="submissions.speakers.delete",
),
url(
"^reviews/$",
review.ReviewSubmission.as_view(),
name="submissions.reviews",
),
url(
"^reviews/delete$",
review.ReviewSubmissionDelete.as_view(),
name="submissions.reviews.submission.delete",
),
url(
"^feedback/$",
submission.FeedbackList.as_view(),
name="submissions.feedback.list",
),
url(
"^toggle_featured$",
submission.ToggleFeatured.as_view(),
name="submissions.toggle_featured",
),
url(
"^anonymise/$",
submission.Anonymise.as_view(),
name="submissions.anonymise",
),
]
),
),
url("^speakers/$", speaker.SpeakerList.as_view(), name="speakers.list"),
url(
"^speakers/(?P<pk>[0-9]+)/$",
speaker.SpeakerDetail.as_view(),
name="speakers.view",
),
url(
"^speakers/(?P<pk>[0-9]+)/reset$",
speaker.SpeakerPasswordReset.as_view(),
name="speakers.reset",
),
url(
"^speakers/(?P<pk>[0-9]+)/toggle-arrived$",
speaker.SpeakerToggleArrived.as_view(),
name="speakers.arrived",
),
url(
"^info/$",
speaker.InformationList.as_view(),
name="speakers.information.list",
),
url(
"^info/new$",
speaker.InformationDetail.as_view(),
name="speakers.information.create",
),
url(
"^info/(?P<pk>[0-9]+)/$",
speaker.InformationDetail.as_view(),
name="speakers.information.view",
),
url(
"^info/(?P<pk>[0-9]+)/delete$",
speaker.InformationDelete.as_view(),
name="speakers.information.delete",
),
url(
"^reviews/$",
review.ReviewDashboard.as_view(),
name="reviews.dashboard",
),
url(
"^reviews/regenerate/$",
review.RegenerateDecisionMails.as_view(),
name="reviews.regenerate",
),
url(
"^settings/$",
event.EventDetail.as_view(),
name="settings.event.view",
),
url(
"^settings/mail$",
event.EventMailSettings.as_view(),
name="settings.mail.view",
),
url(
"^settings/plugins$",
plugins.EventPluginsView.as_view(),
name="settings.plugins.select",
),
url(
"^settings/widget$",
event.WidgetSettings.as_view(),
name="settings.widget",
),
url(
"^settings/review/$",
event.EventReviewSettings.as_view(),
name="settings.review",
),
url(
"^settings/review/phase/(?P<pk>[0-9]+)/up$",
event.phase_move_up,
name="settings.review.phase.up",
),
url(
"^settings/review/phase/(?P<pk>[0-9]+)/down$",
event.phase_move_down,
name="settings.review.phase.down",
),
url(
"^settings/review/phase/(?P<pk>[0-9]+)/delete$",
event.PhaseDelete.as_view(),
name="settings.review.phasedelete",
),
url(
"^settings/review/phase/(?P<pk>[0-9]+)/activate$",
event.PhaseActivate.as_view(),
name="settings.review.phasedelete",
),
url(
"^schedule/$", schedule.ScheduleView.as_view(), name="schedule.main"
),
url(
"^schedule/export/$",
schedule.ScheduleExportView.as_view(),
name="schedule.export",
),
url(
"^schedule/export/trigger$",
schedule.ScheduleExportTriggerView.as_view(),
name="schedule.export.trigger",
),
url(
"^schedule/export/download$",
schedule.ScheduleExportDownloadView.as_view(),
name="schedule.export.download",
),
url(
"^schedule/release$",
schedule.ScheduleReleaseView.as_view(),
name="schedule.release",
),
url(
r"^schedule/quick/(?P<code>\w+)/$",
schedule.QuickScheduleView.as_view(),
name="schedule.quick",
),
url(
"^schedule/reset$",
schedule.ScheduleResetView.as_view(),
name="schedule.reset",
),
url(
"^schedule/toggle$",
schedule.ScheduleToggleView.as_view(),
name="schedule.toggle",
),
url(
"^schedule/resend_mails$",
schedule.ScheduleResendMailsView.as_view(),
name="schedule.resend_mails",
),
url(
"^schedule/rooms/$",
schedule.RoomList.as_view(),
name="schedule.rooms.list",
),
url(
"^schedule/rooms/new$",
schedule.RoomDetail.as_view(),
name="schedule.rooms.create",
),
url(
"^schedule/rooms/(?P<pk>[0-9]+)/$",
schedule.RoomDetail.as_view(),
name="schedule.rooms.view",
),
url(
"^schedule/rooms/(?P<pk>[0-9]+)/delete$",
schedule.RoomDelete.as_view(),
name="schedule.rooms.delete",
),
url(
"^schedule/rooms/(?P<pk>[0-9]+)/up$",
schedule.room_move_up,
name="schedule.rooms.up",
),
url(
"^schedule/rooms/(?P<pk>[0-9]+)/down$",
schedule.room_move_down,
name="schedule.rooms.down",
),
url(
"^schedule/api/talks/$",
schedule.TalkList.as_view(),
name="schedule.api.talks",
),
url(
"^schedule/api/talks/(?P<pk>[0-9]+)/$",
schedule.TalkUpdate.as_view(),
name="schedule.api.update",
),
url(
"^schedule/api/availabilities/(?P<talkid>[0-9]+)/(?P<roomid>[0-9]+)/$",
schedule.RoomTalkAvailabilities.as_view(),
name="schedule.api.availabilities",
),
]
),
),
]
| 40.54201 | 91 | 0.358771 | 1,657 | 24,609 | 5.234762 | 0.138805 | 0.08439 | 0.138344 | 0.021905 | 0.420221 | 0.316117 | 0.200599 | 0.068711 | 0.028706 | 0.021674 | 0 | 0.007509 | 0.512983 | 24,609 | 606 | 92 | 40.608911 | 0.716229 | 0 | 0 | 0.444444 | 0 | 0.004975 | 0.208745 | 0.113129 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0.004975 | 0.008292 | 0 | 0.008292 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
37db93135f06b7cc7a06b9ea9f0839b0af335d54 | 6,889 | py | Python | src/ITN/srmg/core/RiemannianRight.py | Yulv-git/Awesome-Ultrasound-Standard-Plane-Detection | 2e35afaa891badf5a235b5d995102e4dc8a4cf0d | [
"MIT"
] | 1 | 2022-03-24T06:54:36.000Z | 2022-03-24T06:54:36.000Z | src/ITN/srmg/core/RiemannianRight.py | Yulv-git/Awesome-Ultrasound-Standard-Plane-Detection | 2e35afaa891badf5a235b5d995102e4dc8a4cf0d | [
"MIT"
] | null | null | null | src/ITN/srmg/core/RiemannianRight.py | Yulv-git/Awesome-Ultrasound-Standard-Plane-Detection | 2e35afaa891badf5a235b5d995102e4dc8a4cf0d | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# coding=utf-8
'''
Author: Shuangchi He / Yulv
Email: yulvchi@qq.com
Date: 2022-03-19 10:33:38
Motto: Entities should not be multiplied unnecessarily.
LastEditors: Shuangchi He
LastEditTime: 2022-03-23 00:52:55
FilePath: /Awesome-Ultrasound-Standard-Plane-Detection/src/ITN/srmg/core/RiemannianRight.py
Description: Modify here please
Init from https://github.com/yuanwei1989/plane-detection Author: Yuanwei Li (3 Oct 2018)
# Copyright (c) 2006-2017, Nina Milone, Bishesh Kanal, Benjamin Hou
# Copyright (c) 2006-2017, Imperial College of Science, Technology and Medicine
# Produced at Biomedical Image Analysis Group
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the copyright holders nor the names of any
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
Statistics on Riemannian Manifolds and Groups
---------------------------------------------
This is a set of codes to compare the computing of the different types of means on Lie groups.
These codes can be used to reproduce the experiments illustrated in the video developed for the
MICCAI Educational challenge 2014, available at: url of the video.
:Authors:
`Nina Miolane <website>`
`Bishesh Khanal <website>`
:Organization:
Asclepios Team, INRIA Sophia Antipolis.
:Version:
2017.07.05
Requirements
------------
* `Numpy 1.11 <http://www.numpy.org>`_
Notes
-----
----------
(1) Defining a mean on Lie group.
Nina Miolane. Medical Imaging. 2013. <hal-00938320>
'''
import numpy
import math
from srmg.common.group import *
from srmg.common.util import *
EPS = 1e-5
def riemExpR(a, f0, v):
    """Riemannian exponential taken from an arbitrary point f0.

    Valid for the left- and right-invariant metric: the tangent vector v
    is pulled back to the identity by (least-squares) inverting the
    differential jR(f0), mapped through the exponential at the identity,
    and the result is composed back with f0.

    :param a: metric parameter (weight of the translation block)
    :param f0: base point in the principal chart (6-vector)
    :param v: tangent vector at f0 (6-vector)
    :return: the group element exp_{f0}(v)
    """
    v_at_id = numpy.linalg.lstsq(jR(f0), v)[0]
    return grpCompose(riemExpIdR(a, v_at_id), f0)
def riemExpIdR(a, v):
    """Riemannian exponential taken from the identity.

    Valid for the left- and right-invariant metric.

    :param a: metric parameter scaling the translation components
    :param v: tangent vector at the identity (6-vector)
    :return: the group element exp_Id(v) (6-vector)
    """
    w = grpReg(-v)
    f = numpy.zeros(6)
    f[:3] = w[:3]
    f[3:] = a * w[3:]
    return grpInv(f)
def sigma2R(a, m, tabf, tabw):
    """Weighted variance of the data points about the point m.

    :param a: metric parameter
    :param m: reference point (6-vector)
    :param tabf: data points, one per row (N x 6)
    :param tabw: point weights (N,)
    :return: weighted sum of squared Riemannian distances from m, or 0
             when fewer than two points are supplied.
    """
    n_points = tabf.shape[0]
    if n_points < 2:
        print('Error: Calculating variance requires at least 2 points')
        return 0
    return sum(tabw[i] * normA2R(a, m, riemLogR(a, m, tabf[i, :]))
               for i in range(n_points))
def riemLogR(a, f0, f):
    """Riemannian logarithm of f taken from the base point f0.

    The displacement grpCompose(f, grpInv(f0)) is mapped into the tangent
    space at the identity and then pushed to the tangent space at f0 via
    the differential jR(f0).

    :param a: metric parameter
    :param f0: base point (6-vector)
    :param f: target point (6-vector)
    :return: tangent vector at f0 (6-vector)
    """
    displacement = grpCompose(f, grpInv(f0))
    return numpy.dot(jR(f0), riemLogIdR(a, displacement))
def riemLogIdR(a, f):
    """Riemannian logarithm taken from the identity.

    :param a: metric parameter (unused here; kept for API symmetry with
              the other riem* functions)
    :param f: group element (6-vector)
    :return: tangent vector at the identity (6-vector)
    """
    v = numpy.zeros(6)
    v[:3] = f[:3]
    v[3:] = numpy.dot(rotMat(-f[:3]), f[3:])
    return v
def qR(a, f):
    """Left-/right-invariant inner product matrix in the principal chart.

    Propagates the (weighted) Frobenius inner product g0 at the identity
    to the point f through the inverse differential of the translation.

    :param a: weight of the translation block of the metric
    :param f: evaluation point (6-vector)
    :return: 6x6 metric matrix at f
    """
    f = grpReg(f)
    g0 = numpy.zeros([6, 6])
    g0[:3, :3] = numpy.eye(3)
    g0[3:, 3:] = a * numpy.eye(3)
    jf = jR(f)
    return numpy.dot(numpy.dot(numpy.linalg.inv(jf.T), g0),
                     numpy.linalg.inv(jf))
def jR(f):
    """Differential of the right translation in the principal chart.

    :param f: group element (6-vector)
    :return: 6x6 Jacobian matrix
    """
    # f = makeColVector(f, 6)  # unnecessary if f is already 1D
    f = grpReg(f)
    jac = numpy.zeros([6, 6])
    jac[:3, :3] = jRotR(f[:3])
    jac[3:, :3] = -skew(f[3:])
    jac[3:, 3:] = numpy.eye(3)
    return jac
def normA2R(a, f, v):
    """Squared norm of the tangent vector v at f under the metric qR(a, f).

    :param a: metric parameter
    :param f: base point (6-vector)
    :param v: tangent vector (6-vector)
    :return: the scalar v^T Q v
    """
    v = grpReg(v)
    metric = qR(a, f)
    return numpy.dot(numpy.dot(v.T, metric), v)
def _frechet_update_R(a, mbis, tabf, tabw):
    """One fixed-point step of the weighted Frechet mean iteration.

    Averages the Riemannian logs of all data points taken at the current
    estimate mbis and maps the result back through the exponential.
    """
    aux = numpy.zeros(6)
    for i in range(tabf.shape[0]):
        aux = aux + tabw[i] * riemLogR(a, mbis, tabf[i, :])
    return riemExpR(a, mbis, aux)


def frechetR(a, tabf, tabw):
    """Compute the weighted Frechet mean of SE3 data points (right metric).

    :param a: metric parameter
    :param tabf: SE3 data points (N x 6 array)
    :param tabw: data point weights (N,)
    :return: the mean (6-vector), or None when no points are given
    """
    siz = tabf.shape[0]
    if siz < 2:
        print('Error: Calculating mean requires at least 2 points')
        if siz == 0:
            # Previously this fell through and crashed on tabf[0, :];
            # with no data there is nothing sensible to return.
            return None
    m = tabf[0, :]

    # Iteration 0
    mbis = m
    print('mbisR=' + str(mbis))
    m = _frechet_update_R(a, mbis, tabf, tabw)

    # Iterate until the step from mbis to m is negligible relative to the
    # variance of the data about mbis.
    while (normA2R(a, mbis, riemLogR(a, mbis, m)) >
           EPS * sigma2R(a, mbis, tabf, tabw)):
        mbis = m
        print('mbisR=' + str(mbis))
        m = _frechet_update_R(a, mbis, tabf, tabw)
    return m
| 27.556 | 108 | 0.609958 | 953 | 6,889 | 4.408185 | 0.376705 | 0.005237 | 0.015711 | 0.011426 | 0.240895 | 0.197096 | 0.179243 | 0.179243 | 0.179243 | 0.179243 | 0 | 0.034612 | 0.266076 | 6,889 | 249 | 109 | 27.666667 | 0.796282 | 0.650167 | 0 | 0.304348 | 0 | 0 | 0.05653 | 0 | 0 | 0 | 0 | 0.024096 | 0 | 1 | 0.130435 | false | 0 | 0.057971 | 0 | 0.333333 | 0.057971 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
37dc25007d47db4fa96ca0730b82167ce6738233 | 4,658 | py | Python | v0449gRpc_pb2.py | StormDev87/VPH_bot_python | ae83a0b61e234912c0136ef0f176e7a88603ff28 | [
"MIT"
] | 1 | 2022-02-28T16:20:33.000Z | 2022-02-28T16:20:33.000Z | v0449gRpc_pb2.py | StormDev87/VPH_bot_python | ae83a0b61e234912c0136ef0f176e7a88603ff28 | [
"MIT"
] | null | null | null | v0449gRpc_pb2.py | StormDev87/VPH_bot_python | ae83a0b61e234912c0136ef0f176e7a88603ff28 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: v0449gRpc.proto
"""Generated protocol buffer code."""
from google.protobuf import descriptor as _descriptor
from google.protobuf import descriptor_pool as _descriptor_pool
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x0fv0449gRpc.proto\x12\tv0449gRpc\"\x1b\n\x0b\x64\x61taRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\"\x1a\n\x08\x64\x61ta2Plc\x12\x0e\n\x06xmlSer\x18\x01 \x01(\t\"\x1f\n\x0cslaveReq2Plc\x12\x0f\n\x07request\x18\x01 \x01(\x05\"\x1a\n\x08\x64\x61ta2Hmi\x12\x0e\n\x06xmlSer\x18\x01 \x01(\t\"\x1b\n\ndata2PlcJs\x12\r\n\x05jsSer\x18\x01 \x01(\t\"\x1b\n\ndata2HmiJs\x12\r\n\x05jsSer\x18\x01 \x01(\t\"\x1c\n\ndata2PlcPb\x12\x0e\n\x06xmlSer\x18\x01 \x01(\t\"\x1d\n\ndataAnswer\x12\x0f\n\x07message\x18\x01 \x01(\t2\x93\x01\n\x0cv0449gRpcSvc\x12=\n\x0bxchRtDataJs\x12\x15.v0449gRpc.data2PlcJs\x1a\x15.v0449gRpc.data2HmiJs\"\x00\x12\x44\n\x10xchRtDataJsSlave\x12\x17.v0449gRpc.slaveReq2Plc\x1a\x15.v0449gRpc.data2HmiJs\"\x00\x62\x06proto3')
_DATAREQUEST = DESCRIPTOR.message_types_by_name['dataRequest']
_DATA2PLC = DESCRIPTOR.message_types_by_name['data2Plc']
_SLAVEREQ2PLC = DESCRIPTOR.message_types_by_name['slaveReq2Plc']
_DATA2HMI = DESCRIPTOR.message_types_by_name['data2Hmi']
_DATA2PLCJS = DESCRIPTOR.message_types_by_name['data2PlcJs']
_DATA2HMIJS = DESCRIPTOR.message_types_by_name['data2HmiJs']
_DATA2PLCPB = DESCRIPTOR.message_types_by_name['data2PlcPb']
_DATAANSWER = DESCRIPTOR.message_types_by_name['dataAnswer']
dataRequest = _reflection.GeneratedProtocolMessageType('dataRequest', (_message.Message,), {
'DESCRIPTOR' : _DATAREQUEST,
'__module__' : 'v0449gRpc_pb2'
# @@protoc_insertion_point(class_scope:v0449gRpc.dataRequest)
})
_sym_db.RegisterMessage(dataRequest)
data2Plc = _reflection.GeneratedProtocolMessageType('data2Plc', (_message.Message,), {
'DESCRIPTOR' : _DATA2PLC,
'__module__' : 'v0449gRpc_pb2'
# @@protoc_insertion_point(class_scope:v0449gRpc.data2Plc)
})
_sym_db.RegisterMessage(data2Plc)
slaveReq2Plc = _reflection.GeneratedProtocolMessageType('slaveReq2Plc', (_message.Message,), {
'DESCRIPTOR' : _SLAVEREQ2PLC,
'__module__' : 'v0449gRpc_pb2'
# @@protoc_insertion_point(class_scope:v0449gRpc.slaveReq2Plc)
})
_sym_db.RegisterMessage(slaveReq2Plc)
data2Hmi = _reflection.GeneratedProtocolMessageType('data2Hmi', (_message.Message,), {
'DESCRIPTOR' : _DATA2HMI,
'__module__' : 'v0449gRpc_pb2'
# @@protoc_insertion_point(class_scope:v0449gRpc.data2Hmi)
})
_sym_db.RegisterMessage(data2Hmi)
data2PlcJs = _reflection.GeneratedProtocolMessageType('data2PlcJs', (_message.Message,), {
'DESCRIPTOR' : _DATA2PLCJS,
'__module__' : 'v0449gRpc_pb2'
# @@protoc_insertion_point(class_scope:v0449gRpc.data2PlcJs)
})
_sym_db.RegisterMessage(data2PlcJs)
data2HmiJs = _reflection.GeneratedProtocolMessageType('data2HmiJs', (_message.Message,), {
'DESCRIPTOR' : _DATA2HMIJS,
'__module__' : 'v0449gRpc_pb2'
# @@protoc_insertion_point(class_scope:v0449gRpc.data2HmiJs)
})
_sym_db.RegisterMessage(data2HmiJs)
data2PlcPb = _reflection.GeneratedProtocolMessageType('data2PlcPb', (_message.Message,), {
'DESCRIPTOR' : _DATA2PLCPB,
'__module__' : 'v0449gRpc_pb2'
# @@protoc_insertion_point(class_scope:v0449gRpc.data2PlcPb)
})
_sym_db.RegisterMessage(data2PlcPb)
dataAnswer = _reflection.GeneratedProtocolMessageType('dataAnswer', (_message.Message,), {
'DESCRIPTOR' : _DATAANSWER,
'__module__' : 'v0449gRpc_pb2'
# @@protoc_insertion_point(class_scope:v0449gRpc.dataAnswer)
})
_sym_db.RegisterMessage(dataAnswer)
_V0449GRPCSVC = DESCRIPTOR.services_by_name['v0449gRpcSvc']
if _descriptor._USE_C_DESCRIPTORS == False:
DESCRIPTOR._options = None
_DATAREQUEST._serialized_start=30
_DATAREQUEST._serialized_end=57
_DATA2PLC._serialized_start=59
_DATA2PLC._serialized_end=85
_SLAVEREQ2PLC._serialized_start=87
_SLAVEREQ2PLC._serialized_end=118
_DATA2HMI._serialized_start=120
_DATA2HMI._serialized_end=146
_DATA2PLCJS._serialized_start=148
_DATA2PLCJS._serialized_end=175
_DATA2HMIJS._serialized_start=177
_DATA2HMIJS._serialized_end=204
_DATA2PLCPB._serialized_start=206
_DATA2PLCPB._serialized_end=234
_DATAANSWER._serialized_start=236
_DATAANSWER._serialized_end=265
_V0449GRPCSVC._serialized_start=268
_V0449GRPCSVC._serialized_end=415
# @@protoc_insertion_point(module_scope)
| 43.12963 | 790 | 0.800129 | 527 | 4,658 | 6.658444 | 0.239089 | 0.042747 | 0.056996 | 0.054716 | 0.266743 | 0.167569 | 0.165289 | 0.165289 | 0.129952 | 0 | 0 | 0.090549 | 0.077716 | 4,658 | 107 | 791 | 43.53271 | 0.726257 | 0.145771 | 0 | 0.210526 | 1 | 0.013158 | 0.29336 | 0.181772 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.065789 | 0 | 0.065789 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
37de891f427c0291be7aba179849ea2f6a86e5c6 | 281 | py | Python | Python/Programming Basics/Simple Calculations/17. Daily Earnings.py | teodoramilcheva/softuni-software-engineering | 98dc9faa66f42570f6538fd7ef186d2bd1d39bff | [
"MIT"
] | null | null | null | Python/Programming Basics/Simple Calculations/17. Daily Earnings.py | teodoramilcheva/softuni-software-engineering | 98dc9faa66f42570f6538fd7ef186d2bd1d39bff | [
"MIT"
] | null | null | null | Python/Programming Basics/Simple Calculations/17. Daily Earnings.py | teodoramilcheva/softuni-software-engineering | 98dc9faa66f42570f6538fd7ef186d2bd1d39bff | [
"MIT"
] | null | null | null | workdays = float(input())
daily_tips = float(input())
exchange_rate = float(input())
salary = workdays * daily_tips
annual_income = salary * 12 + salary * 2.5
net_income = annual_income - annual_income * 25 / 100
result = net_income / 365 * exchange_rate
print('%.2f' % result)
| 23.416667 | 53 | 0.711744 | 39 | 281 | 4.897436 | 0.512821 | 0.157068 | 0.188482 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.055085 | 0.160142 | 281 | 11 | 54 | 25.545455 | 0.754237 | 0 | 0 | 0 | 0 | 0 | 0.014235 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0.125 | 0 | 0 | 0 | null | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
37e0cdbd73052a4cfa66dd46c357ae89f7505242 | 424 | py | Python | python/p21.py | tonyfg/project_euler | 3a9e6352a98faaa506056b42160c91bffe93838c | [
"WTFPL"
] | null | null | null | python/p21.py | tonyfg/project_euler | 3a9e6352a98faaa506056b42160c91bffe93838c | [
"WTFPL"
] | null | null | null | python/p21.py | tonyfg/project_euler | 3a9e6352a98faaa506056b42160c91bffe93838c | [
"WTFPL"
] | null | null | null | #Q: Evaluate the sum of all the amicable numbers under 10000.
#A: 31626
def divisor_sum(n):
    """Return the sum of the proper divisors of n (all divisors below n).

    Trial division only needs to run up to n // 2 because no proper
    divisor can exceed half of n.
    """
    return sum(i for i in range(1, n // 2 + 1) if n % i == 0)


def sum_amicable(start, end):
    """Sum all members of amicable pairs with a member in [start, end).

    Two distinct numbers i and tmp are amicable when each equals the sum
    of the proper divisors of the other.  Every pair inside the range is
    discovered twice (once from each member), so the total is halved.
    """
    total = 0
    for i in range(start, end):
        tmp = divisor_sum(i)
        if i == divisor_sum(tmp) and i != tmp:
            total += i + tmp
    # Floor division keeps the original integer result (plain `/` on ints
    # was integer division under Python 2 but yields a float in Python 3).
    return total // 2


if __name__ == "__main__":
    print(sum_amicable(1, 10000))
| 26.5 | 61 | 0.610849 | 77 | 424 | 3.298701 | 0.493506 | 0.11811 | 0.047244 | 0.094488 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.071429 | 0.273585 | 424 | 15 | 62 | 28.266667 | 0.753247 | 0.261792 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0 | 0 | null | null | 0.1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
37e4f2c4b90817314cd77bae4c4800a1c5a1cfd8 | 11,933 | py | Python | alerter/src/monitorables/nodes/chainlink_node.py | SimplyVC/panic | 2f5c327ea0d14b6a49dc8f4599a255048bc2ff6d | [
"Apache-2.0"
] | 41 | 2019-08-23T12:40:42.000Z | 2022-03-28T11:06:02.000Z | alerter/src/monitorables/nodes/chainlink_node.py | SimplyVC/panic | 2f5c327ea0d14b6a49dc8f4599a255048bc2ff6d | [
"Apache-2.0"
] | 147 | 2019-08-30T22:09:48.000Z | 2022-03-30T08:46:26.000Z | alerter/src/monitorables/nodes/chainlink_node.py | SimplyVC/panic | 2f5c327ea0d14b6a49dc8f4599a255048bc2ff6d | [
"Apache-2.0"
] | 3 | 2019-09-03T21:12:28.000Z | 2021-08-18T14:27:56.000Z | from datetime import datetime
from typing import Optional, Dict, List, Union
from schema import Schema, Or
from src.monitorables.nodes.node import Node
from src.utils.exceptions import InvalidDictSchemaException
class ChainlinkNode(Node):
    """Monitorable state holder for a Chainlink node's prometheus metrics.

    Every metric starts out as None ("not yet observed") and is updated
    through the explicit set_* mutators.  The get_*_attributes helpers
    enumerate the metric variable names grouped by value type so generic
    store/load code can iterate over them.
    """

    def __init__(self, node_name: str, node_id: str, parent_id: str) -> None:
        super().__init__(node_name, node_id, parent_id)

        # Metrics
        self._went_down_at_prometheus = None
        self._current_height = None
        self._total_block_headers_received = None
        self._max_pending_tx_delay = None
        self._process_start_time_seconds = None
        self._total_gas_bumps = None
        self._total_gas_bumps_exceeds_limit = None
        self._no_of_unconfirmed_txs = None
        self._total_errored_job_runs = None
        self._current_gas_price_info = {
            'percentile': None,
            'price': None,
        }
        self._eth_balance_info = {}

        # This variable stores the url of the source used to get prometheus
        # node data. Note that this had to be done because multiple prometheus
        # sources can be associated with the same node, where at the same time
        # only one source is available, and sources switch from time to time.
        self._last_prometheus_source_used = None

        # This stores the timestamp of the last successful monitoring round.
        self._last_monitored_prometheus = None

    @property
    def is_down_prometheus(self) -> bool:
        # The interface is considered down exactly when a downtime
        # timestamp has been recorded.
        return self._went_down_at_prometheus is not None

    @property
    def went_down_at_prometheus(self) -> Optional[float]:
        return self._went_down_at_prometheus

    @property
    def current_height(self) -> Optional[int]:
        return self._current_height

    @property
    def total_block_headers_received(self) -> Optional[int]:
        return self._total_block_headers_received

    @property
    def max_pending_tx_delay(self) -> Optional[int]:
        return self._max_pending_tx_delay

    @property
    def process_start_time_seconds(self) -> Optional[float]:
        return self._process_start_time_seconds

    @property
    def total_gas_bumps(self) -> Optional[int]:
        return self._total_gas_bumps

    @property
    def total_gas_bumps_exceeds_limit(self) -> Optional[int]:
        return self._total_gas_bumps_exceeds_limit

    @property
    def no_of_unconfirmed_txs(self) -> Optional[int]:
        return self._no_of_unconfirmed_txs

    @property
    def total_errored_job_runs(self) -> Optional[int]:
        return self._total_errored_job_runs

    @property
    def current_gas_price_info(self) -> Dict[str, Optional[float]]:
        return self._current_gas_price_info

    @property
    def eth_balance_info(self) -> Dict[str, Union[str, float]]:
        return self._eth_balance_info

    @property
    def last_prometheus_source_used(self) -> Optional[str]:
        return self._last_prometheus_source_used

    @property
    def last_monitored_prometheus(self) -> Optional[float]:
        return self._last_monitored_prometheus

    @staticmethod
    def get_int_prometheus_metric_attributes() -> List[str]:
        """
        :return: A list of all variable names representing integer prometheus
        :        metrics.
        """
        return [
            'current_height',
            'total_block_headers_received',
            'max_pending_tx_delay', 'total_gas_bumps',
            'total_gas_bumps_exceeds_limit', 'no_of_unconfirmed_txs',
            'total_errored_job_runs'
        ]

    @staticmethod
    def get_float_prometheus_metric_attributes() -> List[str]:
        """
        :return: A list of all variable names representing float prometheus
        :        metrics.
        """
        return [
            'went_down_at_prometheus', 'process_start_time_seconds',
            'last_monitored_prometheus'
        ]

    @staticmethod
    def get_dict_prometheus_metric_attributes() -> List[str]:
        """
        :return: A list of all variable names representing dict prometheus
        :        metrics.
        """
        return ['current_gas_price_info', 'eth_balance_info']

    @staticmethod
    def get_str_prometheus_metric_attributes() -> List[str]:
        """
        :return: A list of all variable names representing string prometheus
        :        metrics.
        """
        return ['last_prometheus_source_used']

    def get_all_prometheus_metric_attributes(self) -> List[str]:
        """
        :return: A list of all variable names representing prometheus metrics
        """
        # Each per-type helper returns a fresh list, so concatenation is
        # safe and no defensive copy is needed.
        return (self.get_str_prometheus_metric_attributes()
                + self.get_int_prometheus_metric_attributes()
                + self.get_float_prometheus_metric_attributes()
                + self.get_dict_prometheus_metric_attributes())

    def get_int_metric_attributes(self) -> List[str]:
        """
        :return: A list of all variable names representing int metrics.
        """
        return self.get_int_prometheus_metric_attributes()

    def get_float_metric_attributes(self) -> List[str]:
        """
        :return: A list of all variable names representing float metrics.
        """
        return self.get_float_prometheus_metric_attributes()

    def get_dict_metric_attributes(self) -> List[str]:
        """
        :return: A list of all variable names representing dict metrics.
        """
        return self.get_dict_prometheus_metric_attributes()

    def get_str_metric_attributes(self) -> List[str]:
        """
        :return: A list of all variable names representing str metrics.
        """
        return self.get_str_prometheus_metric_attributes()

    def get_all_metric_attributes(self) -> List[str]:
        """
        :return: A list of all variable names representing metrics
        """
        return self.get_all_prometheus_metric_attributes()

    def set_went_down_at_prometheus(
            self, went_down_at_prometheus: Optional[float]) -> None:
        self._went_down_at_prometheus = went_down_at_prometheus

    def set_prometheus_as_down(self, downtime: Optional[float]) -> None:
        """
        This function sets the node's prometheus interface as down. It sets the
        time that the interface was initially down to the parameter 'downtime'
        if it is not None, otherwise it sets it to the current timestamp.
        :param downtime: the downtime timestamp, or None for "now"
        :return: None
        """
        if downtime is None:
            self.set_went_down_at_prometheus(datetime.now().timestamp())
        else:
            self.set_went_down_at_prometheus(downtime)

    def set_prometheus_as_up(self) -> None:
        """
        This function sets a node's prometheus interface as up. A node's
        interface is said to be up if went_down_at_prometheus is None.
        :return: None
        """
        self.set_went_down_at_prometheus(None)

    def set_current_height(self, new_height: Optional[int]) -> None:
        self._current_height = new_height

    def set_total_block_headers_received(
            self, new_total_block_headers_received: Optional[int]) -> None:
        self._total_block_headers_received = new_total_block_headers_received

    def set_max_pending_tx_delay(
            self, new_max_pending_tx_delay: Optional[int]) -> None:
        self._max_pending_tx_delay = new_max_pending_tx_delay

    def set_process_start_time_seconds(
            self, new_process_start_time_seconds: Optional[float]) -> None:
        self._process_start_time_seconds = new_process_start_time_seconds

    def set_total_gas_bumps(self, new_total_gas_bumps: Optional[int]) -> None:
        self._total_gas_bumps = new_total_gas_bumps

    def set_total_gas_bumps_exceeds_limit(
            self, new_total_gas_bumps_exceeds_limit: Optional[int]) -> None:
        self._total_gas_bumps_exceeds_limit = new_total_gas_bumps_exceeds_limit

    def set_no_of_unconfirmed_txs(
            self, new_no_of_unconfirmed_txs: Optional[int]) -> None:
        self._no_of_unconfirmed_txs = new_no_of_unconfirmed_txs

    def set_total_errored_job_runs(
            self, new_total_errored_job_runs: Optional[int]) -> None:
        self._total_errored_job_runs = new_total_errored_job_runs

    def set_current_gas_price_info(self, new_percentile: Optional[float],
                                   new_price: Optional[float]) -> None:
        """
        This method sets the current_gas_price_info dict based on the new
        percentile and price. This is done in this way to protect the Dict
        schema.
        :param new_percentile: The new percentile to be stored
        :param new_price: The new gas to be stored
        :return: None
        """
        self._current_gas_price_info['percentile'] = new_percentile
        self._current_gas_price_info['price'] = new_price

    @staticmethod
    def _new_eth_balance_info_valid(new_eth_balance_info: Dict) -> bool:
        """
        This method checks that the new eth_balance_info dict obeys the
        required schema.
        :param new_eth_balance_info: The dict to check
        :return: True if the dict obeys the required schema
        :        False otherwise
        """
        # Either a fully-populated record or an empty dict is accepted.
        schema = Schema(Or({
            'address': str,
            'balance': float,
            'latest_usage': float,
        }, {}))
        return schema.is_valid(new_eth_balance_info)

    def set_eth_balance_info(
            self, new_eth_balance_info: Dict[str, Union[str, float]]) -> None:
        """
        This method sets the new_eth_balance_info. It first checks that the new
        dict obeys the required schema. If not, an InvalidDictSchemaException
        is raised.
        :param new_eth_balance_info: The new eth_balance_info to store.
        :return: None
        """
        if self._new_eth_balance_info_valid(new_eth_balance_info):
            self._eth_balance_info = new_eth_balance_info
        else:
            raise InvalidDictSchemaException('new_eth_balance_info')

    def set_last_prometheus_source_used(
            self, new_last_prometheus_source_used: Optional[str]) -> None:
        self._last_prometheus_source_used = new_last_prometheus_source_used

    def set_last_monitored_prometheus(
            self, new_last_monitored_prometheus: Optional[float]) -> None:
        self._last_monitored_prometheus = new_last_monitored_prometheus

    def reset(self) -> None:
        """
        This method resets all metrics to their initial state
        :return: None
        """
        self.set_went_down_at_prometheus(None)
        self.set_current_height(None)
        self.set_total_block_headers_received(None)
        self.set_max_pending_tx_delay(None)
        self.set_process_start_time_seconds(None)
        self.set_total_gas_bumps(None)
        self.set_total_gas_bumps_exceeds_limit(None)
        self.set_no_of_unconfirmed_txs(None)
        self.set_total_errored_job_runs(None)
        self.set_current_gas_price_info(None, None)
        self.set_eth_balance_info({})
        self.set_last_prometheus_source_used(None)
        self.set_last_monitored_prometheus(None)
| 37.407524 | 80 | 0.67636 | 1,466 | 11,933 | 5.090723 | 0.108458 | 0.079325 | 0.111483 | 0.037518 | 0.646791 | 0.404395 | 0.257939 | 0.223771 | 0.192952 | 0.171513 | 0 | 0 | 0.252661 | 11,933 | 318 | 81 | 37.525157 | 0.836847 | 0.185704 | 0 | 0.217617 | 0 | 0 | 0.039777 | 0.024369 | 0 | 0 | 0 | 0 | 0 | 1 | 0.217617 | false | 0 | 0.025907 | 0.072539 | 0.378238 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
37eb1b0657a210c55097430b315fbcd465c4cdfe | 814 | py | Python | policykit/django_db_logger/migrations/0002_initial.py | mashton/policyk | 623523d76d63c06b6d559ad7b477d80512fbd2e7 | [
"MIT"
] | 78 | 2020-05-08T17:25:38.000Z | 2022-01-13T05:44:50.000Z | policykit/django_db_logger/migrations/0002_initial.py | mashton/policyk | 623523d76d63c06b6d559ad7b477d80512fbd2e7 | [
"MIT"
] | 302 | 2020-02-20T07:04:30.000Z | 2022-02-25T17:44:23.000Z | policykit/django_db_logger/migrations/0002_initial.py | mashton/policyk | 623523d76d63c06b6d559ad7b477d80512fbd2e7 | [
"MIT"
] | 13 | 2020-04-17T19:44:26.000Z | 2022-02-25T17:18:04.000Z | # Generated by Django 3.2.2 on 2021-09-02 15:10
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
('django_db_logger', '0001_initial'),
('policyengine', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='evaluationlog',
name='community',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='policyengine.community'),
),
migrations.AddField(
model_name='evaluationlog',
name='proposal',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='policyengine.proposal'),
),
]
| 29.071429 | 134 | 0.638821 | 89 | 814 | 5.730337 | 0.438202 | 0.078431 | 0.082353 | 0.129412 | 0.521569 | 0.521569 | 0.34902 | 0.34902 | 0.34902 | 0.34902 | 0 | 0.037097 | 0.238329 | 814 | 27 | 135 | 30.148148 | 0.785484 | 0.055283 | 0 | 0.3 | 1 | 0 | 0.179922 | 0.056063 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.1 | 0 | 0.3 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
37eb8f04b291f998b42a8e819f9ce512c5fabcfb | 277 | py | Python | code/doubanUtils.py | verazuo/douban_crawler | 042e870c74df8b6f4eb1cd2af3b90d5b6699ab8f | [
"MIT"
] | 1 | 2021-04-03T12:41:29.000Z | 2021-04-03T12:41:29.000Z | code/doubanUtils.py | verazuo/douban_crawler | 042e870c74df8b6f4eb1cd2af3b90d5b6699ab8f | [
"MIT"
] | null | null | null | code/doubanUtils.py | verazuo/douban_crawler | 042e870c74df8b6f4eb1cd2af3b90d5b6699ab8f | [
"MIT"
] | null | null | null | import requests
import re
from bs4 import BeautifulSoup
def nextPageLink(sess,soup,page,head=""):
NextPage=soup.find(class_='next').link.get('href')
req=sess.get(head + NextPage)
print(f'第{page}页:',req.status_code)
return BeautifulSoup(req.text,'html.parser') | 27.7 | 54 | 0.722022 | 41 | 277 | 4.829268 | 0.731707 | 0.121212 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.004115 | 0.122744 | 277 | 10 | 55 | 27.7 | 0.8107 | 0 | 0 | 0 | 0 | 0 | 0.100719 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.125 | false | 0 | 0.375 | 0 | 0.625 | 0.125 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 1 |
37ebf327c9046920009b45cecc899607501e8a37 | 2,530 | py | Python | sdk/communication/azure-communication-phonenumbers/azure/communication/phonenumbers/_generated/models/__init__.py | RAY-316/azure-sdk-for-python | 4f7790deaf46c6f4e965f099f36eb73a7954ad5b | [
"MIT"
] | null | null | null | sdk/communication/azure-communication-phonenumbers/azure/communication/phonenumbers/_generated/models/__init__.py | RAY-316/azure-sdk-for-python | 4f7790deaf46c6f4e965f099f36eb73a7954ad5b | [
"MIT"
] | null | null | null | sdk/communication/azure-communication-phonenumbers/azure/communication/phonenumbers/_generated/models/__init__.py | RAY-316/azure-sdk-for-python | 4f7790deaf46c6f4e965f099f36eb73a7954ad5b | [
"MIT"
] | null | null | null | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
try:
from ._models_py3 import AcquiredPhoneNumbers
from ._models_py3 import CommunicationError
from ._models_py3 import CommunicationErrorResponse
from ._models_py3 import PhoneNumberCapabilities
from ._models_py3 import PhoneNumberCapabilitiesRequest
from ._models_py3 import PhoneNumberCost
from ._models_py3 import PhoneNumberOperation
from ._models_py3 import PhoneNumberPurchaseRequest
from ._models_py3 import PhoneNumberSearchRequest
from ._models_py3 import PhoneNumberSearchResult
from ._models_py3 import PurchasedPhoneNumber
except (SyntaxError, ImportError):
from ._models import AcquiredPhoneNumbers # type: ignore
from ._models import CommunicationError # type: ignore
from ._models import CommunicationErrorResponse # type: ignore
from ._models import PhoneNumberCapabilities # type: ignore
from ._models import PhoneNumberCapabilitiesRequest # type: ignore
from ._models import PhoneNumberCost # type: ignore
from ._models import PhoneNumberOperation # type: ignore
from ._models import PhoneNumberPurchaseRequest # type: ignore
from ._models import PhoneNumberSearchRequest # type: ignore
from ._models import PhoneNumberSearchResult # type: ignore
from ._models import PurchasedPhoneNumber # type: ignore
from ._phone_numbers_client_enums import (
BillingFrequency,
PhoneNumberAssignmentType,
PhoneNumberCapabilityType,
PhoneNumberOperationStatus,
PhoneNumberOperationType,
PhoneNumberType,
)
__all__ = [
'AcquiredPhoneNumbers',
'CommunicationError',
'CommunicationErrorResponse',
'PhoneNumberCapabilities',
'PhoneNumberCapabilitiesRequest',
'PhoneNumberCost',
'PhoneNumberOperation',
'PhoneNumberPurchaseRequest',
'PhoneNumberSearchRequest',
'PhoneNumberSearchResult',
'PurchasedPhoneNumber',
'BillingFrequency',
'PhoneNumberAssignmentType',
'PhoneNumberCapabilityType',
'PhoneNumberOperationStatus',
'PhoneNumberOperationType',
'PhoneNumberType',
]
| 40.806452 | 94 | 0.729644 | 202 | 2,530 | 8.935644 | 0.351485 | 0.121884 | 0.079224 | 0.115789 | 0.289197 | 0.145152 | 0 | 0 | 0 | 0 | 0 | 0.005644 | 0.159684 | 2,530 | 61 | 95 | 41.47541 | 0.843368 | 0.235178 | 0 | 0 | 0 | 0 | 0.196242 | 0.131524 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.470588 | 0 | 0.470588 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 1 |
37f57e46f9f9adb655b1a0930224aed655bce6c7 | 1,396 | py | Python | tests/test_app/library/loans/admin.py | Pijuli/django-jazzmin | e3f9d45183d58f78bf4c6793969490631a84681d | [
"MIT"
] | 972 | 2020-05-12T19:51:01.000Z | 2022-03-31T20:18:33.000Z | tests/test_app/library/loans/admin.py | Pijuli/django-jazzmin | e3f9d45183d58f78bf4c6793969490631a84681d | [
"MIT"
] | 290 | 2020-05-12T17:35:21.000Z | 2022-03-31T15:18:59.000Z | tests/test_app/library/loans/admin.py | Pijuli/django-jazzmin | e3f9d45183d58f78bf4c6793969490631a84681d | [
"MIT"
] | 166 | 2020-06-11T10:50:47.000Z | 2022-03-24T12:19:00.000Z | from django.contrib import admin
from django.urls import path
from .models import BookLoan, Library
from .views import CustomView
class BookLoanInline(admin.StackedInline):
model = BookLoan
extra = 1
readonly_fields = ("id", "duration")
fields = (
"book",
"imprint",
"status",
"due_back",
"borrower",
"loan_start",
"duration",
)
@admin.register(BookLoan)
class BookLoanAdmin(admin.ModelAdmin):
list_display = ("book", "status", "borrower", "due_back", "id")
list_filter = ("status", "due_back")
autocomplete_fields = ("borrower",)
search_fields = ("book__title",)
readonly_fields = ("id",)
fieldsets = (
(None, {"fields": ("book", "imprint", "id")}),
("Availability", {"fields": ("status", "due_back", "duration", "borrower")}),
)
def get_urls(self):
"""
Add in a custom view to demonstrate =
"""
urls = super().get_urls()
return urls + [path("custom_view", CustomView.as_view(), name="custom_view")]
def response_change(self, request, obj):
ret = super().response_change(request, obj)
if "reserve" in request.POST:
obj.status = "r"
obj.save()
return ret
@admin.register(Library)
class LibraryAdmin(admin.ModelAdmin):
list_display = ("name", "address", "librarian")
| 25.851852 | 85 | 0.593123 | 146 | 1,396 | 5.527397 | 0.472603 | 0.034696 | 0.048327 | 0.064436 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.000961 | 0.254298 | 1,396 | 53 | 86 | 26.339623 | 0.774256 | 0.026504 | 0 | 0 | 0 | 0 | 0.180524 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.05 | false | 0 | 0.1 | 0 | 0.55 | 0.05 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 |
37f98cbd357a8c5c3fa954e32959aafbe7a7882b | 653 | py | Python | sina_spider/items.py | yanwen0614/Weibo | 5d85e39d0cf7fc848bf5a06df08acbf38661db8d | [
"Apache-2.0"
] | null | null | null | sina_spider/items.py | yanwen0614/Weibo | 5d85e39d0cf7fc848bf5a06df08acbf38661db8d | [
"Apache-2.0"
] | null | null | null | sina_spider/items.py | yanwen0614/Weibo | 5d85e39d0cf7fc848bf5a06df08acbf38661db8d | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# Define here the models for your scraped items
#
# See documentation in:
# http://doc.scrapy.org/en/latest/topics/items.html
from scrapy import Item, Field
class TweetsItem(Item):
    """Scrapy item holding the fields scraped for a single Weibo tweet."""

    # define the fields for your item here like:
    Author = Field()
    Title = Field()
    Create_time = Field()
    Id = Field()
    Context = Field()   # tweet body text
    Source = Field()
    Url = Field()
class TopicItem(Item):
    """Scrapy item holding the fields scraped for a Weibo topic entry."""

    Url = Field()
    Title = Field()
    Category = Field()
    # NOTE(review): lowercase `context` is inconsistent with the other
    # capitalized field names — spiders may reference it, so confirm before
    # renaming.
    context = Field()
    Id = Field()
    Hotlevel = Field()
    Time = Field()
def main():
    # NOTE(review): the item is constructed but never populated or used —
    # presumably a placeholder for manual testing of TopicItem.
    item = TopicItem()
    pass


if __name__ == '__main__':
    main()
37fa8681617c1303e48ae88c3c02ae64abad5b16 | 9,259 | py | Python | emission/core/wrapper/client.py | Andrew-Tan/e-mission-server | 91d59bee86e63d803e401f10f4b6a2502effedda | [
"BSD-3-Clause"
] | null | null | null | emission/core/wrapper/client.py | Andrew-Tan/e-mission-server | 91d59bee86e63d803e401f10f4b6a2502effedda | [
"BSD-3-Clause"
] | 1 | 2017-08-31T19:54:16.000Z | 2017-08-31T19:54:16.000Z | emission/core/wrapper/client.py | Andrew-Tan/e-mission-server | 91d59bee86e63d803e401f10f4b6a2502effedda | [
"BSD-3-Clause"
] | null | null | null | import json
import logging
import dateutil.parser
from datetime import datetime
# Our imports
from emission.core.get_database import get_profile_db, get_client_db, get_pending_signup_db
import emission.clients.common
class Client:
    """A client (study) registered with the e-mission platform.

    Wraps the client document stored in the client database and exposes
    helpers for activity windows, settings, user pre-registration, and
    client-specific javascript/python callbacks loaded from
    ``emission.clients.<name>``.
    """

    def __init__(self, clientName):
        # TODO: write background process to ensure that there is only one client with each name
        # Maybe clean up unused clients?
        self.clientName = clientName
        self.settings_filename = "emission/clients/%s/settings.json" % self.clientName
        self.__reload()

    def __reload(self):
        """Re-read this client's document from the database and cache its study dates."""
        self.clientJSON = None
        if self.clientName is not None:
            self.clientJSON = get_client_db().find_one({'name': self.clientName})

        # clientJSON can be None if we are creating an entry for the first time
        if self.clientJSON is None:
            # Avoid AttributeError while trying to determine whether the client is active
            self.startDatetime = None
            self.endDatetime = None
        else:
            # Do eagerly or lazily? Can also do super lazily and have
            self.startDatetime = dateutil.parser.parse(self.clientJSON['start_date'])
            self.endDatetime = dateutil.parser.parse(self.clientJSON['end_date'])

    def isActive(self, now):
        """Return True iff the study has started and has not yet ended at `now`."""
        logging.debug("Comparing %s to %s and %s" % (now, self.startDatetime, self.endDatetime))
        # No start date (unregistered client) or study has not yet started.
        if self.startDatetime is None or self.startDatetime > now:
            return False
        # Started with no end time: active indefinitely.
        if self.endDatetime is None:
            return True
        # Otherwise active only while the end time is still in the future.
        return self.endDatetime > now

    # Smart settings call, which returns the override settings if the client is
    # active, and an empty dict otherwise.
    def getSettings(self):
        if self.isActive(datetime.now()):
            logging.debug("For client %s, returning settings %s" % (self.clientName, self.clientJSON['client_settings']))
            return self.clientJSON['client_settings']
        else:
            # Returning empty dict instead of None to make the client code, which
            # will want to merge this, easier
            logging.debug("For client %s, active = false, returning {}" % (self.clientName))
            return {}

    def getDates(self):
        """Return the (start, end) datetimes of this client's study window."""
        return (self.startDatetime, self.endDatetime)

    # Figure out if the JSON object here should always be passed in
    # Having it be passed in is a lot more flexible
    # Let's compromise for now by passing it in and seeing how much of a hassle it is
    # That will also ensure that the update_client script is not a complete NOP
    def __update(self, newEntry):
        get_client_db().update({'name': self.clientName}, newEntry, upsert=True)
        self.__reload()

    def update(self, createKey=True):
        """Refresh this client's database entry from its settings file.

        Returns the (possibly newly generated) client key.
        NOTE: if createKey is False, the settings file must already contain a
        'key' entry, otherwise this raises KeyError.
        """
        import uuid
        # Use a context manager so the settings file handle is closed promptly.
        with open(self.settings_filename) as settings_file:
            newEntry = json.load(settings_file)
        if createKey:
            newEntry['key'] = str(uuid.uuid4())
        # logging.info("Updating with new entry %s" % newEntry)
        self.__update(newEntry)
        return newEntry['key']

    def __loadModule(self):
        """Import and return the python module implementing this client."""
        import importlib
        clientModule = importlib.import_module("emission.clients.%s.%s" % (self.clientName, self.clientName))
        return clientModule

    def callMethod(self, methodName, request):
        """Invoke `methodName` from this client's module with `request`."""
        clientModule = self.__loadModule()
        logging.debug("called client with %s %s" % (self.clientName, methodName))
        method = getattr(clientModule, methodName)
        logging.debug("Invoking %s on module %s" % (method, clientModule))
        return method(request)

    def getClientKey(self):
        """Return the stored client key, or None if this client has no DB entry."""
        if self.clientJSON is None:
            return None
        logging.debug("About to return %s from JSON %s" % (self.clientJSON['key'], self.clientJSON))
        return self.clientJSON['key']

    def __validateKey(self, clientKey):
        """Return True iff the study is active and `clientKey` matches the stored key."""
        if not self.isActive(datetime.now()):
            logging.info("Client %s is not yet active, so key %s is not valid" %
                         (self.clientName, clientKey))
            return False
        client_key = self.getClientKey()
        if client_key == clientKey:
            return True
        logging.info("For client %s, incoming key %s does not match stored key %s!" %
                     (self.clientName, clientKey, client_key))
        return False

    # What should we do if a user registers again after they have installed the app?
    # Options are:
    # - NOP
    # - update the study field
    # - Return error
    # For now, we update the study field, pending discussions with Maita on error reporting

    # What should we do if a user registers for a study after having installed
    # the app or having participated in a different study?
    # - add the study to the list of registered studies (but we don't support multiple studies!)
    # - update the registered study
    # - return an error
    # For now, we update the registered study since it makes life easiest for us
    # TODO: Figure out what to do here
    # Also, note that always inserting it is also fine if we move to an eventual
    # consistency model, since we will eventually clean it up again. The end
    # result will still be a NOP, though
    def __preRegister(self, userEmail):
        from emission.core.wrapper.user import User
        from emission.analysis.result import userclient
        if User.isRegistered(userEmail):
            User.fromEmail(userEmail).setStudy(self.clientName)
        else:
            pendingDoc = {
                'user_email': userEmail,
                'study': self.clientName,
                'last_update': datetime.now()}
            # Should I do insert or upsert here? If a user has pre-registered for one
            # study and then pre-registers for another study before registering, do we
            # want to throw an error or just update silently?
            # Update silently for now
            writeResult = get_pending_signup_db().update({'user_email': userEmail}, pendingDoc, upsert=True)
            # Fixed: was a Python 2 `print` statement, which is a SyntaxError
            # under Python 3.
            print('in __preRegister, writeResult = %s' % writeResult)
            if 'err' in writeResult and writeResult['err'] is not None:
                e = Exception()
                e.code = writeResult['err'][0]["code"]
                e.msg = writeResult['err'][0]["errmsg"]
                raise e
        return (get_pending_signup_db().find({'study': self.clientName}).count(),
                userclient.countForStudy(self.clientName))

    def preRegister(self, clientKey, userEmail):
        """Pre-register `userEmail` for this study after validating `clientKey`."""
        if not self.__validateKey(clientKey):
            e = Exception()
            e.code = 403
            e.msg = "This is not the client key for your study, or your study has already ended. Please contact e-mission@lists.eecs.berkeley.edu to obtain a client key, or restart your study"
            raise e
        return self.__preRegister(userEmail)

    def __callJavascriptCallback(self, methodName, params):
        """Invoke a client-module callback if the study is active; else None."""
        if self.isActive(datetime.now()):
            clientModule = self.__loadModule()
            method = getattr(clientModule, methodName)
            return method(params)
        else:
            return None

    def callJavascriptCallback(self, clientKey, method, request):
        """Key-validated wrapper around __callJavascriptCallback."""
        if not self.__validateKey(clientKey):
            e = Exception()
            e.code = 403
            e.msg = "This is not the client key for your study, or your study has already ended. Please contact e-mission@lists.eecs.berkeley.edu to obtain a client key, or restart your study"
            raise e
        return self.__callJavascriptCallback(method, request)

    # BEGIN: Standard customization hooks
    def getClientConfirmedModeQuery(self, mode):
        if self.isActive(datetime.now()):
            clientModeField = self.getClientConfirmedModeField()
            return {clientModeField: mode}
        else:
            return {}

    def getClientConfirmedModeField(self):
        if self.isActive(datetime.now()):
            clientModule = self.__loadModule()
            return clientModule.getClientConfirmedModeField()
        else:
            return None

    def getSectionFilter(self, uuid):
        if self.isActive(datetime.now()):
            return self.__loadModule().getSectionFilter(uuid)
        else:
            return []

    def getResult(self, uuid):
        if self.isActive(datetime.now()):
            return self.__loadModule().getResult(uuid)
        else:
            return None

    def clientSpecificSetters(self, uuid, sectionId, predictedModeMap):
        if self.isActive(datetime.now()):
            return self.__loadModule().clientSpecificSetters(uuid, sectionId, predictedModeMap)
        else:
            return None

    def runBackgroundTasks(self, uuid):
        if self.isActive(datetime.now()):
            self.__loadModule().runBackgroundTasks(uuid)
        else:
            logging.debug("Client is not active, skipping call...")
    # END: Standard customization hooks

    # This reads the combined set of queries from all clients
    # Read the design decisions for an example of how to improve this
    @staticmethod
    def getClientConfirmedModeQueries(mode):
        queryList = emission.clients.common.getConfirmFields()
        queryListWithMode = [{query: mode} for query in queryList]
        return [{'$or': queryListWithMode}]

    @staticmethod
    def getPendingClientRegs(userName):
        """Return the list of studies `userName` has pre-registered for (0 or 1)."""
        studyList = []
        userEmailQuery = {'user_email': userName}
        pendingReg = get_pending_signup_db().find_one(userEmailQuery)
        if pendingReg is not None:
            studyList = [pendingReg['study']]
        return studyList

    @staticmethod
    def deletePendingClientRegs(userName):
        """Remove any pending signup entries for `userName`."""
        userEmailQuery = {'user_email': userName}
        get_pending_signup_db().remove(userEmailQuery)
| 38.102881 | 186 | 0.696836 | 1,172 | 9,259 | 5.430034 | 0.259386 | 0.037398 | 0.028284 | 0.032527 | 0.181961 | 0.130264 | 0.118165 | 0.112979 | 0.080453 | 0.080453 | 0 | 0.001235 | 0.212766 | 9,259 | 242 | 187 | 38.260331 | 0.871862 | 0.233827 | 0 | 0.368421 | 0 | 0.011696 | 0.130515 | 0.017166 | 0 | 0 | 0 | 0.004132 | 0 | 0 | null | null | 0 | 0.064327 | null | null | 0.005848 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
5302a9b7f4d36ed1d8c39a9e74b3775344df1bd4 | 2,028 | py | Python | HoursSelect.py | Maxahoy/ClassVolumeSilencer | 9a05f9dd4efbbbddc74377a27027fa40b2167d02 | [
"MIT"
] | null | null | null | HoursSelect.py | Maxahoy/ClassVolumeSilencer | 9a05f9dd4efbbbddc74377a27027fa40b2167d02 | [
"MIT"
] | null | null | null | HoursSelect.py | Maxahoy/ClassVolumeSilencer | 9a05f9dd4efbbbddc74377a27027fa40b2167d02 | [
"MIT"
] | null | null | null | """
This is how I'm gonna schedule hours
IDEA: import the format example file that I'm using and is saved in the same directory
"""
import csv
import pprint
from tkinter import *
from tkinter.filedialog import askopenfilename
import StringProcessing
def selectHoursFile():
    """Show a file-picker dialog and return the path the user chose."""
    # Suppress the root Tk window — only the "Open" dialog should appear.
    Tk().withdraw()
    chosen_path = askopenfilename()
    print(chosen_path)
    return chosen_path
"""
Receives a file location, opens the csv
The format looks like this:
CLASS STARTS,Class name (optional),MON,TUES,WED,THURS,FRI,,CLASS ENDS,MON,TUES,WED,THURS,FRI
1, Stats, 10:20:00 AM,,10:20:00 AM,,10:20:00 AM,,,11:15:00 AM,,11:15:00 AM,,11:15:00 AM
2,,,09:35:00 AM,,09:35:00 AM,,,,,10:55:00 AM,,10:55:00 AM,
3,,,11:30:00 AM,11:30:00 AM,11:30:00 AM,11:30:00 AM,,,,12:25:00 PM,12:25:00 PM,12:25:00 PM,12:25:00 PM
4,,,,,,09:10:00 AM,,,,,,,10:05:00 AM
5,,12:00:00 PM,01:00:00 PM,01:00:00 PM,01:00:00 PM,01:00:00 PM,,,,04:30:00 PM,04:30:00 PM,04:30:00 PM,04:30:00 PM
6,,,,,,,,,,,,,
7,,,,,,,,,,,,,
8,,,,,,,,,,,,,
9,,,,,,,,,,,,,
10,,,,,,,,,,,,,
11,,,,,,,,,,,,,
12,,,,,,,,,,,,,
13,,,,,,,,,,,,,
14,,,,,,,,,,,,,
15,,,,,,,,,,,,,
"""
def interpretCSVFormat(csvFile):
    """Parse a class-schedule CSV (layout described in the module docstring).

    Row 0 is the header and is discarded (the original code built the dict
    and then did ``del tempDict[0]``). Debug prints and the unused
    ``classList``/``pprint`` locals were removed, and the parsed mapping is
    now returned instead of being thrown away.

    Parameters
    ----------
    csvFile : str
        Path to the schedule CSV file.

    Returns
    -------
    dict
        Maps the 1-based row number (class ID) to the string representation
        of that row's parsed fields.
    """
    class_times_by_id = {}
    with open(csvFile, "r") as csv_handle:
        row_reader = csv.reader(csv_handle, delimiter=',', quotechar="'")
        for row_number, row in enumerate(row_reader):
            if row_number == 0:
                continue  # skip the header row
            # dictionary format: class ID :: string of class days
            class_times_by_id[row_number] = str(row)
    # TODO: make the sections using ClassScheduleStorage
    return class_times_by_id
| 28.971429 | 113 | 0.607988 | 318 | 2,028 | 3.877358 | 0.430818 | 0.051906 | 0.029197 | 0.025953 | 0.193025 | 0.163828 | 0.147607 | 0.147607 | 0.131387 | 0.131387 | 0 | 0.123833 | 0.207594 | 2,028 | 69 | 114 | 29.391304 | 0.643435 | 0.233728 | 0 | 0 | 0 | 0 | 0.003831 | 0 | 0 | 0 | 0 | 0.014493 | 0 | 1 | 0.086957 | false | 0 | 0.217391 | 0 | 0.347826 | 0.173913 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
53066ef029d7bd3ef7be8bb9baad9578898d6c71 | 2,325 | py | Python | projection.py | ogawan/nisa | d758e41e4983cc35477e81d944689b0226f00ef5 | [
"MIT"
] | null | null | null | projection.py | ogawan/nisa | d758e41e4983cc35477e81d944689b0226f00ef5 | [
"MIT"
] | null | null | null | projection.py | ogawan/nisa | d758e41e4983cc35477e81d944689b0226f00ef5 | [
"MIT"
] | null | null | null | from matplotlib import pyplot as plt
def nisa_projection(years=30, annual_deposit=80, initial_budget=100):
    """Scatter-plot projected TSUMITATE NISA balances at several growth rates.

    Parameters
    ----------
    years : integer
        Number of years the deposits continue.
    annual_deposit : integer
        Amount added to the NISA account each year.
    initial_budget : integer
        Starting balance.

    Returns
    -------
    matplotlib figure
    """
    for rate in [1.00, 1.01, 1.02, 1.03, 1.04, 1.05]:
        balance = initial_budget
        principal_history = []
        balance_history = []
        for year in range(0, years):
            if year == 0:
                balance_history.append(balance)
                principal_history.append(balance)
            next_principal = principal_history[-1] + annual_deposit
            balance = balance * rate + annual_deposit
            if year > 0:
                balance_history.append(balance)
                principal_history.append(next_principal)
        plt.scatter(list(range(0, years)), balance_history)

    plt.legend(["0%", "1%", "2%", "3%", "4%", "5%"])
    plt.xlabel("Years")
    plt.ylabel("Money (Man yen)")
# Reference: https://plotly.com/python/figure-labels/
import pandas as pd
import plotly.graph_objects as go
def nisa_projection_plotly(years=30, annual_deposit=80, initial_budget=100):
    """Interactive (plotly) version of the TSUMITATE NISA deposit projection.

    Parameters
    ----------
    years : integer
        Number of years the deposits continue.
    annual_deposit : integer
        Amount added to the NISA account each year.
    initial_budget : integer
        Starting balance.

    Returns
    -------
    plotly figures.
    """
    projections = {}
    for rate in [1.00, 1.01, 1.02, 1.03, 1.04, 1.05]:
        balance = initial_budget
        principal_history = []
        balance_history = []
        for year in range(0, years):
            if year == 0:
                balance_history.append(balance)
                principal_history.append(balance)
            next_principal = principal_history[-1] + annual_deposit
            balance = balance * rate + annual_deposit
            if year > 0:
                balance_history.append(balance)
                principal_history.append(next_principal)
        # Key is the last character of the rate's repr (e.g. 1.03 -> "3 %").
        projections["{} %".format(str(rate)[-1])] = balance_history

    df = pd.DataFrame(projections)
    fig = go.Figure()
    for column in df.columns:
        fig.add_trace(go.Scatter(x=df.index, y=df[column], name=column))
    fig.update_layout(
        title="NISA PLOT",
        xaxis_title="Years",
        yaxis_title="Man Yen",
        width=500,
        height=400,
    )
    fig.show()
# Script entry point: render the demo plots only when run directly, not when
# this module is imported by other code.
if __name__ == "__main__":
    nisa_projection(30, 80, 100)
    nisa_projection_plotly(30, 80, 100)
| 21.728972 | 76 | 0.594409 | 317 | 2,325 | 4.271293 | 0.33123 | 0.076809 | 0.011817 | 0.020679 | 0.636632 | 0.636632 | 0.636632 | 0.636632 | 0.636632 | 0.636632 | 0 | 0.050058 | 0.261075 | 2,325 | 106 | 77 | 21.933962 | 0.738068 | 0.287742 | 0 | 0.509804 | 0 | 0 | 0.036008 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.039216 | false | 0 | 0.058824 | 0 | 0.098039 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
53104f17c4720a21e638155abf65cadc6cce2788 | 24,765 | py | Python | src/poretitioner/utils/filtering.py | uwmisl/poretitioner | 0ff9f67a3b25fdcb460b11c970b2ed366da07da7 | [
"MIT"
] | 2 | 2021-03-11T21:27:16.000Z | 2021-03-18T00:58:22.000Z | src/poretitioner/utils/filtering.py | uwmisl/poretitioner | 0ff9f67a3b25fdcb460b11c970b2ed366da07da7 | [
"MIT"
] | 12 | 2021-02-19T19:36:05.000Z | 2021-03-24T15:38:02.000Z | src/poretitioner/utils/filtering.py | uwmisl/poretitioner | 0ff9f67a3b25fdcb460b11c970b2ed366da07da7 | [
"MIT"
] | null | null | null | """
=========
filtering.py
=========
This module provides more granular filtering for captures.
You can customize your own filters too.
"""
from __future__ import annotations
import re
from abc import ABC, ABCMeta, abstractmethod
from dataclasses import dataclass
from json import JSONEncoder
from pathlib import PosixPath
from typing import (
Any,
Dict,
Iterable,
Mapping,
NewType,
Optional,
Protocol,
Type,
TypedDict,
Union,
)
import h5py
import numpy as np
from h5py import File as Fast5File
from ..hdf5 import (
HasFast5,
HDF5_Group,
HDF5_GroupSerialableDataclass,
HDF5_GroupSerializable,
HDF5_GroupSerializing,
IsAttr,
)
from ..logger import Logger, getLogger
from ..signals import Capture
from .core import NumpyArrayLike, PathLikeOrString, ReadId, stripped_by_keys
from .plugin import Plugin
CaptureOrTimeSeries = Union[Capture, NumpyArrayLike]
# Unique identifier for a collection of filters (e.g. "ProfJeffsAwesomeFilters")
FilterSetId = NewType("FilterSetId", str)
# Unique identifier for an individual filter (e.g. "min_frac")
FilterName = NewType("FilterName", str)
__all__ = [
"does_pass_filters",
"get_filters",
"FilterName",
"FilterSetId",
"FilterConfig",
"Filter",
"Filters",
"DEFAULT_FILTER_PLUGINS",
"FilterSet",
"FilterConfigs",
"FilterPlugin",
"PATH",
]
@dataclass(frozen=True)
class FILTER_PATH:
    """Well-known HDF5 group paths under which filter results are stored."""

    # Root group for all filter sets. (Was an f-string with no placeholders.)
    ROOT = "/Filter/"

    @classmethod
    def filter_set_path(cls, filter_set_id: FilterSetId) -> str:
        """Path of the group holding an entire filter set."""
        return str(PosixPath(cls.ROOT, filter_set_id))

    @classmethod
    def filter_set_pass_path(cls, filter_set_id: FilterSetId) -> str:
        """Path of the 'pass' subgroup for a filter set."""
        return str(PosixPath(cls.filter_set_path(filter_set_id), "pass"))

    @classmethod
    def filter_set_pass_path_for_read_id(cls, filter_set_id: FilterSetId, read_id: ReadId) -> str:
        """Path recording that a specific read passed the filter set."""
        return str(PosixPath(cls.filter_set_pass_path(filter_set_id), read_id))
class FilterConfig(TypedDict):
    """A blueprint for how to construct a FilterPlugin.

    Contains a name, and any number of other attributes.

    Note on terminology:
    - FilterConfig: A high-level description of a filter.
    - FilterPlugin: An actual, callable, implementation of a FilterConfig.

    For custom plugins, make sure "filepath" is an attribute that points to the file to load.
    """
# Mapping of a FilterName to filter configurations.
FilterConfigs = NewType("FilterConfigs", Dict[FilterName, FilterConfig])
# TODO: Filter Plugin should check that name is unique. https://github.com/uwmisl/poretitioner/issues/91
class FilterPlugin(Plugin):
    """
    Abstract class for Filter plugins. To write your own filter, subclass this abstract
    class and implement the `apply` method and `name` property.
    """

    @classmethod
    @abstractmethod
    def name(cls) -> str:
        """Unique name for this filter.
        Make sure it doesn't conflict with any existing names.

        Returns
        -------
        str
            The unique name for this filter (e.g. "fourier_transform").

        Raises
        ------
        NotImplementedError
            Raised if this filter is called without this name method being implemented.
        """
        raise NotImplementedError(
            "'name' class method not implemented for filter. This class method should return a unique name for this filter."
        )

    @abstractmethod
    def apply(self, capture: CaptureOrTimeSeries) -> bool:
        """Returns True if a capture passes a given filter criteria.
        For instance, a range filter would check that a capture's summary statistics lie within a given range.

        Parameters
        ----------
        capture : np.typing.ArrayLike
            Time series capture to filter.

        Returns
        -------
        bool
            Whether this capture passes the filter.

        Raises
        ------
        NotImplementedError
            Raised when the filter method isn't implemented by the consuming Filter class
        """
        raise NotImplementedError(
            "'apply' method not implemented for filter. This method should return True if and only if applied to a capture that meets the filter criterion. For instance, "
        )

    def __call__(self, capture: CaptureOrTimeSeries) -> bool:
        """Apply the filter.

        Defining `__call__` lets a filter instance be used directly as a
        predicate, e.g.:

            filters = [MyCustomFilter(), AnotherCustomFilter(), ...]
            valid = [c for c in captures if all(filt(c) for filt in filters)]

        Parameters
        ----------
        capture : CaptureOrTimeSeries
            Capture to filter.

        Returns
        -------
        bool
            Whether this capture passes the filter.
        """
        result = self.apply(capture)
        return result
# Open-ended defaults: a RangeFilter constructed with no explicit bounds
# accepts every value.
RANGE_FILTER_DEFAULT_MINIMUM: float = -np.inf
RANGE_FILTER_DEFAULT_MAXIMUM: float = np.inf
class RangeFilter(FilterPlugin):
    """Passes a capture iff a value derived from it lies within [minimum, maximum]."""

    def __init__(self, minimum: Optional[float] = None, maximum: Optional[float] = None):
        """A filter that filters based on whether a signal falls between a maximum and a minimum.

        Parameters
        ----------
        minimum : float, optional
            The smallest value this signal should be allowed to take (inclusive),
            by default RANGE_FILTER_DEFAULT_MINIMUM (unbounded below).
        maximum : float, optional
            The largest value this signal should be allowed to take (inclusive),
            by default RANGE_FILTER_DEFAULT_MAXIMUM (unbounded above).
        """
        self.minimum = minimum if minimum is not None else RANGE_FILTER_DEFAULT_MINIMUM
        self.maximum = maximum if maximum is not None else RANGE_FILTER_DEFAULT_MAXIMUM

    def extract(self, capture: CaptureOrTimeSeries) -> NumpyArrayLike:
        """Extracts a summary statistic from the capture (e.g. mean, length, standard deviation).

        Returns the capture's fractionalized signal when available, and the raw
        input otherwise. Subclasses override this to reduce the signal to a
        scalar statistic before range-checking.

        Bug fix: the original try/except carried an `else` clause that
        unconditionally re-assigned `signal = capture`, silently discarding
        the `fractionalized()` result.

        Parameters
        ----------
        capture : CaptureOrTimeSeries
            Capture from which to extract data.
        """
        try:
            return capture.fractionalized()
        except AttributeError:
            # Not a Capture object; treat the input as the signal itself.
            return capture

    def is_in_range(self, value: Union[NumpyArrayLike, float]) -> bool:
        try:
            # If the value is just a float, we can use this handy syntax:
            return self.minimum <= value <= self.maximum
        except ValueError:
            # But we're not allowed to use that syntax on numpy arrays.
            return all(np.logical_and(self.minimum <= value, value <= self.maximum))

    def apply(self, signal):
        return self.is_in_range(self.extract(signal))
class StandardDeviationFilter(RangeFilter):
    """Range filter keyed on the standard deviation of the signal."""

    @classmethod
    def name(cls) -> str:
        return "stdv"

    def extract(self, capture: CaptureOrTimeSeries):
        # Reduce the (fractionalized) signal to a single summary statistic.
        return np.std(super().extract(capture))
class MeanFilter(RangeFilter):
    """Range filter keyed on the arithmetic mean of the signal."""

    @classmethod
    def name(cls) -> str:
        return "mean"

    def extract(self, capture: CaptureOrTimeSeries):
        # Reduce the (fractionalized) signal to a single summary statistic.
        return np.mean(super().extract(capture))
class MedianFilter(RangeFilter):
    """Range filter keyed on the median of the signal."""

    @classmethod
    def name(cls) -> str:
        return "median"

    def extract(self, capture: CaptureOrTimeSeries):
        # Reduce the (fractionalized) signal to a single summary statistic.
        return np.median(super().extract(capture))
class MinimumFilter(RangeFilter):
    """Range filter keyed on the minimum value of the signal."""

    @classmethod
    def name(cls) -> str:
        return "min"

    def extract(self, capture: CaptureOrTimeSeries):
        # Reduce the (fractionalized) signal to a single summary statistic.
        return np.min(super().extract(capture))
class MaximumFilter(RangeFilter):
    """Range filter keyed on the maximum value of the signal."""

    @classmethod
    def name(cls) -> str:
        return "max"

    def extract(self, capture: CaptureOrTimeSeries):
        # Reduce the (fractionalized) signal to a single summary statistic.
        return np.max(super().extract(capture))
class LengthFilter(RangeFilter):
    """Range filter keyed on the number of samples in the capture."""

    @classmethod
    def name(cls) -> str:
        return "length"

    def extract(self, capture: CaptureOrTimeSeries):
        # Reduce the (fractionalized) signal to a single summary statistic.
        return len(super().extract(capture))
class EjectedFilter(FilterPlugin):
    """Filters captures based on whether they were ejected from the pore."""

    @classmethod
    def name(cls) -> str:
        return "ejected"

    def extract(self, capture: Capture):
        return capture.ejected

    def apply(self, capture: Capture) -> bool:
        # Fixed: FilterPlugin.apply is abstract, and without an implementation
        # this class could never be instantiated. Pass only ejected captures.
        return bool(self.extract(capture))
"""
How to Create Your Own Custom Filter:
Need more advanced filtering than what we provide out of the box? No problem.
Create your own custom filter by inheriting from the FilterPlugin class.
For this example, let's do something complex. Say you only want to examine captures
that have more than 5 samples with a hyperbolic tangent greater than some threshold.
That means our custom filter's `apply` function should return True if and only if
the signal has more than 5 samples greater than the threshold, after taking the hyperbolic tangent in `extract`.
"""
class MyCustomFilter(FilterPlugin):
    """Example custom filter (see the module comment above).

    Passes a capture iff at least 5 samples of tanh(signal) exceed `threshold`.
    """

    threshold: float = 0.5  # Totally arbitrary.

    @classmethod
    def name(cls) -> str:
        # Fixed for consistency: FilterPlugin declares `name` as a classmethod,
        # and every built-in filter defines it that way.
        return "foo"

    def extract(self, capture):
        # Do the transformations here, or pre-process it before the filter.

        # Gets the hyperbolic tangent of the signal.
        extracted = np.tanh(capture.signal)
        return extracted

    def apply(self, signal):
        # Only return True if at least 5 samples exceed the threshold.
        extracted = self.extract(signal)

        # Number of samples greater than the threshold.
        n_meeting_threshold = len(extracted[extracted > self.threshold])

        # Are there at least 5 samples meeting this threshold?
        meets_criteria = n_meeting_threshold >= 5
        return meets_criteria
def apply_feature_filters(capture: CaptureOrTimeSeries, filters: Iterable[FilterPlugin]) -> bool:
    """
    Check whether an array of current values (i.e. a single nanopore capture)
    passes a set of filters. Filters can be based on summary statistics
    (e.g., mean) and/or a range of allowed values.

    Notes on filter behavior: If the filters list is empty or None, there are
    no filters and the capture passes.

    Fixes: the annotation previously used `List`, which is never imported in
    this module; the leftover debug `print(filtered)` was removed.

    Parameters
    ----------
    capture : CaptureOrTimeSeries | NumpyArrayLike
        Capture containing time series of nanopore current values for a single capture, or the signal itself.
    filters : Iterable[FilterPlugin]
        FilterPlugin instances. Write your own filter by subclassing FilterPlugin.

    Returns
    -------
    boolean
        True if capture passes all filters; False otherwise.
    """
    if filters is None:
        filters = []
    # TODO: Parallelize? https://github.com/uwmisl/poretitioner/issues/67
    return all(filter_out(capture) for filter_out in filters)
def check_capture_ejection_by_read(f5, read_id):
    """Checks whether the current capture was in the pore until the voltage
    was reversed.

    Parameters
    ----------
    f5 : h5py.File object (open for reading or more)
        Capture fast5 file
    read_id : TODO

    Returns
    -------
    boolean
        True if the end of the capture coincides with the end of a voltage window.
    """
    signal_path = f"/read_{read_id}/Signal"
    try:
        # f5.get() returns None for a missing path, so the attribute access
        # below raises AttributeError when the read does not exist.
        return f5.get(signal_path).attrs["ejected"]
    except AttributeError:
        raise ValueError(f"path /read_{read_id} does not exist in the fast5 file.")
def check_capture_ejection(end_capture, voltage_ends, tol_obs=20):
    """Checks whether the current capture was in the pore until the voltage
    was reversed.

    Essentially checks whether a value (end_capture) is close enough (within
    a margin of tol_obs) to any value in voltage_ends.

    Parameters
    ----------
    end_capture : numeric
        The end time of the capture.
    voltage_ends : list of numeric
        List of times when the standard voltage ends.
    tol_obs : int, optional
        Tolerance for defining when the end of the capture = voltage end, by default 20

    Returns
    -------
    boolean
        True if the end of the capture coincides with the end of a voltage window.
    """
    return any(np.abs(end_capture - voltage_end) < tol_obs for voltage_end in voltage_ends)
def filter_like_existing(config, example_fast5, example_filter_path, fast5_files, new_filter_path):
    """Filter a set of fast5 files exactly the same way as an existing filter.

    Not yet implemented.
    """
    # Filters a set of fast5 files exactly the same as an existing filter
    # TODO : #68 : implement
    raise NotImplementedError()
def get_filter_pass_path(filter_set_id, read_id):
    # NOTE(review): `read_id` is accepted but ignored — given the function
    # name, this probably should call
    # FILTER_PATH.filter_set_pass_path_for_read_id(filter_set_id, read_id);
    # confirm with callers before changing the return value.
    return FILTER_PATH.filter_set_pass_path(filter_set_id)
# Built-in filter plugins shipped with this module.
__DEFAULT_FILTER_PLUGINS = [
    MeanFilter,
    StandardDeviationFilter,
    MedianFilter,
    MinimumFilter,
    MaximumFilter,
    LengthFilter,
]

# Registry mapping each built-in plugin's unique name() to its class.
DEFAULT_FILTER_PLUGINS = {
    filter_plugin_class.name(): filter_plugin_class
    for filter_plugin_class in __DEFAULT_FILTER_PLUGINS
}
class Filtering(Protocol):
    """Classes that adhere to the Filtering protocol
    provide an 'apply' method to an input that returns True
    if and only if the input passes its filter.

    These are also callable, so calling a filter on an input
    is functionally equivalent to calling its apply method.
    """

    def __call__(self, *args, **kwargs) -> bool:
        raise NotImplementedError("Filtering protocol hasn't implemented __call__ yet!")

    def apply(self, *args, **kwargs) -> bool:
        raise NotImplementedError("Filtering protocol hasn't implemented Apply yet!")
@dataclass
class Filter(Filtering):
    """A named filter that can be applied to some data.

    You can use this filter by just calling it on some data.

        my_signal = [1, 2, 3, 4]
        filter = Filter(...)
        passed_filter: bool = filter(my_signal)

    Parameters
    ----------
    config : FilterConfig
        A description of this filter's configuration (e.g. where it was loaded from).
    plugin : FilterPlugin
        The actual implementation of this filter.
    """

    config: FilterConfig
    plugin: FilterPlugin

    def __call__(self, *args, **kwargs) -> bool:
        return self.plugin(*args, **kwargs)

    def apply(self, *args, **kwargs) -> bool:
        return self.plugin.apply(*args, **kwargs)

    @property
    def name(self) -> FilterName:
        # The plugin class's unique name identifies this filter.
        return FilterName(self.plugin.__class__.name())

    def as_attr(self) -> Dict[str, Any]:
        """Flatten this filter's config and plugin state into one attribute dict.

        Fixed: FilterConfig is a TypedDict (a plain dict at runtime), so
        ``vars(self.config)`` raised TypeError; splat the mapping directly,
        matching HDF5_FilterSerialable.as_group below.
        """
        name = self.name
        # NOTE(review): the name is stored under its own value as the key
        # (e.g. {"mean": "mean"}), preserving the original behavior — confirm
        # whether a literal "name" key was intended.
        attrs = {**self.config, **vars(self.plugin), name: name}
        return attrs

    def from_attr(self, attr) -> IsAttr:
        ...
import json
@dataclass
class HDF5_FilterSerialable(Filter, HDF5_GroupSerialableDataclass):
    def as_group(self, parent_group: HDF5_Group, log: Optional[Logger] = None) -> HDF5_Group:
        """Serialize this filter as an HDF5 group under `parent_group`."""
        log = log if log is not None else getLogger()
        # Note: This line simply registers a group with the name 'name' in the parent group.
        this_group = HDF5_Group(parent_group.require_group(self.name))

        all_attrs = {**self.config, **vars(self.plugin)}
        this_group.create_attrs(all_attrs)

        # Implementers must now write their serialized instance to this group.
        return this_group

    @classmethod
    def from_group(
        cls, group: HDF5_Group, log: Optional[Logger] = None
    ) -> HDF5_GroupSerialableDataclass:
        # Fixed: `log` was dereferenced unconditionally below even though it
        # defaults to None; guard it the same way as_group does.
        log = log if log is not None else getLogger()
        # The trouble is, `as_group` above lumped together the attributes of
        # the FilterConfig and the FilterPlugin without recording which
        # attribute belonged to which class. Reconstructing a Filter here
        # requires splitting them apart again — likely achievable through the
        # plugin architecture, since a plugin's name is unique and its
        # attributes can be looked up from the plugin class.
        log.warning("Filter.from_group not implemented...It's a whole thing (see comment)")

        # This is pure Hail Mary.
        return super().from_group(group, log)
# class Filters(HDF5_GroupSerialableDataclass):
# filters:
Filters = Dict[FilterName, Filter]
def get_filters(filter_configs: Optional[FilterConfigs] = None) -> Filters:
    """Creates Filters from a list of filter configurations.

    Parameters
    ----------
    filter_configs : Optional[FilterConfigs]
        A mapping of filter names to their configurations, None by default (i.e. no filtering).

    Returns
    -------
    Filters
        A set of callable/applyable filters.
    """
    # Treat "no configs" the same as an empty configuration mapping.
    if filter_configs is None:
        filter_configs = FilterConfigs({})
    built: Filters = {}
    for filter_name, config in filter_configs.items():
        built[filter_name] = filter_from_config(filter_name, config)
    return built
def does_pass_filters(capture: "CaptureOrTimeSeries", filters: "Iterable[Filter]") -> bool:
    """
    Check whether an array of values (e.g. a single nanopore capture)
    passes a set of filters. Filters can be based on summary statistics
    (e.g., mean) and/or a range of allowed values.

    Parameters
    ----------
    capture : CaptureOrTimeSeries | NumpyArrayLike
        Capture containing time series of nanopore current values for a single capture, or the signal itself.
    filters : Iterable[Filter]
        The set of filters to apply. Write your own filter by subclassing FilterPlugin.

    Returns
    -------
    boolean
        True if capture passes all filters; False otherwise.
    """
    # all() short-circuits on the first failing filter, exactly like the previous
    # explicit loop (which carried a pointless `all_passed` accumulator), and
    # returns True for an empty filter set.
    return all(some_filter(capture) for some_filter in filters)
@dataclass(frozen=True)
class FilterSetProtocol(Filtering, Protocol):
# Structural interface for a named collection of filters: implementations carry
# a unique id plus the filters themselves, and can be built from raw filter
# configurations via the `from_filter_configs` factory.
filter_set_id: FilterSetId
filters: Filters
@classmethod
def from_filter_configs(cls, name: FilterSetId, filter_configs: FilterConfigs = None):
# Factory: build an instance from raw FilterConfigs (None means no filters).
...
@dataclass(frozen=True, init=False)
class FilterSet(FilterSetProtocol):
    """
    A collection of filters with a name for easy
    identification. Essentially a mapping of filter names to their implementations.
    """

    def validate(self):
        # Validation of the filter collection is not yet implemented.
        raise NotImplementedError("Implement validation for filters!")

    def __init__(self, filter_set_id: FilterSetId, filters: Filters) -> None:
        """
        :param filter_set_id: Unique identifier for this filter set.
        :param filters: Mapping of filter names to Filter instances.
        """
        # Previously this was `filterset = super().__init__(self)`, which assigned
        # None to an unused variable and passed `self` as a positional argument to
        # the superclass initialiser - a latent TypeError. Call it properly.
        super().__init__()
        # The dataclass is frozen, so attributes must be set via object.__setattr__.
        object.__setattr__(self, "filter_set_id", filter_set_id)
        object.__setattr__(self, "filters", filters)

    ############################
    #
    # FilterSetProtocol
    #
    ############################
    @classmethod
    def from_filter_configs(cls, name: FilterSetId, filter_configs: FilterConfigs = None):
        """Build a FilterSet named `name` from raw filter configurations."""
        filters: Filters = get_filters(filter_configs)
        # Equivalent to the previous manual __new__/__init__ two-step.
        return cls(name, filters)

    def apply(self, capture: CaptureOrTimeSeries) -> bool:
        """True if `capture` passes every filter in this set."""
        return does_pass_filters(capture, self.filters.values())

    def __call__(self, capture: CaptureOrTimeSeries) -> bool:
        return self.apply(capture)
class HDF5_FilterSet(FilterSet, HDF5_GroupSerialableDataclass):
    """Adapter that lets a FilterSet be serialised as an HDF5 group."""

    def __init__(self, filter_set: FilterSet) -> None:
        # Wrap (not copy) the underlying FilterSet.
        self._filterset = filter_set

    ############################
    #
    # HDF5_GroupSerializable
    #
    ############################
    def name(self):
        """The HDF5 group name: the wrapped filter set's identifier."""
        return self._filterset.filter_set_id

    def as_group(self, parent_group: HDF5_Group, log: Optional[Logger] = None) -> HDF5_Group:
        """Serialise every filter in the set into a child group of `parent_group`."""
        group_for_set = parent_group.require_group(self.name())
        for filter_entry in self._filterset.filters.values():
            serialisable = HDF5_FilterSerialable(filter_entry.config, filter_entry.plugin)
            serialisable.as_group(group_for_set)
        return HDF5_Group(group_for_set)

    # from_group is intentionally not implemented yet; deserialising a filter
    # set requires reconstructing each filter's config/plugin split first.
def filter_from_config(name: str, config: "FilterConfig", log: Logger = getLogger()) -> "Filter":
    """Creates a Filter from a config specification. If no "filepath" is present in the
    FilterConfig, it's assumed to be one of the default filters.

    Parameters
    ----------
    name : str
        The unique name of a filter.
    config : FilterConfig
        Filter configuration to build the plugin.
    log : Logger, optional
        Logger to use for information/warnings/debug, by default getLogger()

    Returns
    -------
    Filter
        A filter that can be applied to some data.

    Raises
    ------
    AttributeError
        A filter plugin could not be built from the configuration description. If this error is raised, be sure to check
        1) A plugin class with the name in the configuration is defined at the filepath described in the configuration
        2) The plugin class inherits from the `FilterPlugin` abstract base class.
    """
    filepath = config.get("filepath", None)
    if name in DEFAULT_FILTER_PLUGINS:
        plugin = DEFAULT_FILTER_PLUGINS[name]()
    else:
        # TODO: For non-default FilterPlugins, load the class from the filepath. https://github.com/uwmisl/poretitioner/issues/91
        plugin = plugin_from_file(name, filepath)
    # Make sure any plugin attributes defined in the config are moved over to the plugin instance.
    try:
        # Here, we take care of setting whatever attributes the plugin config defines on the new plugin instance.
        # object.__setattr__ is used so frozen/immutable plugin classes can still be populated.
        for key, value in config.items():
            object.__setattr__(plugin, key, value)
    except AttributeError as e:
        # Bug fix: this message was previously a plain string, so the {name} and
        # {filepath} placeholders were never interpolated; it is now an f-string.
        log.warning(
            f"""
            Uh oh, couldn't find plugin '{name}'. Are you sure:
            1) A plugin class with the name '{name}' is defined in the file {filepath}?
            2) That plugin class inherits from `FilterPlugin`?
            """
        )
        raise e
    my_filter = Filter(config, plugin)
    return my_filter
def plugin_from_file(name: str, filepath: "PathLikeOrString"):
    """Load a FilterPlugin subclass by name from a python file. Not yet implemented.

    Parameters
    ----------
    name : str
        Name of the plugin class to load from the file.
    filepath : PathLikeOrString
        Path to the python file that defines the plugin class.

    Returns
    -------
    FilterPlugin
        A runnable FilterPlugin instance (once this is implemented).

    Raises
    ------
    NotImplementedError
        Always raised: loading plugins from files is not implemented yet.
    """
    # TODO: For non-default FilterPlugins, load/unpickle the class from the filepath. https://github.com/uwmisl/poretitioner/issues/91
    raise NotImplementedError(
        "Plugin from file has not been implemented! This method should take in a filepath and filter name, and return a runnable FilterPlugin!"
    )
| 31.190176 | 171 | 0.666828 | 3,036 | 24,765 | 5.326416 | 0.170619 | 0.015027 | 0.008843 | 0.011688 | 0.286315 | 0.257127 | 0.217612 | 0.182858 | 0.176241 | 0.157133 | 0 | 0.003861 | 0.247083 | 24,765 | 793 | 172 | 31.229508 | 0.863402 | 0.42023 | 0 | 0.248387 | 0 | 0.009677 | 0.075717 | 0.003594 | 0 | 0 | 0 | 0.007566 | 0 | 1 | 0.174194 | false | 0.051613 | 0.051613 | 0.051613 | 0.448387 | 0.003226 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 1 |
5315716313b67d48f560713c76b83b311d4a39e6 | 13,110 | py | Python | tests/test_db.py | beloglazov/openstack-neat | a5a853ae2affb0cdc582e3ab641737f5ebd3d0a7 | [
"Apache-2.0"
] | 34 | 2015-01-04T08:02:37.000Z | 2022-02-19T14:43:47.000Z | tests/test_db.py | beloglazov/openstack-neat | a5a853ae2affb0cdc582e3ab641737f5ebd3d0a7 | [
"Apache-2.0"
] | 3 | 2015-01-23T07:45:15.000Z | 2019-07-03T11:16:27.000Z | tests/test_db.py | beloglazov/openstack-neat | a5a853ae2affb0cdc582e3ab641737f5ebd3d0a7 | [
"Apache-2.0"
] | 22 | 2015-01-14T17:54:46.000Z | 2021-08-09T06:09:17.000Z | # Copyright 2012 Anton Beloglazov
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from mocktest import *
from pyqcy import *
import datetime
import neat.db_utils as db_utils
import logging
logging.disable(logging.CRITICAL)
class Db(TestCase):
@qc(1)
def insert_select():
db = db_utils.init_db('sqlite:///:memory:')
db.vms.insert().execute(uuid='test')
assert db.vms.select().execute().first()['uuid'] == 'test'
db.vm_resource_usage.insert().execute(vm_id=1, cpu_mhz=1000)
assert db.vm_resource_usage.select(). \
execute().first()['cpu_mhz'] == 1000
@qc(10)
def select_cpu_mhz_for_vm(
uuid=str_(of='abc123-', min_length=36, max_length=36),
cpu_mhz=list_(of=int_(min=0, max=3000), min_length=0, max_length=10),
n=int_(min=1, max=10)
):
db = db_utils.init_db('sqlite:///:memory:')
result = db.vms.insert().execute(uuid=uuid)
vm_id = result.inserted_primary_key[0]
for mhz in cpu_mhz:
db.vm_resource_usage.insert().execute(
vm_id=vm_id,
cpu_mhz=mhz)
assert db.select_cpu_mhz_for_vm(uuid, n) == cpu_mhz[-n:]
@qc(10)
def select_last_cpu_mhz_for_vms(
vms=dict_(
keys=str_(of='abc123-', min_length=36, max_length=36),
values=list_(of=int_(min=1, max=3000),
min_length=0, max_length=10),
min_length=0, max_length=3
)
):
db = db_utils.init_db('sqlite:///:memory:')
res = {}
for uuid, data in vms.items():
for value in data:
db.insert_vm_cpu_mhz({uuid: value})
if data:
res[uuid] = data[-1]
assert db.select_last_cpu_mhz_for_vms() == res
@qc(10)
def select_vm_id(
uuid1=str_(of='abc123-', min_length=36, max_length=36),
uuid2=str_(of='abc123-', min_length=36, max_length=36)
):
db = db_utils.init_db('sqlite:///:memory:')
result = db.vms.insert().execute(uuid=uuid1)
vm_id = result.inserted_primary_key[0]
assert db.select_vm_id(uuid1) == vm_id
assert db.select_vm_id(uuid2) == vm_id + 1
@qc(10)
def insert_vm_cpu_mhz(
vms=dict_(
keys=str_(of='abc123-', min_length=36, max_length=36),
values=tuple_(int_(min=1, max=3000),
list_(of=int_(min=1, max=3000),
min_length=0, max_length=10)),
min_length=0, max_length=5
)
):
db = db_utils.init_db('sqlite:///:memory:')
initial_data = []
data_to_submit = {}
final_data = {}
for uuid, data in vms.items():
vm_id = db.select_vm_id(uuid)
data_to_submit[uuid] = data[0]
final_data[uuid] = list(data[1])
final_data[uuid].append(data[0])
for cpu_mhz in data[1]:
initial_data.append({'vm_id': vm_id,
'cpu_mhz': cpu_mhz})
if initial_data:
db.vm_resource_usage.insert().execute(initial_data)
db.insert_vm_cpu_mhz(data_to_submit)
for uuid, data in final_data.items():
assert db.select_cpu_mhz_for_vm(uuid, 11) == data
@qc(1)
def update_host():
db = db_utils.init_db('sqlite:///:memory:')
db.update_host('host1', 3000, 4, 4000)
hosts = db.hosts.select().execute().fetchall()
assert len(hosts) == 1
host = hosts[0]
assert host['hostname'] == 'host1'
assert host['cpu_mhz'] == 3000
assert host['cpu_cores'] == 4
assert host['ram'] == 4000
db.update_host('host1', 3500, 8, 8000L)
hosts = db.hosts.select().execute().fetchall()
assert len(hosts) == 1
host = hosts[0]
assert host['hostname'] == 'host1'
assert host['cpu_mhz'] == 3500
assert host['cpu_cores'] == 8
assert host['ram'] == 8000L
@qc(10)
def select_cpu_mhz_for_host(
hostname=str_(of='abc123', min_length=5, max_length=10),
cpu_mhz=list_(of=int_(min=0, max=3000), min_length=0, max_length=10),
n=int_(min=1, max=10)
):
db = db_utils.init_db('sqlite:///:memory:')
host_id = db.update_host(hostname, 1, 1, 1)
for mhz in cpu_mhz:
db.host_resource_usage.insert().execute(
host_id=host_id,
cpu_mhz=mhz)
assert db.select_cpu_mhz_for_host(hostname, n) == cpu_mhz[-n:]
@qc(10)
def select_last_cpu_mhz_for_hosts(
hosts=dict_(
keys=str_(of='abc123', min_length=5, max_length=10),
values=list_(of=int_(min=1, max=3000),
min_length=0, max_length=10),
min_length=0, max_length=3
)
):
db = db_utils.init_db('sqlite:///:memory:')
res = {}
for hostname, data in hosts.items():
db.update_host(hostname, 1, 1, 1)
for value in data:
db.insert_host_cpu_mhz(hostname, value)
if data:
res[hostname] = data[-1]
else:
res[hostname] = 0
assert db.select_last_cpu_mhz_for_hosts() == res
@qc(10)
def insert_host_cpu_mhz(
hostname=str_(of='abc123', min_length=5, max_length=10),
cpu_mhz=list_(of=int_(min=0, max=3000), min_length=1, max_length=10)
):
db = db_utils.init_db('sqlite:///:memory:')
db.update_host(hostname, 1, 1, 1)
for value in cpu_mhz:
db.insert_host_cpu_mhz(hostname, value)
assert db.select_cpu_mhz_for_host(hostname, len(cpu_mhz)) == cpu_mhz
@qc(1)
def select_host_characteristics():
db = db_utils.init_db('sqlite:///:memory:')
assert db.select_host_characteristics() == ({}, {}, {})
db.update_host('host1', 3000, 4, 4000)
db.update_host('host2', 3500, 8, 8000)
assert db.select_host_characteristics() == \
({'host1': 3000, 'host2': 3500},
{'host1': 4, 'host2': 8},
{'host1': 4000, 'host2': 8000})
@qc(1)
def select_host_id():
db = db_utils.init_db('sqlite:///:memory:')
host1_id = db.hosts.insert().execute(
hostname='host1',
cpu_mhz=1,
cpu_cores=1,
ram=1).inserted_primary_key[0]
host2_id = db.hosts.insert().execute(
hostname='host2',
cpu_mhz=1,
cpu_cores=1,
ram=1).inserted_primary_key[0]
assert db.select_host_id('host1') == host1_id
assert db.select_host_id('host2') == host2_id
@qc(1)
def select_host_ids():
db = db_utils.init_db('sqlite:///:memory:')
assert db.select_host_ids() == {}
hosts = {}
hosts['host1'] = db.update_host('host1', 1, 1, 1)
hosts['host2'] = db.update_host('host2', 1, 1, 1)
assert db.select_host_ids() == hosts
@qc(1)
def cleanup_vm_resource_usage(
uuid=str_(of='abc123-', min_length=36, max_length=36)
):
db = db_utils.init_db('sqlite:///:memory:')
result = db.vms.insert().execute(uuid=uuid)
vm_id = result.inserted_primary_key[0]
time = datetime.datetime.today()
for i in range(10):
db.vm_resource_usage.insert().execute(
vm_id=1,
cpu_mhz=i,
timestamp=time.replace(second=i))
assert db.select_cpu_mhz_for_vm(uuid, 100) == range(10)
db.cleanup_vm_resource_usage(time.replace(second=5))
assert db.select_cpu_mhz_for_vm(uuid, 100) == range(5, 10)
@qc(1)
def cleanup_host_resource_usage(
hostname=str_(of='abc123', min_length=5, max_length=10)
):
db = db_utils.init_db('sqlite:///:memory:')
host_id = db.update_host(hostname, 1, 1, 1)
time = datetime.datetime.today()
for i in range(10):
db.host_resource_usage.insert().execute(
host_id=1,
cpu_mhz=i,
timestamp=time.replace(second=i))
assert db.select_cpu_mhz_for_host(hostname, 100) == range(10)
db.cleanup_host_resource_usage(time.replace(second=5))
assert db.select_cpu_mhz_for_host(hostname, 100) == range(5, 10)
def test_insert_host_states(self):
db = db_utils.init_db('sqlite:///:memory:')
hosts = {}
hosts['host1'] = db.update_host('host1', 1, 1, 1)
hosts['host2'] = db.update_host('host2', 1, 1, 1)
db.insert_host_states({'host1': 0, 'host2': 1})
db.insert_host_states({'host1': 0, 'host2': 0})
db.insert_host_states({'host1': 1, 'host2': 1})
result = db.host_states.select().execute().fetchall()
host1 = [x[3] for x in sorted(filter(
lambda x: x[1] == hosts['host1'],
result), key=lambda x: x[0])]
self.assertEqual(host1, [0, 0, 1])
host2 = [x[3] for x in sorted(filter(
lambda x: x[1] == hosts['host2'],
result), key=lambda x: x[0])]
self.assertEqual(host2, [1, 0, 1])
@qc(10)
def select_host_states(
hosts=dict_(
keys=str_(of='abc123', min_length=1, max_length=5),
values=list_(of=int_(min=0, max=1),
min_length=0, max_length=10),
min_length=0, max_length=3
)
):
db = db_utils.init_db('sqlite:///:memory:')
res = {}
for host, data in hosts.items():
db.update_host(host, 1, 1, 1)
for state in data:
db.insert_host_states({host: state})
if data:
res[host] = data[-1]
else:
res[host] = 1
assert db.select_host_states() == res
@qc(10)
def select_active_hosts(
hosts=dict_(
keys=str_(of='abc123', min_length=1, max_length=5),
values=list_(of=int_(min=0, max=1),
min_length=0, max_length=10),
min_length=0, max_length=3
)
):
db = db_utils.init_db('sqlite:///:memory:')
res = []
for host, data in hosts.items():
db.update_host(host, 1, 1, 1)
for state in data:
db.insert_host_states({host: state})
if data and data[-1] == 1 or not data:
res.append(host)
assert set(db.select_active_hosts()) == set(res)
@qc(10)
def select_inactive_hosts(
hosts=dict_(
keys=str_(of='abc123', min_length=1, max_length=5),
values=list_(of=int_(min=0, max=1),
min_length=0, max_length=10),
min_length=0, max_length=3
)
):
hosts = {'1ab': [0], '3222': [0, 0, 1, 1, 1, 1, 0, 0], 'b222b': [0, 0, 1, 1, 1, 0, 1]}
db = db_utils.init_db('sqlite:///:memory:')
res = []
for host, data in hosts.items():
db.update_host(host, 1, 1, 1)
for state in data:
db.insert_host_states({host: state})
if data and data[-1] == 0:
res.append(host)
assert set(db.select_inactive_hosts()) == set(res)
def test_insert_host_overload(self):
db = db_utils.init_db('sqlite:///:memory:')
hosts = {}
hosts['host1'] = db.update_host('host1', 1, 1, 1)
hosts['host2'] = db.update_host('host2', 1, 1, 1)
db.insert_host_overload('host2', False)
db.insert_host_overload('host1', True)
db.insert_host_overload('host1', False)
db.insert_host_overload('host2', True)
result = db.host_overload.select().execute().fetchall()
host1 = [x[3] for x in sorted(filter(
lambda x: x[1] == hosts['host1'],
result), key=lambda x: x[0])]
self.assertEqual(host1, [1, 0])
host2 = [x[3] for x in sorted(filter(
lambda x: x[1] == hosts['host2'],
result), key=lambda x: x[0])]
self.assertEqual(host2, [0, 1])
@qc(1)
def insert_select():
db = db_utils.init_db('sqlite:///:memory:')
db.vms.insert().execute(uuid='x' * 36).inserted_primary_key[0]
vm_id = db.vms.insert().execute(uuid='vm' * 18).inserted_primary_key[0]
host_id = db.update_host('host', 1, 1, 1)
db.insert_vm_migration('vm' * 18, 'host')
result = db.vm_migrations.select().execute().first()
assert result[1] == vm_id
assert result[2] == host_id
| 36.825843 | 94 | 0.555683 | 1,802 | 13,110 | 3.813541 | 0.099334 | 0.037544 | 0.026193 | 0.037835 | 0.74025 | 0.677532 | 0.638242 | 0.579162 | 0.544529 | 0.532451 | 0 | 0.055386 | 0.300381 | 13,110 | 355 | 95 | 36.929577 | 0.693851 | 0.042182 | 0 | 0.565916 | 0 | 0 | 0.06091 | 0 | 0 | 0 | 0 | 0 | 0.125402 | 0 | null | null | 0 | 0.016077 | null | null | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
53186bfab87cf033e365cec1f3ce308e9a8c439b | 32,090 | py | Python | src/python/triangula/chassis.py | peterbrazil/brazil | 3823dca6f05b6946251800125d45069048d1bca1 | [
"Apache-2.0"
] | null | null | null | src/python/triangula/chassis.py | peterbrazil/brazil | 3823dca6f05b6946251800125d45069048d1bca1 | [
"Apache-2.0"
] | null | null | null | src/python/triangula/chassis.py | peterbrazil/brazil | 3823dca6f05b6946251800125d45069048d1bca1 | [
"Apache-2.0"
] | null | null | null | from math import cos, sin, degrees, radians, pi
from time import time
from euclid import Vector2, Point2
from numpy import array as np_array
from numpy.linalg import solve as np_solve
__author__ = 'tom'
def test():
# Ad-hoc smoke test: builds a two-wheel chassis and prints the wheel speeds for
# a fixed 0.5 rad/s rotation, once about the default origin and once about (1, 0).
# NOTE(review): constructs OmniWheel with angle=/radius= kwargs, whereas
# get_regular_triangular_chassis uses vector=/max_speed= - confirm both
# constructor forms are supported by HoloChassis.OmniWheel.
chassis = HoloChassis(wheels=[
HoloChassis.OmniWheel(position=Point2(1, 0), angle=0, radius=60),
HoloChassis.OmniWheel(position=Point2(-1, 0), angle=0, radius=60)]
)
print chassis.get_wheel_speeds(Motion(translation=Vector2(0, 0), rotation=0.5))
print chassis.get_wheel_speeds(Motion(translation=Vector2(0, 0), rotation=0.5), origin=Point2(1, 0))
def rotate_point(point, angle, origin=None):
    """
    Rotate a Point2 clockwise around another Point2.

    :param euclid.Point2 point:
        The point to rotate
    :param float angle:
        Angle in radians, clockwise rotation
    :param euclid.Point2 origin:
        Origin of the rotation, defaults to (0,0) if not specified
    :return:
        A new :class:`euclid.Point2` containing the rotated input point
    """
    if origin is None:
        origin = Point2(0, 0)
    # Negate the angle so positive values rotate clockwise.
    sin_t = sin(-angle)
    cos_t = cos(-angle)
    dx = point.x - origin.x
    dy = point.y - origin.y
    return Point2(cos_t * dx - sin_t * dy + origin.x,
                  sin_t * dx + cos_t * dy + origin.y)
def rotate_vector(vector, angle, origin=None):
    """
    Rotate a :class:`euclid.Vector2` around a :class:`euclid.Point2`

    :param euclid.Vector2 vector:
        The vector to rotate
    :param float angle:
        Angle in radians, clockwise rotation
    :param euclid.Point2 origin:
        Origin of the rotation, defaults to (0,0) if not specified
    :return:
        A new :class:`euclid.Vector2` containing the rotated input vector
    """
    if origin is None:
        origin = Point2(0, 0)
    # Negate the angle so positive values rotate clockwise.
    sin_t = sin(-angle)
    cos_t = cos(-angle)
    dx = vector.x - origin.x
    dy = vector.y - origin.y
    return Vector2(cos_t * dx - sin_t * dy + origin.x,
                   sin_t * dx + cos_t * dy + origin.y)
def smallest_difference(a, b, max_value=2 * pi):
    """
    Return the smallest-magnitude delta d such that (a + d) % max_value == b % max_value.

    Both inputs are wrapped into [0, max_value) before comparison, so values
    outside that range behave as a % max_value and b % max_value. Defaults to a
    modulus of 2*pi for use with angular quantities in radians.

    For example, with a=1.0, b=2.5, max_value=2.6 the result is -1.1 (wrapping
    backwards past zero), while with max_value=10 the result is +1.5.

    :param float a:
        First value
    :param b:
        Second value
    :param max_value:
        Modulus, defaults to 2*pi if not specified
    :return:
        The minimal-magnitude delta from a to b under the given modulus.
    """
    wrapped_a = a % max_value
    wrapped_b = b % max_value
    direct = wrapped_b - wrapped_a
    if abs(direct) <= max_value / 2:
        return direct
    # The direct path is more than half the circle, so going the other way
    # around (through the wrap point) is shorter.
    if wrapped_a >= wrapped_b:
        return (max_value - wrapped_a) + wrapped_b
    return -((max_value - wrapped_b) + wrapped_a)
def get_regular_triangular_chassis(wheel_distance, wheel_radius, max_rotations_per_second):
    """
    Build a HoloChassis with three identical omni-wheels placed at the corners of
    a regular triangle, each pointing perpendicular to the corner's normal.

    :param wheel_distance:
        Distance in millimetres between the contact points of each pair of wheels
        (i.e. the edge length of the regular triangle)
    :param wheel_radius:
        Wheel radius in millimetres
    :param max_rotations_per_second:
        Maximum wheel speed in revolutions per second
    :return:
        An appropriately configured HoloChassis
    """
    # First wheel (pink) sits on the positive Y axis; one wheel revolution moves
    # the contact point by the wheel's circumference along -X.
    base_position = Point2(0, cos(radians(30)) * wheel_distance / 2.0)
    base_vector = Vector2(-2 * pi * wheel_radius, 0)
    # The remaining wheels (yellow, green) are the same wheel rotated by 120 and
    # 240 degrees about the chassis centre.
    wheels = [HoloChassis.OmniWheel(
        position=rotate_point(base_position, turn),
        vector=rotate_vector(base_vector, turn),
        max_speed=max_rotations_per_second)
        for turn in (0, pi * 2 / 3, pi * 4 / 3)]
    return HoloChassis(wheels=wheels)
class WheelSpeeds:
    """
    Value object pairing a set of desired wheel speeds with a scaling factor.
    The scaling indicates whether the requested trajectory had to be slowed down
    because it exceeded what the chassis can physically deliver.
    """

    def __init__(self, speeds, scaling):
        """
        Create a new wheel speeds container

        :param speeds:
            A sequence of float values, one per wheel, in revolutions per second
        :param float scaling:
            The proportion of the requested trajectory that could actually be
            provided. When a requested translation or rotation was too fast for
            the chassis, the returned speeds describe the fastest possible motion
            with the same shape, and this value records the slow-down factor
            (e.g. 0.6 means the motion was scaled to 60% of the request).
        """
        self.speeds = speeds
        self.scaling = scaling

    def __str__(self):
        description = 'WheelSpeeds[ speeds={}, scaling={} ]'
        return description.format(self.speeds, self.scaling)
class Motion:
    """
    Holds the translation vector and rotation speed describing the robot's motion,
    always expressed in the robot's own coordinate frame: a translation of (0,1)
    means "forwards" regardless of the robot's orientation in world space. The
    rotation component is in radians per second, positive being clockwise when the
    plane is viewed with X positive to the right and Y positive upwards.
    """

    def __init__(self, translation=None, rotation=0):
        """
        Constructor

        :param euclid.Vector2 translation:
            Vector2 with the translation component of the motion in robot
            coordinate space. Defaults to Vector2(0,0)
        :param float rotation:
            Rotation in radians per second. Defaults to 0.
        """
        self.translation = Vector2(0, 0) if translation is None else translation
        self.rotation = rotation

    def __str__(self):
        return 'Motion[ x={}, y={}, theta={} (deg={}) ]'.format(self.translation.x, self.translation.y, self.rotation,
                                                                degrees(self.rotation))
class DeadReckoning:
    """
    Tracks the robot's position in world space from wheel encoder counts and the
    chassis kinematics. Call update_from_counts with each new set of encoder
    readings: the per-wheel count deltas (wrap-aware) become rotational speeds,
    the :class:`triangula.chassis.HoloChassis` converts those into a motion arc
    (assuming constant wheel speeds over the interval), and the arc updates the
    tracked :class:`triangula.chassis.Pose`.

    Because this integrates sensor readings, errors in chassis geometry, wheel
    dimensions or counts-per-revolution accumulate over time. When a precise
    instantaneous fix is available (e.g. a compass reading), use set_position /
    set_orientation to correct the tracked Pose explicitly.

    The constant-speed assumption means more frequent updates give more accurate
    results; the optimal rate depends on encoder resolution and chassis geometry,
    so some manual tuning may be required.
    """

    def __init__(self, chassis, counts_per_revolution=64 * 19, max_count_value=1 << 15):
        """
        Constructor

        :param triangula.chassis.HoloChassis chassis:
            The :class:`triangula.chassis.HoloChassis` defining the kinematics.
        :param float counts_per_revolution:
            Encoder counts per wheel revolution. Defaults to 64 * 19 (a 64-count
            encoder on a 19:1 reduction gearbox).
        :param int max_count_value:
            Value at which the encoder counts wrap back to zero, used to detect
            wrap-around. Defaults to 1 << 15. NOTE(review): earlier documentation
            claimed 1 << 16 "to reflect a uint16_t" while the code default is
            1 << 15 - confirm which matches the microcontroller's counter width.
        """
        self.chassis = chassis
        self.counts_per_revolution = counts_per_revolution
        self.max_count_value = max_count_value
        self.last_encoder_values = None
        self.last_reading_time = None
        self.pose = None

    def reset(self):
        """
        Clear the state of this :class:`triangula.chassis.DeadReckoning`
        """
        self.last_encoder_values = None
        self.last_reading_time = None
        self.pose = None

    def set_position(self, position):
        """
        Explicitly set the position of the robot in world coordinates, overriding
        the tracked value. Use this when better information is available.

        :param euclid.Point2 position:
            The new position to set, as a :class:`euclid.Point2`, coordinates in mm
        :return:
            The current (updated) :class:`triangula.chassis.Pose`
        """
        self.pose.position = position
        return self.pose

    def set_orientation(self, orientation):
        """
        Explicitly set the orientation of the robot in world coordinates, e.g.
        from a compass fix, to eliminate accumulated dead-reckoning error.

        :param float orientation:
            The new orientation in radians from the positive Y axis, clockwise
            positive. Normalised to the range 0-2PI.
        :return:
            The current (updated) :class:`triangula.chassis.Pose`
        """
        self.pose.orientation = orientation % (2 * pi)
        return self.pose

    def update_from_counts(self, counts):
        """
        Update the pose from a new set of encoder values

        :param counts:
            A list of encoder counts, one per wheel
        :return:
            The updated :class:`triangula.chassis.Pose` (also stored internally)
        """
        reading_time = time()
        if self.last_encoder_values is None:
            # First reading: nothing to integrate yet, start at the origin.
            self.last_encoder_values = counts
            self.last_reading_time = reading_time
            self.pose = Pose(Point2(0, 0), 0)
        else:
            time_delta = reading_time - self.last_reading_time
            # Wheel speed in revolutions/second: the shortest (wrap-aware) count
            # delta from the previous reading to the current one, divided by
            # counts-per-revolution and elapsed time. Note: the original code
            # bound the new counts to a variable named `last_reading` and the old
            # values to `current_reading`; the arithmetic below is identical but
            # named the right way round.
            wheel_speeds = [smallest_difference(previous, current, self.max_count_value) / (
                self.counts_per_revolution * time_delta) for previous, current
                            in zip(self.last_encoder_values, counts)]
            motion = self.chassis.calculate_motion(speeds=wheel_speeds)
            self.pose = self.pose.calculate_pose_change(motion, time_delta)
            self.last_encoder_values = counts
            self.last_reading_time = reading_time
        return self.pose
class Pose:
"""
A container to hold the position as a Point2 along with orientation in radians, where 0 corresponds to the positive
Y axis (0,1). Orientation is expressed in radians, with positive values indicating a rotation from the positive Y
axis in the clockwise direction, i.e. a rotation of 0 is North, pi/2 East, pi South and 3pi/2 West.
"""
def __init__(self, position=None, orientation=0):
    """
    Constructor

    :param euclid.Point2 position:
        A Point2 containing the position of the centre of the robot. Defaults to Point2(0,0)
    :param float orientation:
        Orientation in radians, 0 being the positive Y axis, positive values
        corresponding to clockwise rotations (pi/4 is East). Normalised to
        [0, 2*pi). Defaults to 0
    """
    self.position = Point2(0, 0) if position is None else position
    # Normalise the heading into [0, 2*pi).
    self.orientation = orientation % (2 * pi)
def distance_to_pose(self, to_pose):
    """
    Return the distance from this pose's position to the other pose's position.

    :param triangula.chassis.Pose to_pose:
        The target pose
    """
    offset = self.position - to_pose.position
    return abs(offset)
def is_close_to(self, to_pose, max_distance=0.001, max_orientation_difference=radians(1)):
    """
    Check whether this pose is close to the specified one, where "close" means
    both near on the plane and similar in orientation.

    :param to_pose:
        The target pose
    :param max_distance:
        Maximum planar distance that still counts as close, defaults to 0.001
    :param max_orientation_difference:
        Maximum orientation delta, in radians, that still counts as close;
        defaults to one degree (``radians(1)``)
    :return:
        True if this pose is regarded as close to the other, False otherwise
    """
    if self.distance_to_pose(to_pose) > max_distance:
        return False
    heading_delta = smallest_difference(self.orientation, to_pose.orientation)
    return abs(heading_delta) <= max_orientation_difference
def translate(self, vector):
    """
    Create a new pose with the same orientation as this one and the given
    translation applied to its position.

    :param euclid.Vector2 vector:
        Vector by which this pose's position is translated in the new Pose
    :return:
        The new Pose
    """
    moved_position = self.position + vector
    return Pose(position=moved_position, orientation=self.orientation)
def pose_to_pose_vector(self, to_pose):
    """
    Compute the Vector2, expressed in robot coordinate space (Pose objects themselves use
    world coordinates), representing the translation needed to move from this Pose to the
    target Pose.

    :param triangula.chassis.Pose to_pose:
        A target :class:`triangula.chassis.Pose`. Only the target's position is used; its
        orientation component is ignored.
    :return:
        A :class:`euclid.Vector2` holding the robot-space translation part of the motion
        from this Pose to the target.
    """
    # World-space displacement towards the target...
    world_delta = Vector2(to_pose.position.x - self.position.x,
                          to_pose.position.y - self.position.y)
    # ...rotated back by our own heading so it is expressed in robot space.
    return rotate_vector(vector=world_delta, angle=-self.orientation)
def pose_to_pose_motion(self, to_pose, time_seconds):
    """
    Compute a Motion which, applied to this Pose, moves the robot towards the target such
    that it should arrive in no less than time_seconds. Call this on every Pose update
    (e.g. from a dead reckoning module): no course planning is performed, so the target is
    homed in on incrementally by repeated calls. To move as fast as possible, pass an
    implausibly small time and then use the chassis functions to clamp the resulting
    motion to what the chassis can actually do (some motion limiting is needed anyway to
    avoid skidding and corrupting the Pose calculation logic).

    :param to_pose:
        A target :class:`triangula.chassis.Pose`
    :param time_seconds:
        Minimum number of seconds for the transition to the target pose.
    :return:
        A :class:`triangula.chassis.Motion` which attains the target pose in the given
        time. This is likely to be impossible; the chassis functions can then be used to
        extract the scaling factor and hence the real (acceleration-free) transition time.
    """
    # Divide the robot-space displacement and the smallest signed heading change
    # by the requested time to obtain velocities.
    return Motion(
        translation=self.pose_to_pose_vector(to_pose=to_pose) / time_seconds,
        rotation=smallest_difference(self.orientation, to_pose.orientation) / time_seconds)
def calculate_pose_change(self, motion, time_delta):
    """
    Given this as the starting Pose, a Motion and a time in seconds, calculate the resultant Pose at the end of the
    time interval.

    This makes use of the fact that if you travel in a consistent direction while turning at a constant rate you
    will describe an arc. By calculating the centre point of this arc we can simply rotate the starting pose around
    this centre point. This is considerably simpler than integrating over the motion 3-vector. A special case is
    used to avoid division by zero errors when there is no rotation component to the motion.

    :param triangula.chassis.Motion motion:
        The motion of the robot, assumed to be constant for the duration of the time interval. The motion is
        expressed in the robot's coordinate frame, so a translation of (0,1) is always a forward motion,
        irrespective of the current orientation.
    :param float time_delta:
        The time in seconds during which the specified motion should be applied.
    :return:
        A :class:`triangula.chassis.Pose` which represents the resultant pose after applying the supplied motion
        for the given time.
    """
    # Total delta in orientation angle over the time interval
    orientation_delta = motion.rotation * time_delta
    # Scaled translation vector rotated into world coordinate space (motion uses robot space)
    translation_vector_world = rotate_vector(motion.translation, self.orientation) * time_delta
    ':type : euclid.Vector2'
    if orientation_delta == 0:
        # No rotation over the interval, so the path is a straight line: just add the
        # rotated, scaled translation vector to the current pose.
        return self.translate(translation_vector_world)
    else:
        # The path is an arc. Its centre is offset perpendicular to the travel direction
        # (hence .cross()), scaled by the ratio of translation to rotation over the interval.
        centre_of_rotation = self.position + translation_vector_world.cross() / orientation_delta
        ':type : euclid.Point2'
        # Sweep the current position around the arc centre by the turned angle.
        final_position = rotate_point(self.position, angle=orientation_delta, origin=centre_of_rotation)
        return Pose(position=final_position, orientation=self.orientation + orientation_delta)
def __str__(self):
    """Readable description of the pose; the heading is shown in radians and degrees."""
    return 'Pose[x=%s, y=%s, orientation=%s (deg=%s)]' % (
        self.position.x, self.position.y, self.orientation, degrees(self.orientation))
class HoloChassis:
    """
    An assembly of wheels at various positions and angles, which can be driven independently to create a holonomic
    drive system. A holonomic system is one where the number of degrees of freedom in the system is equal to the
    number of directly controllable degrees of freedom, so for a chassis intended to move in two dimensions the
    degrees of freedom are two axes of translation and one of rotation. For a full holonomic system we therefore
    need at least three wheels defined.
    """

    def __init__(self, wheels):
        """
        Create a new chassis, specifying a set of wheels.

        :param wheels:
            A sequence of :class:`triangula.chassis.HoloChassis.OmniWheel` objects defining the wheels for this
            chassis.
        """
        self.wheels = wheels
        # Pre-compute the coefficient matrix mapping a motion 3-vector to wheel speeds;
        # one row of (co_x, co_y, co_theta) per wheel.
        self._matrix_coefficients = np_array([[wheel.co_x, wheel.co_y, wheel.co_theta] for wheel in self.wheels])

    def calculate_motion(self, speeds):
        """
        Invert the motion to speed calculation to obtain the actual linear and angular velocity of the chassis given
        a vector of wheel speeds. See http://docs.scipy.org/doc/numpy-1.10.1/reference/generated/numpy.linalg.solve.html

        :param speeds:
            An array of wheel speeds, expressed as floats with units of radians per second, positive being towards
            the wheel vector.
        :return:
            A :class:`triangula.chassis.Motion` object containing the calculated translation and rotation in the
            robot's coordinate space.
        """
        motion_array = np_solve(self._matrix_coefficients, np_array(speeds))
        return Motion(Vector2(x=float(motion_array[0]),
                              y=float(motion_array[1])),
                      rotation=float(motion_array[2]))

    def get_max_translation_speed(self):
        """
        Calculate the maximum translation speed, assuming all directions are equivalent and that there is no
        rotation component to the motion.

        :return:
            Maximum speed in millimetres per second as a float
        """
        # Ask for an absurdly fast translation; the scaling factor returned tells us how
        # far back it had to be throttled, which yields the true maximum.
        unrealistic_speed = 10000.0
        scaling = self.get_wheel_speeds(Motion(translation=Vector2(0, unrealistic_speed), rotation=0)).scaling
        return unrealistic_speed * scaling

    def get_max_rotation_speed(self):
        """
        Calculate the maximum rotation speed around the origin in radians per second, assuming no translation
        motion at the same time.

        :return:
            Maximum radians per second as a float
        """
        # Same trick as get_max_translation_speed, but for pure rotation.
        unrealistic_speed = 2 * pi * 100
        scaling = self.get_wheel_speeds(Motion(translation=Vector2(0, 0), rotation=unrealistic_speed)).scaling
        return unrealistic_speed * scaling

    def get_wheel_speeds(self, motion, origin=Point2(x=0, y=0)):
        """
        Calculate speeds to drive each wheel in the chassis at to attain the specified rotation / translation
        3-vector.

        :param triangula.chassis.Motion motion:
            Desired motion of the robot chassis
        :param euclid.Point2 origin:
            Optional, can define the centre of rotation to be something other than 0,0. Units are in millimetres.
            Defaults to rotating around x=0, y=0.
        :return:
            A :class:`triangula.chassis.WheelSpeeds` containing both the target wheel speeds and the scaling, if
            any, which was required to bring those speeds into the allowed range for all wheels. This prevents
            unexpected motion in cases where only a single wheel is being asked to turn too fast, in such cases all
            wheel speeds will be scaled back such that the highest is within the bounds allowed for that particular
            wheel. This can accommodate wheels with different top speeds.
        """

        def velocity_at(point):
            """
            Compute the velocity as a Vector2 at the specified point given the enclosing translation and rotation
            values.

            Method: Normalise the vector from the origin to the point, then take the cross of itself to produce a
            unit vector with direction that of a rotation around the origin. Scale this by the distance from the
            origin and by the rotation in radians per second, then simply add the translation vector.

            :param euclid.Point2 point:
                Point at which to calculate velocity
            :return:
                A :class:`euclid.Vector2` representing the velocity at the specified point in mm/s
            """
            d = point - origin
            return d.cross() * motion.rotation + motion.translation

        wheel_speeds = list(wheel.speed(velocity_at(wheel.position)) for wheel in self.wheels)
        scale = 1.0
        # Find the most-constrained wheel; every wheel is then scaled by the same factor
        # so the direction of motion is preserved.
        for speed, wheel in zip(wheel_speeds, self.wheels):
            if wheel.max_speed is not None and abs(speed) > wheel.max_speed:
                wheel_scale = wheel.max_speed / abs(speed)
                scale = min(scale, wheel_scale)
        return WheelSpeeds(speeds=list(speed * scale for speed in wheel_speeds), scaling=scale)

    class OmniWheel:
        """
        Defines a single omni-wheel within a chassis assembly. Omni-wheels are wheels formed from rollers, where
        the motion of the roller is perpendicular to the motion of the primary wheel. This is distinct from a
        mechanum wheel where the rollers are at an angle (normally around 40-30 degrees) to the primary wheel.
        Omni-wheels must be positioned on the chassis with non-parallel unit vectors, mechanum wheels can in some
        cases be positioned with all unit vectors parallel.

        A wheel has a location relative to the chassis centre and a vector describing the direction of motion of
        the wheel when driven with a positive angular velocity. The location is specified in millimetres, and the
        magnitude of the wheel vector should be equal to the number of millimetres travelled in a single
        revolution. This allows for different sized wheels to be handled within the same chassis.
        """

        def __init__(self, position, max_speed=None, angle=None, radius=None, vector=None):
            """
            Create a new omni-wheel object, specifying the position and either a direction vector directly or the
            angle in degrees clockwise from the position Y axis along with the radius of the wheel.

            :param euclid.Point2 position:
                The wheel's contact point with the surface, specified relative to the centre of the
                chassis. Units are millimetres.
            :param float max_speed:
                The maximum number of revolutions per second allowed for this wheel. When calculating the wheel
                speeds required for a given trajectory this value is used to scale back all motion if any wheel
                would have to move at an impossible speed. If not specified this defaults to None, indicating that
                no speed limit should be placed on this wheel.
            :param angle:
                The angle, specified in radians from the positive Y axis where positive values are clockwise from
                this axis when viewed from above, of the direction of travel of the wheel when driven with a
                positive speed. If this value is specified then radius must also be specified and dx,dy left as
                None.
            :param radius:
                The radius in millimetres of the wheel, measuring from the centre to the contact point with the
                surface, this may be hard to determine for some wheels based on their geometry, particularly for
                wheels with cylindrical rollers, as the radius will vary. For these cases it may be worth directly
                measuring the circumference of the entire assembly and calculating radius rather than measuring
                directly. This is used to determine the magnitude of the direction vector. If this is not None then
                the angle must also be specified, and dx,dy left as None.
            :param euclid.Vector2 vector:
                2 dimensional vector defining the translation of the wheel's contact point after a full
                revolution of the wheel.
            """
            self.position = position
            # Fixed: the default used to be 0, which contradicted the documented default of
            # None and made get_wheel_speeds scale every motion down to zero for wheels
            # constructed without an explicit max_speed.
            self.max_speed = max_speed
            if angle is None and radius is None and vector is not None:
                # Specify wheel based on direct vector
                self.vector = vector
            elif angle is not None and radius is not None and vector is None:
                # Specify based on angle from positive Y axis and radius
                circumference = 2 * pi * radius
                self.vector = Vector2(sin(angle) * circumference, cos(angle) * circumference)
            else:
                raise ValueError('Must specify exactly one of angle and radius or translation vector')
            # Pre-compute the projection coefficients used by speed() and by the chassis
            # coefficient matrix; see the derivation in speed()'s docstring.
            self.vector_magnitude_squared = self.vector.magnitude_squared()
            self.co_x = self.vector.x / self.vector_magnitude_squared
            self.co_y = self.vector.y / self.vector_magnitude_squared
            self.co_theta = (self.vector.x * self.position.y -
                             self.vector.y * self.position.x) / self.vector_magnitude_squared

        def speed(self, velocity):
            """
            Given a velocity at a wheel contact point, calculate the speed in revolutions per second at which the
            wheel should be driven.

            Method: we want to find the projection of the velocity onto the vector representing the drive of this
            wheel. We store the vector representing a single revolution of travel as self.vector, so the projection
            onto this would be velocity.dot(self.vector / abs(self.vector)). However, we want revolutions per
            second, so we must then divide again by abs(self.vector), leading to
            velocity.dot(self.vector / abs(self.vector))/abs(self.vector). Because the definition of the dot
            product is the sum of x1*x2, y1*y2, ... any scalar applied to each x, y ... of a single vector can be
            moved outside the dot product, so we can simplify as velocity.dot(self.vector) / abs(self.vector)^2.
            As the magnitude of the vector is taken by sqrt(x^2+y^2) we can simply express this as (x^2+y^2), held
            in the convenient function magnitude_squared(). So our final simplified form is
            velocity.dot(self.vector) / self.vector.magnitude_squared(). For efficiency, and because self.vector
            doesn't change, we can pre-compute this.

            :param euclid.Vector2 velocity:
                The velocity at the wheel's contact point with the surface, expressed in mm/s
            :return:
                Target wheel speed in rotations per second to hit the desired vector at the contact point.
            """
            return velocity.dot(self.vector) / self.vector_magnitude_squared
| 51.508828 | 120 | 0.67186 | 4,464 | 32,090 | 4.759409 | 0.146281 | 0.01012 | 0.013838 | 0.008237 | 0.189636 | 0.132543 | 0.104443 | 0.077991 | 0.057187 | 0.052057 | 0 | 0.010755 | 0.269804 | 32,090 | 622 | 121 | 51.59164 | 0.895954 | 0.010969 | 0 | 0.212766 | 0 | 0 | 0.023068 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0 | 0.026596 | null | null | 0.010638 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
5319f9beb8c0372d2483c2292e3473295821dc00 | 12,467 | py | Python | libs/PureCloudPlatformClientV2/models/management_unit.py | rocketbot-cl/genesysCloud | dd9d9b5ebb90a82bab98c0d88b9585c22c91f333 | [
"MIT"
] | 1 | 2021-10-08T20:46:45.000Z | 2021-10-08T20:46:45.000Z | libs/PureCloudPlatformClientV2/models/management_unit.py | rocketbot-cl/genesysCloud | dd9d9b5ebb90a82bab98c0d88b9585c22c91f333 | [
"MIT"
] | null | null | null | libs/PureCloudPlatformClientV2/models/management_unit.py | rocketbot-cl/genesysCloud | dd9d9b5ebb90a82bab98c0d88b9585c22c91f333 | [
"MIT"
] | null | null | null | # coding: utf-8
"""
Copyright 2016 SmartBear Software
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Ref: https://github.com/swagger-api/swagger-codegen
"""
from pprint import pformat
from six import iteritems
import re
import json
from ..utils import sanitize_for_serialization
class ManagementUnit(object):
    """
    NOTE: This class is auto generated by the swagger code generator program.
    Do not edit the class manually.
    """

    def __init__(self):
        """
        ManagementUnit - a model defined in Swagger

        :param dict swaggerTypes: The key is attribute name
            and the value is attribute type.
        :param dict attributeMap: The key is attribute name
            and the value is json key in definition.
        """
        # Maps each python attribute name to its swagger type name.
        self.swagger_types = {
            'id': 'str',
            'name': 'str',
            'division': 'Division',
            'business_unit': 'BusinessUnitReference',
            'start_day_of_week': 'str',
            'time_zone': 'str',
            'settings': 'ManagementUnitSettingsResponse',
            'metadata': 'WfmVersionedEntityMetadata',
            'version': 'int',
            'date_modified': 'datetime',
            'modified_by': 'UserReference',
            'self_uri': 'str'
        }

        # Maps each python attribute name to the JSON key used on the wire.
        self.attribute_map = {
            'id': 'id',
            'name': 'name',
            'division': 'division',
            'business_unit': 'businessUnit',
            'start_day_of_week': 'startDayOfWeek',
            'time_zone': 'timeZone',
            'settings': 'settings',
            'metadata': 'metadata',
            'version': 'version',
            'date_modified': 'dateModified',
            'modified_by': 'modifiedBy',
            'self_uri': 'selfUri'
        }

        self._id = None
        self._name = None
        self._division = None
        self._business_unit = None
        self._start_day_of_week = None
        self._time_zone = None
        self._settings = None
        self._metadata = None
        self._version = None
        self._date_modified = None
        self._modified_by = None
        self._self_uri = None

    @property
    def id(self):
        """
        Gets the id of this ManagementUnit.
        The globally unique identifier for the object.

        :return: The id of this ManagementUnit.
        :rtype: str
        """
        return self._id

    @id.setter
    def id(self, id):
        """
        Sets the id of this ManagementUnit.
        The globally unique identifier for the object.

        :param id: The id of this ManagementUnit.
        :type: str
        """
        self._id = id

    @property
    def name(self):
        """
        Gets the name of this ManagementUnit.

        :return: The name of this ManagementUnit.
        :rtype: str
        """
        return self._name

    @name.setter
    def name(self, name):
        """
        Sets the name of this ManagementUnit.

        :param name: The name of this ManagementUnit.
        :type: str
        """
        self._name = name

    @property
    def division(self):
        """
        Gets the division of this ManagementUnit.
        The division to which this entity belongs.

        :return: The division of this ManagementUnit.
        :rtype: Division
        """
        return self._division

    @division.setter
    def division(self, division):
        """
        Sets the division of this ManagementUnit.
        The division to which this entity belongs.

        :param division: The division of this ManagementUnit.
        :type: Division
        """
        self._division = division

    @property
    def business_unit(self):
        """
        Gets the business_unit of this ManagementUnit.
        The business unit to which this management unit belongs

        :return: The business_unit of this ManagementUnit.
        :rtype: BusinessUnitReference
        """
        return self._business_unit

    @business_unit.setter
    def business_unit(self, business_unit):
        """
        Sets the business_unit of this ManagementUnit.
        The business unit to which this management unit belongs

        :param business_unit: The business_unit of this ManagementUnit.
        :type: BusinessUnitReference
        """
        self._business_unit = business_unit

    @property
    def start_day_of_week(self):
        """
        Gets the start_day_of_week of this ManagementUnit.
        Start day of week for scheduling and forecasting purposes. Moving to Business Unit

        :return: The start_day_of_week of this ManagementUnit.
        :rtype: str
        """
        return self._start_day_of_week

    @start_day_of_week.setter
    def start_day_of_week(self, start_day_of_week):
        """
        Sets the start_day_of_week of this ManagementUnit.
        Start day of week for scheduling and forecasting purposes. Moving to Business Unit

        :param start_day_of_week: The start_day_of_week of this ManagementUnit.
        :type: str
        """
        allowed_values = ["Sunday", "Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday"]
        # Unknown enum values are replaced with a sentinel rather than raising, so an
        # old SDK keeps working against newer API responses.
        if start_day_of_week.lower() not in map(str.lower, allowed_values):
            # print("Invalid value for start_day_of_week -> " + start_day_of_week)
            self._start_day_of_week = "outdated_sdk_version"
        else:
            self._start_day_of_week = start_day_of_week

    @property
    def time_zone(self):
        """
        Gets the time_zone of this ManagementUnit.
        The time zone for the management unit in standard Olson format. Moving to Business Unit

        :return: The time_zone of this ManagementUnit.
        :rtype: str
        """
        return self._time_zone

    @time_zone.setter
    def time_zone(self, time_zone):
        """
        Sets the time_zone of this ManagementUnit.
        The time zone for the management unit in standard Olson format. Moving to Business Unit

        :param time_zone: The time_zone of this ManagementUnit.
        :type: str
        """
        self._time_zone = time_zone

    @property
    def settings(self):
        """
        Gets the settings of this ManagementUnit.
        The configuration settings for this management unit

        :return: The settings of this ManagementUnit.
        :rtype: ManagementUnitSettingsResponse
        """
        return self._settings

    @settings.setter
    def settings(self, settings):
        """
        Sets the settings of this ManagementUnit.
        The configuration settings for this management unit

        :param settings: The settings of this ManagementUnit.
        :type: ManagementUnitSettingsResponse
        """
        self._settings = settings

    @property
    def metadata(self):
        """
        Gets the metadata of this ManagementUnit.
        Version info metadata for this management unit. Deprecated, use settings.metadata

        :return: The metadata of this ManagementUnit.
        :rtype: WfmVersionedEntityMetadata
        """
        return self._metadata

    @metadata.setter
    def metadata(self, metadata):
        """
        Sets the metadata of this ManagementUnit.
        Version info metadata for this management unit. Deprecated, use settings.metadata

        :param metadata: The metadata of this ManagementUnit.
        :type: WfmVersionedEntityMetadata
        """
        self._metadata = metadata

    @property
    def version(self):
        """
        Gets the version of this ManagementUnit.
        The version of the underlying entity. Deprecated, use field from settings.metadata instead

        :return: The version of this ManagementUnit.
        :rtype: int
        """
        return self._version

    @version.setter
    def version(self, version):
        """
        Sets the version of this ManagementUnit.
        The version of the underlying entity. Deprecated, use field from settings.metadata instead

        :param version: The version of this ManagementUnit.
        :type: int
        """
        self._version = version

    @property
    def date_modified(self):
        """
        Gets the date_modified of this ManagementUnit.
        The date and time at which this entity was last modified. Deprecated, use field from settings.metadata instead. Date time is represented as an ISO-8601 string. For example: yyyy-MM-ddTHH:mm:ss[.mmm]Z

        :return: The date_modified of this ManagementUnit.
        :rtype: datetime
        """
        return self._date_modified

    @date_modified.setter
    def date_modified(self, date_modified):
        """
        Sets the date_modified of this ManagementUnit.
        The date and time at which this entity was last modified. Deprecated, use field from settings.metadata instead. Date time is represented as an ISO-8601 string. For example: yyyy-MM-ddTHH:mm:ss[.mmm]Z

        :param date_modified: The date_modified of this ManagementUnit.
        :type: datetime
        """
        self._date_modified = date_modified

    @property
    def modified_by(self):
        """
        Gets the modified_by of this ManagementUnit.
        The user who last modified this entity. Deprecated, use field from settings.metadata instead

        :return: The modified_by of this ManagementUnit.
        :rtype: UserReference
        """
        return self._modified_by

    @modified_by.setter
    def modified_by(self, modified_by):
        """
        Sets the modified_by of this ManagementUnit.
        The user who last modified this entity. Deprecated, use field from settings.metadata instead

        :param modified_by: The modified_by of this ManagementUnit.
        :type: UserReference
        """
        self._modified_by = modified_by

    @property
    def self_uri(self):
        """
        Gets the self_uri of this ManagementUnit.
        The URI for this object

        :return: The self_uri of this ManagementUnit.
        :rtype: str
        """
        return self._self_uri

    @self_uri.setter
    def self_uri(self, self_uri):
        """
        Sets the self_uri of this ManagementUnit.
        The URI for this object

        :param self_uri: The self_uri of this ManagementUnit.
        :type: str
        """
        self._self_uri = self_uri

    def to_dict(self):
        """
        Returns the model properties as a dict
        """
        result = {}

        # dict.items() works identically on Python 2 and 3 here, removing the
        # need for six.iteritems.
        for attr, _ in self.swagger_types.items():
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value

        return result

    def to_json(self):
        """
        Returns the model as raw JSON
        """
        return json.dumps(sanitize_for_serialization(self.to_dict()))

    def to_str(self):
        """
        Returns the string representation of the model
        """
        return pformat(self.to_dict())

    def __repr__(self):
        """
        For `print` and `pprint`
        """
        return self.to_str()

    def __eq__(self, other):
        """
        Returns true if both objects are equal
        """
        # Guard against comparison with unrelated types: the generated original
        # accessed other.__dict__ unconditionally and raised AttributeError for
        # objects (e.g. str) that have no instance __dict__.
        if not isinstance(other, ManagementUnit):
            return False
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """
        Returns true if both objects are not equal
        """
        return not self == other
| 29.612827 | 208 | 0.599422 | 1,410 | 12,467 | 5.146099 | 0.158156 | 0.039691 | 0.132304 | 0.040518 | 0.501792 | 0.422409 | 0.366593 | 0.343716 | 0.321665 | 0.293275 | 0 | 0.002374 | 0.324216 | 12,467 | 420 | 209 | 29.683333 | 0.858872 | 0.466111 | 0 | 0.104575 | 0 | 0 | 0.099775 | 0.014468 | 0 | 0 | 0 | 0 | 0 | 1 | 0.202614 | false | 0 | 0.03268 | 0 | 0.359477 | 0.006536 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
531dc0c210eb864fa15db98132f5b9dc46d4e0b4 | 3,140 | py | Python | verteste/ui/ui_about.py | Chum4k3r/Verteste | 216c04468ff14c392ee3c6aebe12a0fa0e98767c | [
"MIT"
] | null | null | null | verteste/ui/ui_about.py | Chum4k3r/Verteste | 216c04468ff14c392ee3c6aebe12a0fa0e98767c | [
"MIT"
] | null | null | null | verteste/ui/ui_about.py | Chum4k3r/Verteste | 216c04468ff14c392ee3c6aebe12a0fa0e98767c | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
################################################################################
## Form generated from reading UI file 'aboutdialog.ui'
##
## Created by: Qt User Interface Compiler version 6.1.1
##
## WARNING! All changes made in this file will be lost when recompiling UI file!
################################################################################
from PySide6.QtCore import * # type: ignore
from PySide6.QtGui import * # type: ignore
from PySide6.QtWidgets import * # type: ignore
class Ui_AboutDialog(QDialog):
    # "About" dialog window: shows the application name, version, author and
    # a link to the source code repository.
    # (Original comment, translated from Portuguese: dialog box used for
    # creating or editing lines.)
    def __init__(self, parent=None):
        # Build the widget hierarchy immediately on construction.
        QDialog.__init__(self, parent=parent)
        self.setupUi(self)
        return

    def setupUi(self, Dialog):
        # Auto-generated by the Qt UI compiler from aboutdialog.ui: creates the
        # labels and lays them out vertically. Edits here will be lost if the
        # .ui file is recompiled.
        if not Dialog.objectName():
            Dialog.setObjectName(u"Dialog")
        Dialog.resize(400, 300)
        self.verticalLayout = QVBoxLayout(Dialog)
        self.verticalLayout.setObjectName(u"verticalLayout")
        # Title label: large decorative font, centred.
        self.label = QLabel(Dialog)
        self.label.setObjectName(u"label")
        font = QFont()
        font.setFamilies([u"Sandoval"])
        font.setPointSize(18)
        self.label.setFont(font)
        self.label.setAlignment(Qt.AlignCenter)

        self.verticalLayout.addWidget(self.label)

        # Version label, right-aligned under the title.
        self.label_4 = QLabel(Dialog)
        self.label_4.setObjectName(u"label_4")
        self.label_4.setTextFormat(Qt.AutoText)
        self.label_4.setAlignment(Qt.AlignRight | Qt.AlignTrailing | Qt.AlignVCenter)

        self.verticalLayout.addWidget(self.label_4)

        # "Developed by" caption and the author's name.
        self.label_2 = QLabel(Dialog)
        self.label_2.setObjectName(u"label_2")

        self.verticalLayout.addWidget(self.label_2)

        self.label_3 = QLabel(Dialog)
        self.label_3.setObjectName(u"label_3")
        self.label_3.setAlignment(Qt.AlignCenter)

        self.verticalLayout.addWidget(self.label_3)

        # "Source available at" caption and the repository URL (markdown).
        self.label_5 = QLabel(Dialog)
        self.label_5.setObjectName(u"label_5")

        self.verticalLayout.addWidget(self.label_5)

        self.label_6 = QLabel(Dialog)
        self.label_6.setObjectName(u"label_6")
        self.label_6.setTextFormat(Qt.MarkdownText)
        self.label_6.setAlignment(Qt.AlignCenter)

        self.verticalLayout.addWidget(self.label_6)

        self.retranslateUi(Dialog)

        QMetaObject.connectSlotsByName(Dialog)
    # setupUi

    def retranslateUi(self, Dialog):
        # Assigns all user-visible (Portuguese) strings; kept separate so that
        # Qt's translation machinery can re-run it on language change.
        Dialog.setWindowTitle(QCoreApplication.translate("Dialog", u"Sobre", None))
        self.label.setText(QCoreApplication.translate("Dialog", u"Verteste", None))
        self.label_4.setText(QCoreApplication.translate("Dialog", u"Vers\u00e3o 1.0.0", None))
        self.label_2.setText(QCoreApplication.translate("Dialog", u"Desenvolvido por:", None))
        self.label_3.setText(QCoreApplication.translate("Dialog", u"Jo\u00e3o Vitor Gutkoski Paes", None))
        self.label_5.setText(QCoreApplication.translate("Dialog", u"C\u00f3digo fonte dispon\u00edvel em:", None))
        self.label_6.setText(QCoreApplication.translate("Dialog", u"https://github.com/Chum4k3r/Verteste.git", None))
    # retranslateUi
| 36.941176 | 117 | 0.653503 | 360 | 3,140 | 5.591667 | 0.325 | 0.138599 | 0.107799 | 0.111277 | 0.28763 | 0.090909 | 0.090909 | 0.090909 | 0 | 0 | 0 | 0.023893 | 0.186943 | 3,140 | 84 | 118 | 37.380952 | 0.764591 | 0.103822 | 0 | 0 | 1 | 0 | 0.09981 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.057692 | false | 0 | 0.057692 | 0 | 0.153846 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
5322c20dd329a34737e71921f9eef02bff3f4b61 | 691 | py | Python | pkgs/applications/virtualization/virt-manager/custom_runner.py | mornfall/nixpkgs | 0eb6f056b9ce3e32dbc3297f298472aef19f8c73 | [
"MIT"
] | 1 | 2015-03-10T08:51:43.000Z | 2015-03-10T08:51:43.000Z | pkgs/applications/virtualization/virt-manager/custom_runner.py | mornfall/nixpkgs | 0eb6f056b9ce3e32dbc3297f298472aef19f8c73 | [
"MIT"
] | null | null | null | pkgs/applications/virtualization/virt-manager/custom_runner.py | mornfall/nixpkgs | 0eb6f056b9ce3e32dbc3297f298472aef19f8c73 | [
"MIT"
] | null | null | null | #!/usr/bin/python -t
# this script was written to use /etc/nixos/nixpkgs/pkgs/development/python-modules/generic/wrap.sh
# which already automates python executable wrapping by extending the PATH/pythonPath
# from http://docs.python.org/library/subprocess.html
# Warning Invoking the system shell with shell=True can be a security hazard if combined with untrusted input. See the warning under Frequently Used Arguments for details.
from subprocess import Popen, PIPE, STDOUT
cmd = 'PYTHON_EXECUTABLE_PATH -t THE_CUSTOM_PATH/share/virt-manager/THE_CUSTOM_PROGRAM.py'
p = Popen(cmd, shell=True, stdin=PIPE, stdout=PIPE, stderr=STDOUT, close_fds=True)
output = p.stdout.read()
print output
| 49.357143 | 171 | 0.798842 | 107 | 691 | 5.093458 | 0.719626 | 0.058716 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.11288 | 691 | 13 | 172 | 53.153846 | 0.88907 | 0.612156 | 0 | 0 | 0 | 0 | 0.311787 | 0.296578 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0 | 0.2 | null | null | 0.2 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
53233f3a8dea82db62a5a19f4e770dd7fed41add | 3,331 | py | Python | napari/layers/_source.py | napari/napari-gui | 9beb1a0b797890718e1c4f372cbd6256747f9101 | [
"BSD-3-Clause"
] | 7 | 2018-07-03T17:35:46.000Z | 2018-11-07T15:48:58.000Z | napari/layers/_source.py | guiwitz/napari | 1546f18ecc13364d5415623a9c11ed760ff043e2 | [
"BSD-3-Clause"
] | 120 | 2018-09-04T22:05:13.000Z | 2019-03-02T01:13:57.000Z | napari/layers/_source.py | napari/napari-gui | 9beb1a0b797890718e1c4f372cbd6256747f9101 | [
"BSD-3-Clause"
] | 8 | 2018-09-04T21:48:26.000Z | 2019-01-29T04:48:30.000Z | from __future__ import annotations
from contextlib import contextmanager
from contextvars import ContextVar
from typing import Optional, Tuple
from magicgui.widgets import FunctionGui
from pydantic import BaseModel
class Source(BaseModel):
    """An object to store the provenance of a layer.

    Parameters
    ----------
    path: str, optional
        filepath/url associated with layer
    reader_plugin: str, optional
        name of reader plugin that loaded the file (if applicable)
    sample: Tuple[str, str], optional
        Tuple of (sample_plugin, sample_name), if layer was loaded via
        `viewer.open_sample`.
    widget: FunctionGui, optional
        magicgui widget, if the layer was added via a magicgui widget.
    """

    path: Optional[str] = None
    reader_plugin: Optional[str] = None
    sample: Optional[Tuple[str, str]] = None
    widget: Optional[FunctionGui] = None

    class Config:
        # FunctionGui is not a pydantic-native type, so allow it as a field.
        arbitrary_types_allowed = True
        # Instances are immutable once created.
        frozen = True

    def __deepcopy__(self, memo):
        """Custom deepcopy implementation.

        this prevents deep copy. `Source` doesn't really need to be copied
        (i.e. if we deepcopy a layer, it essentially has the same `Source`).
        Moreover, deepcopying a widget is challenging, and maybe odd anyway.
        """
        # Returning self means copy.deepcopy(source) is the same object.
        return self
# layer source context management
# Context-local store of the kwargs that `layer_source` contexts have
# accumulated; `current_source()` builds a `Source` from this dict.
_LAYER_SOURCE: ContextVar[dict] = ContextVar('_LAYER_SOURCE', default={})
@contextmanager
def layer_source(**source_kwargs):
    """Context manager declaring `Source` kwargs for layers created inside it.

    While the context is active, the module-level `_LAYER_SOURCE` ContextVar
    holds the merged key/value pairs from every enclosing `layer_source`
    context. Any routine that creates a layer (opening a file, running a
    plugin, calling a magicgui widget) can use this to declare that layers
    created within the context — even indirectly, in sub-functions — come
    from a specific source. `Layer.__init__` reads the current state via
    :func:`current_source`.

    Contexts may be stacked so a layer's source can reflect multiple events
    (e.g. an `open_sample` call that triggers a `reader_plugin`); when the
    same key is supplied more than once, the innermost context wins. On
    exit, the previous state is restored.

    Parameters
    ----------
    **source_kwargs
        keys/values should be valid parameters for :class:`Source`.

    Examples
    --------
    >>> with layer_source(path='file.ext', reader_plugin='plugin'): # doctest: +SKIP
    ...     points = some_function_that_creates_points()
    ...
    >>> assert points.source == Source(path='file.ext', reader_plugin='plugin') # doctest: +SKIP
    """
    merged = dict(_LAYER_SOURCE.get())
    merged.update(source_kwargs)
    reset_token = _LAYER_SOURCE.set(merged)
    try:
        yield
    finally:
        _LAYER_SOURCE.reset(reset_token)
def current_source():
    """Get the current layer :class:`Source` (inferred from context).

    The main place this function is used is in :meth:`Layer.__init__`.
    """
    # Build a Source from whatever kwargs the active `layer_source`
    # context(s) have accumulated (an empty dict yields a default Source).
    return Source(**_LAYER_SOURCE.get())
| 33.31 | 97 | 0.691684 | 441 | 3,331 | 5.097506 | 0.419501 | 0.063612 | 0.009786 | 0.015125 | 0.040925 | 0.040925 | 0.040925 | 0.040925 | 0.040925 | 0 | 0 | 0 | 0.221255 | 3,331 | 99 | 98 | 33.646465 | 0.866615 | 0.664365 | 0 | 0 | 0 | 0 | 0.014444 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.115385 | false | 0 | 0.230769 | 0 | 0.653846 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 |
532376f847c7965500c6f9f56d9f6308f976ea4f | 1,599 | py | Python | tests/unit/test_BaseDirection.py | vpalex999/project-mars | 6e21c5acfe6105a7b7c87a79770e7420bda46f26 | [
"Apache-2.0"
] | null | null | null | tests/unit/test_BaseDirection.py | vpalex999/project-mars | 6e21c5acfe6105a7b7c87a79770e7420bda46f26 | [
"Apache-2.0"
] | null | null | null | tests/unit/test_BaseDirection.py | vpalex999/project-mars | 6e21c5acfe6105a7b7c87a79770e7420bda46f26 | [
"Apache-2.0"
] | null | null | null | import pytest
import src.constants as cnst
from src.directions import BaseDirection
@pytest.fixture
def base_direction():
    # Fresh BaseDirection per test (initial facing is asserted to be
    # cnst.NORTH by test_current_direction_is below).
    return BaseDirection()
def test_init_BaseDirection(base_direction):
    # Smoke test: the fixture yields a BaseDirection instance.
    assert isinstance(base_direction, BaseDirection)
def test_current_direction_is(base_direction):
    # A new BaseDirection starts facing north.
    assert base_direction.current == cnst.NORTH
# Each case is (chain of turns to apply, expected final facing), starting
# from the fixture's initial NORTH orientation.
@pytest.mark.parametrize(["turn_func", "expected_direction"], [
    # turn_left
    (lambda f: f.turn_left(), cnst.WEST),
    (lambda f: f.turn_left().turn_left(), cnst.SOUTH),
    (lambda f: f.turn_left().turn_left().turn_left(), cnst.EAST),
    (lambda f: f.turn_left().turn_left().turn_left().turn_left(), cnst.NORTH),
    (lambda f: f.turn_left().turn_left().turn_left().turn_left().turn_left(), cnst.WEST),
    # turn_right()
    (lambda f: f.turn_right(), cnst.EAST),
    (lambda f: f.turn_right().turn_right(), cnst.SOUTH),
    (lambda f: f.turn_right().turn_right().turn_right(), cnst.WEST),
    (lambda f: f.turn_right().turn_right().turn_right().turn_right(), cnst.NORTH),
    (lambda f: f.turn_right().turn_right().turn_right().turn_right().turn_right(), cnst.EAST),
    # any combinations
    (lambda f: f.turn_left().turn_right(), cnst.NORTH),
    (lambda f: f.turn_left().turn_left().turn_right(), cnst.WEST),
    (lambda f: f.turn_left().turn_right().turn_left(), cnst.WEST),
    (lambda f: f.turn_left().turn_right().turn_left().turn_right().turn_right(), cnst.EAST),
]
                         )
def test_turn_direction(base_direction, turn_func, expected_direction):
    # Apply the chained turns, then check the resulting facing.
    turn_func(base_direction)
    assert base_direction.current == expected_direction
| 35.533333 | 94 | 0.707942 | 232 | 1,599 | 4.594828 | 0.150862 | 0.172608 | 0.180113 | 0.157599 | 0.681051 | 0.603189 | 0.473734 | 0.426829 | 0.401501 | 0.311445 | 0 | 0 | 0.126329 | 1,599 | 44 | 95 | 36.340909 | 0.763064 | 0.02439 | 0 | 0 | 0 | 0 | 0.017352 | 0 | 0 | 0 | 0 | 0 | 0.1 | 1 | 0.133333 | false | 0 | 0.1 | 0.033333 | 0.266667 | 0 | 0 | 0 | 0 | null | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
5326222cf04cc16e106a9b078150b32472ee3fb7 | 1,520 | py | Python | resources/models/Image.py | sphildreth/roadie-python | 1465ac0f4282356ab5a074020b4f0a9f28058a86 | [
"MIT"
] | null | null | null | resources/models/Image.py | sphildreth/roadie-python | 1465ac0f4282356ab5a074020b4f0a9f28058a86 | [
"MIT"
] | null | null | null | resources/models/Image.py | sphildreth/roadie-python | 1465ac0f4282356ab5a074020b4f0a9f28058a86 | [
"MIT"
] | null | null | null | import io
from PIL import Image as PILImage
from sqlalchemy import Column, ForeignKey, LargeBinary, Index, Integer, String
from resources.models.ModelBase import Base
class Image(Base):
    """ORM model for an image attached to an artist or a release.

    The image bytes may be stored inline (``image``) or referenced by a
    remote ``url``; ``signature`` holds a perceptual hash used for
    de-duplication.
    """

    # If this is used then the image is stored in the database
    image = Column(LargeBinary(length=16777215), default=None)
    # If this is used then the image is remote and this is the url
    url = Column(String(500))
    caption = Column(String(100))
    # This is a PhotoHash of the image for assistance in deduping
    signature = Column(String(50))
    artistId = Column(Integer, ForeignKey("artist.id"), index=True)
    releaseId = Column(Integer, ForeignKey("release.id"), index=True)

    def averageHash(self):
        """Return the 8x8 average-hash of ``self.image`` as a zero-padded hex
        string, or None if the bytes are missing or cannot be decoded."""
        try:
            hash_size = 8
            # Open the image, resize it and convert it to black & white.
            # NOTE(review): PILImage.ANTIALIAS was removed in Pillow 10;
            # newer Pillow needs Image.Resampling.LANCZOS — confirm the
            # pinned Pillow version before upgrading.
            image = PILImage.open(io.BytesIO(self.image)).resize(
                (hash_size, hash_size), PILImage.ANTIALIAS).convert('L')
            pixels = list(image.getdata())
            # Compute the hash based on each pixel's value compared to the average.
            avg = sum(pixels) / len(pixels)
            bits = "".join('1' if pixel > avg else '0' for pixel in pixels)
            # 64 bits -> 16 hex digits, zero padded.
            hashformat = "0{hashlength}x".format(hashlength=hash_size ** 2 // 4)
            return format(int(bits, 2), hashformat)
        except Exception:
            # Was a bare ``except:``; narrowed so KeyboardInterrupt and
            # SystemExit are no longer swallowed. Any decode/resize failure
            # still yields None, preserving the best-effort behavior.
            return None

    def __unicode__(self):
        return self.caption

    def __str__(self):
        # Fall back to the signature when no caption was provided.
        return self.caption or self.signature
| 38 | 117 | 0.647368 | 199 | 1,520 | 4.864322 | 0.512563 | 0.024793 | 0.016529 | 0.024793 | 0.053719 | 0.053719 | 0.053719 | 0.053719 | 0 | 0 | 0 | 0.020354 | 0.256579 | 1,520 | 39 | 118 | 38.974359 | 0.836283 | 0.200658 | 0 | 0 | 0 | 0 | 0.029777 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.111111 | false | 0 | 0.148148 | 0.074074 | 0.666667 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 |
53288d2b29e82fc8c4f0e83a7806673cbfd64265 | 538 | py | Python | dont_worry.py | karianjahi/fahrer_minijob | 020a9de27b77f8e0bcdec198a37cfb7f1d4736ed | [
"MIT"
] | null | null | null | dont_worry.py | karianjahi/fahrer_minijob | 020a9de27b77f8e0bcdec198a37cfb7f1d4736ed | [
"MIT"
] | null | null | null | dont_worry.py | karianjahi/fahrer_minijob | 020a9de27b77f8e0bcdec198a37cfb7f1d4736ed | [
"MIT"
] | null | null | null | class Hey:
def __init__(jose, name="mours"):
jose.name = name
def get_name(jose):
return jose.name
class Person(object):
    """A person identified by a name and a phone number."""

    def __init__(self, name, phone):
        self.name, self.phone = name, phone
class Teenager(Person):
    """Person that additionally records a website.

    ``website`` must be passed as a keyword argument; it is removed from
    ``kwargs`` before the remaining arguments are delegated to ``Person``.
    """

    def __init__(self, *args, **kwargs):
        website = kwargs.pop("website")
        super(Teenager, self).__init__(*args, **kwargs)
        self.website = website
if __name__ == "__main__":
    # print(Hey().get_name())
    # BUG FIX: 'website' must be passed by keyword — Teenager.__init__ does
    # kwargs.pop("website"), so passing it positionally raised KeyError and
    # also handed three positional args to Person's (name, phone) signature.
    teen = Teenager("Joseph Njeri", 924, website="www.fowr.gd")
    print(teen.website)
532b482d23a5934d3f01f1f60135af259bfe9eb5 | 449 | py | Python | tests/zoo/tree.py | dynalz/odmantic | f20f08f8ab1768534c1e743f7539bfe4f8c73bdd | [
"0BSD"
] | 486 | 2020-10-19T05:33:53.000Z | 2022-03-30T12:54:57.000Z | tests/zoo/tree.py | dynalz/odmantic | f20f08f8ab1768534c1e743f7539bfe4f8c73bdd | [
"0BSD"
] | 183 | 2020-10-19T18:15:25.000Z | 2022-03-31T04:59:21.000Z | tests/zoo/tree.py | dynalz/odmantic | f20f08f8ab1768534c1e743f7539bfe4f8c73bdd | [
"0BSD"
] | 53 | 2020-10-19T09:35:01.000Z | 2022-03-31T20:39:51.000Z | import enum
from typing import Dict, List
from odmantic.field import Field
from odmantic.model import Model
class TreeKind(str, enum.Enum):
    """Tree size category; str mixin so members compare/serialize as strings."""

    BIG = "big"
    SMALL = "small"
class TreeModel(Model):
    """Test fixture model describing a tree species (odmantic Model)."""

    # Primary key; defaults to a human-readable species name.
    name: str = Field(primary_key=True, default="Acacia des montagnes")
    # Stored in MongoDB under the field name "size".
    average_size: float = Field(mongo_name="size")
    discovery_year: int
    kind: TreeKind
    genesis_continents: List[str]
    per_continent_density: Dict[str, float]
| 22.45 | 71 | 0.721604 | 61 | 449 | 5.196721 | 0.590164 | 0.07571 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.184855 | 449 | 19 | 72 | 23.631579 | 0.86612 | 0 | 0 | 0 | 0 | 0 | 0.071269 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.285714 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 |
532d64eb017b0350df305fd05c57bebebc901080 | 6,729 | py | Python | dash_daq/Slider.py | luiztauffer/dash-daq | 4975093449bdc4d7ff4cd366ac82a847cdf24c34 | [
"MIT"
] | null | null | null | dash_daq/Slider.py | luiztauffer/dash-daq | 4975093449bdc4d7ff4cd366ac82a847cdf24c34 | [
"MIT"
] | null | null | null | dash_daq/Slider.py | luiztauffer/dash-daq | 4975093449bdc4d7ff4cd366ac82a847cdf24c34 | [
"MIT"
] | null | null | null | # AUTO GENERATED FILE - DO NOT EDIT
from dash.development.base_component import Component, _explicitize_args
class Slider(Component):
"""A Slider component.
A slider component with support for
a target value.
Keyword arguments:
- id (string; optional):
The ID used to identify this component in Dash callbacks.
- className (string; optional):
Additional CSS class for the root DOM node.
- color (dict; default colors.DARKER_PRIMARY):
Color configuration for the slider's track.
`color` is a string | dict with keys:
- default (string; optional):
Fallback color to use when color.ranges has gaps.
- gradient (boolean; optional):
Display ranges as a gradient between given colors. Requires
color.ranges to be contiguous along the entirety of the
gauge's range of values.
- ranges (dict; optional):
Define multiple color ranges on the slider's track. The key
determines the color of the range and the value is the
start,end of the range itself.
`ranges` is a dict with keys:
- color (list of numbers; optional)
- disabled (boolean; optional):
If True, the handles can't be moved.
- dots (boolean; optional):
When the step value is greater than 1, you can set the dots to
True if you want to render the slider with dots. Note: dots are
disabled automatically when using color.ranges.
- handleLabel (dict; optional):
Configuration of the slider handle's label. Passing falsy value
will disable the label.
`handleLabel` is a string | dict with keys:
- color (string; optional)
- label (string; optional)
- showCurrentValue (boolean; optional)
- style (dict; optional)
- included (boolean; optional):
If the value is True, it means a continuous value is included.
Otherwise, it is an independent value.
- labelPosition (a value equal to: 'top', 'bottom'; default 'bottom'):
Where the component label is positioned.
- marks (dict; optional):
Marks on the slider. The key determines the position, and the
value determines what will show. If you want to set the style of a
specific mark point, the value should be an object which contains
style and label properties.
`marks` is a dict with keys:
- number (dict; optional)
`number` is a string
Or dict with keys:
- label (string; optional)
- style (dict; optional)
- max (number; optional):
Maximum allowed value of the slider.
- min (number; default 0):
Minimum allowed value of the slider.
- persisted_props (list of a value equal to: 'value's; default ['value']):
Properties whose user interactions will persist after refreshing
the component or the page. Since only `value` is allowed this prop
can normally be ignored.
- persistence (boolean | string | number; optional):
Used to allow user interactions in this component to be persisted
when the component - or the page - is refreshed. If `persisted` is
truthy and hasn't changed from its previous value, a `value` that
the user has changed while using the app will keep that change, as
long as the new `value` also matches what was given originally.
Used in conjunction with `persistence_type`.
- persistence_type (a value equal to: 'local', 'session', 'memory'; default 'local'):
Where persisted user changes will be stored: memory: only kept in
memory, reset on page refresh. local: window.localStorage, data is
kept after the browser quit. session: window.sessionStorage, data
is cleared once the browser quit.
- size (number; default 265):
Size of the slider in pixels.
- step (number; optional):
Value by which increments or decrements are made.
- targets (dict; optional):
Targets on the slider. The key determines the position, and the
value determines what will show. If you want to set the style of a
specific target point, the value should be an object which
contains style and label properties.
`targets` is a dict with keys:
- number (dict; optional)
`number` is a string
Or dict with keys:
- color (string; optional)
- label (string; optional)
- showCurrentValue (boolean; optional)
- style (dict; optional)
- theme (dict; default light):
Theme configuration to be set by a ThemeProvider.
- updatemode (a value equal to: 'mouseup', 'drag'; default 'mouseup'):
Determines when the component should update its value. If
`mouseup`, then the slider will only trigger its value when the
user has finished dragging the slider. If `drag`, then the slider
will update its value continuously as it is being dragged. Only
use `drag` if your updates are fast.
- value (number; optional):
The value of the input.
- vertical (boolean; optional):
If True, the slider will be vertical."""
@_explicitize_args
def __init__(self, id=Component.UNDEFINED, marks=Component.UNDEFINED, color=Component.UNDEFINED, value=Component.UNDEFINED, className=Component.UNDEFINED, labelPosition=Component.UNDEFINED, disabled=Component.UNDEFINED, dots=Component.UNDEFINED, included=Component.UNDEFINED, min=Component.UNDEFINED, max=Component.UNDEFINED, step=Component.UNDEFINED, vertical=Component.UNDEFINED, size=Component.UNDEFINED, targets=Component.UNDEFINED, theme=Component.UNDEFINED, handleLabel=Component.UNDEFINED, updatemode=Component.UNDEFINED, persistence=Component.UNDEFINED, persisted_props=Component.UNDEFINED, persistence_type=Component.UNDEFINED, **kwargs):
self._prop_names = ['id', 'className', 'color', 'disabled', 'dots', 'handleLabel', 'included', 'labelPosition', 'marks', 'max', 'min', 'persisted_props', 'persistence', 'persistence_type', 'size', 'step', 'targets', 'theme', 'updatemode', 'value', 'vertical']
self._type = 'Slider'
self._namespace = 'dash_daq'
self._valid_wildcard_attributes = []
self.available_properties = ['id', 'className', 'color', 'disabled', 'dots', 'handleLabel', 'included', 'labelPosition', 'marks', 'max', 'min', 'persisted_props', 'persistence', 'persistence_type', 'size', 'step', 'targets', 'theme', 'updatemode', 'value', 'vertical']
self.available_wildcard_properties = []
_explicit_args = kwargs.pop('_explicit_args')
_locals = locals()
_locals.update(kwargs) # For wildcard attrs
args = {k: _locals[k] for k in _explicit_args if k != 'children'}
for k in []:
if k not in args:
raise TypeError(
'Required argument `' + k + '` was not specified.')
super(Slider, self).__init__(**args)
| 38.895954 | 651 | 0.693862 | 879 | 6,729 | 5.262799 | 0.281001 | 0.081712 | 0.018158 | 0.011241 | 0.257242 | 0.21444 | 0.207955 | 0.207955 | 0.207955 | 0.207955 | 0 | 0.000948 | 0.216228 | 6,729 | 172 | 652 | 39.122093 | 0.876185 | 0.716897 | 0 | 0 | 1 | 0 | 0.203899 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.052632 | false | 0 | 0.052632 | 0 | 0.157895 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
53315defe5a40f6e5f9bc740259ebb1dfe1b3225 | 3,515 | py | Python | __init__.py | NeonJarbas/skill-ddg | 48476ad650e72f68ee7e96dd92c6d18f841ce6ec | [
"Apache-2.0"
] | null | null | null | __init__.py | NeonJarbas/skill-ddg | 48476ad650e72f68ee7e96dd92c6d18f841ce6ec | [
"Apache-2.0"
] | null | null | null | __init__.py | NeonJarbas/skill-ddg | 48476ad650e72f68ee7e96dd92c6d18f841ce6ec | [
"Apache-2.0"
] | null | null | null | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from ovos_utils.gui import can_use_gui
from adapt.intent import IntentBuilder
from mycroft.skills.common_query_skill import CommonQuerySkill, CQSMatchLevel
from mycroft.skills.core import intent_handler
from neon_solver_ddg_plugin import DDGSolver
class DuckDuckGoSkill(CommonQuerySkill):
    """Skill that answers spoken queries via the DuckDuckGo solver plugin."""

    def __init__(self):
        super().__init__()
        self.duck = DDGSolver()
        # for usage in tell me more / follow up questions
        self.idx = 0  # index of the next cached result chunk to speak
        self.results = []  # list of result dicts from the solver
        self.image = None  # image url/path returned by the solver, if any

    # intents
    @intent_handler("search_duck.intent")
    def handle_search(self, message):
        """Handle an explicit "search DuckDuckGo for ..." intent."""
        query = message.data["query"]
        summary = self.ask_the_duck(query)
        if summary:
            self.speak_result()
        else:
            self.speak_dialog("no_answer")

    @intent_handler(IntentBuilder("DuckMore").require("More").
                    require("DuckKnows"))
    def handle_tell_more(self, message):
        """ Follow up query handler, "tell me more"."""
        # query = message.data["DuckKnows"]
        # data, related_queries = self.duck.get_infobox(query)
        # TODO maybe do something with the infobox data ?
        self.speak_result()

    # common query
    def CQS_match_query_phrase(self, utt):
        """Common-query hook: return a match tuple when the duck can answer."""
        summary = self.ask_the_duck(utt)
        if summary:
            self.idx += 1  # spoken by common query
            return (utt, CQSMatchLevel.GENERAL, summary,
                    {'query': utt,
                     'image': self.image,
                     'answer': summary})

    def CQS_action(self, phrase, data):
        """ If selected show gui """
        self.display_ddg(data["answer"], data["image"])

    # duck duck go api
    def ask_the_duck(self, query):
        """Query the solver, cache results/image, return the first summary."""
        # context for follow up questions
        self.set_context("DuckKnows", query)
        self.idx = 0
        self.results = self.duck.long_answer(query, lang=self.lang)
        self.image = self.duck.get_image(query)
        if self.results:
            return self.results[0]["summary"]

    def display_ddg(self, summary=None, image=None):
        """Show the answer in the GUI; no-op when no GUI is available."""
        if not can_use_gui(self.bus):
            return
        image = image or \
                self.image or \
                "https://github.com/JarbasSkills/skill-ddg/raw/master/ui/logo.png"
        if image.startswith("/"):
            # Relative DuckDuckGo asset path -> absolute URL.
            image = "https://duckduckgo.com" + image
        self.gui['summary'] = summary or ""
        self.gui['imgLink'] = image
        self.gui.show_page("DuckDelegate.qml", override_idle=60)

    def speak_result(self):
        """Speak the next cached chunk, or wrap up when all chunks are spoken."""
        if self.idx + 1 > len(self.results):
            self.speak_dialog("thats all")
            self.remove_context("DuckKnows")
            self.idx = 0
        else:
            self.display_ddg(self.results[self.idx]["summary"],
                             self.results[self.idx]["img"])
            self.speak(self.results[self.idx]["summary"])
            self.idx += 1
def create_skill():
    """Entry point used by the skill loader to instantiate the skill."""
    return DuckDuckGoSkill()
| 35.867347 | 82 | 0.62276 | 438 | 3,515 | 4.876712 | 0.383562 | 0.029494 | 0.042135 | 0.025281 | 0.068352 | 0.048689 | 0 | 0 | 0 | 0 | 0 | 0.005074 | 0.271124 | 3,515 | 97 | 83 | 36.237113 | 0.828649 | 0.244666 | 0 | 0.171875 | 0 | 0.015625 | 0.094584 | 0 | 0 | 0 | 0 | 0.010309 | 0 | 1 | 0.140625 | false | 0 | 0.078125 | 0.015625 | 0.296875 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
533191e519d8de6668af0108951a5877a4213bac | 6,639 | py | Python | openstack_lease_it/openstack_lease_it/settings.py | LAL/openstack-lease-it | 4ff983911825eac886fa6f76d6efc25225a698b7 | [
"Apache-2.0"
] | null | null | null | openstack_lease_it/openstack_lease_it/settings.py | LAL/openstack-lease-it | 4ff983911825eac886fa6f76d6efc25225a698b7 | [
"Apache-2.0"
] | 11 | 2017-04-13T16:48:16.000Z | 2017-11-22T08:13:39.000Z | openstack_lease_it/openstack_lease_it/settings.py | LAL/openstack-lease-it | 4ff983911825eac886fa6f76d6efc25225a698b7 | [
"Apache-2.0"
] | 3 | 2017-04-06T09:08:40.000Z | 2021-05-25T08:15:00.000Z | """
Django settings for openstack_lease_it project.
Generated by 'django-admin startproject' using Django 1.8.7.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.8/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
import ast
import logging
from openstack_lease_it.config import GLOBAL_CONFIG, load_config
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))

# Load configuration into GLOBAL_CONFIG before deriving settings from it.
load_config()

# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = GLOBAL_CONFIG['DJANGO_SECRET_KEY']

# SECURITY WARNING: don't run with debug turned on in production!
# The config value is a string ("True"/"False"); literal_eval yields a bool.
DEBUG = ast.literal_eval(GLOBAL_CONFIG['DJANGO_DEBUG'])

# ALLOWED_HOSTS restricts which Host headers Django will serve.
ALLOWED_HOSTS = []

# An email address must match this regular expression to be accepted.
EMAIL_REGEXP = r"^[A-Za-z0-9\.\+_-]+@[A-Za-z0-9\.-]+\.[A-Za-z]*$"
# Application definition

INSTALLED_APPS = (
    'openstack_auth',
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'openstack_lease_it',
    'lease_it',
)

MIDDLEWARE_CLASSES = (
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.locale.LocaleMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
    'django.middleware.security.SecurityMiddleware',
)

ROOT_URLCONF = 'openstack_lease_it.urls'

TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]

WSGI_APPLICATION = 'openstack_lease_it.wsgi.application'

# Database
# https://docs.djangoproject.com/en/1.8/ref/settings/#databases
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}
# Internationalization
# https://docs.djangoproject.com/en/1.8/topics/i18n/

LANGUAGE_CODE = 'en'

TIME_ZONE = 'Europe/Paris'

USE_I18N = True

USE_L10N = True

USE_TZ = True

DEFAULT_CHARSET = 'utf-8'

# We use memcached as cache backend
SESSION_ENGINE = 'django.contrib.sessions.backends.cache'
CACHES = {
    'default': {
        'BACKEND': 'django.core.cache.backends.memcached.MemcachedCache',
        'LOCATION': '{MEMCACHED_HOST}:{MEMCACHED_PORT}'.format(**GLOBAL_CONFIG),
    }
}

SESSION_COOKIE_SECURE = False
SESSION_TIMEOUT = 1800

# A token can be near the end of validity when a page starts loading, and
# invalid during the rendering which can cause errors when a page load.
# TOKEN_TIMEOUT_MARGIN defines a time in seconds we retrieve from token
# validity to avoid this issue. You can adjust this time depending on the
# performance of the infrastructure.
TOKEN_TIMEOUT_MARGIN = 100

# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.8/howto/static-files/

STATIC_URL = '/static/'

LOGIN_URL = 'login'
LOGOUT_URL = 'logout'
LOGIN_REDIRECT_URL = '/'

# NOTE(review): PickleSerializer executes arbitrary code if session data is
# forged; safe only while SECRET_KEY stays secret — consider JSONSerializer.
SESSION_SERIALIZER = 'django.contrib.sessions.serializers.PickleSerializer'
if GLOBAL_CONFIG['BACKEND_PLUGIN'] == 'Openstack':
    # django-openstack_auth requires its own User model
    AUTH_USER_MODEL = 'openstack_auth.User'

    # Keystone URL used for authentication
    OPENSTACK_KEYSTONE_URL = GLOBAL_CONFIG['OS_AUTH_URL']

    # We use keystone v3 API
    OPENSTACK_API_VERSIONS = {
        "identity": GLOBAL_CONFIG['OS_IDENTITY_API_VERSION'],
    }

    # We use multidomain
    OPENSTACK_KEYSTONE_MULTIDOMAIN_SUPPORT = True

    # We load Openstack_auth backend
    AUTHENTICATION_BACKENDS = (
        'openstack_auth.backend.KeystoneBackend',
        'django.contrib.auth.backends.ModelBackend',
    )
else:
    # Fall back to Django's standard model-based authentication.
    AUTHENTICATION_BACKENDS = (
        'django.contrib.auth.backends.ModelBackend',
    )
# Configure logging
# The four log streams (django, main, notification, instances) share the
# same level, handler class and formatter, so they are generated in one
# place instead of being spelled out four times. The resulting LOGGING
# dict is identical to the hand-written version.
_LOG_STREAMS = ('django', 'main', 'notification', 'instances')

LOGGING = {
    'version': 1,
    'disable_existing_loggers': False,
    'formatters': {
        'simple': {
            'format': '%(levelname)s %(asctime)s: %(message)s'
        },
    },
    'handlers': {
        stream: {
            'level': GLOBAL_CONFIG['DJANGO_LOGLEVEL'],
            'class': 'logging.FileHandler',
            'filename': os.path.join(GLOBAL_CONFIG['DJANGO_LOGDIR'],
                                     '{0}.log'.format(stream)),
            'formatter': 'simple',
        }
        for stream in _LOG_STREAMS
    },
    'loggers': {
        stream: {
            'handlers': [stream],
            'level': GLOBAL_CONFIG['DJANGO_LOGLEVEL'],
            'propagate': True,
        }
        for stream in _LOG_STREAMS
    },
}

# Module-level loggers used throughout the project.
LOGGER = logging.getLogger('main')
LOGGER_NOTIFICATION = logging.getLogger('notification')
LOGGER_INSTANCES = logging.getLogger('instances')
| 31.023364 | 89 | 0.658081 | 714 | 6,639 | 5.953782 | 0.355742 | 0.053634 | 0.05928 | 0.043284 | 0.230064 | 0.19713 | 0.18184 | 0.127029 | 0.109151 | 0.090332 | 0 | 0.006681 | 0.210875 | 6,639 | 213 | 90 | 31.169014 | 0.804734 | 0.219009 | 0 | 0.225166 | 1 | 0.006623 | 0.437391 | 0.240148 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.02649 | 0 | 0.02649 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
533284cf96b1c69f9f29a622772bb5c570e08d44 | 3,619 | py | Python | rigl/experimental/jax/pruning/pruning.py | vishalbelsare/rigl | f18abc7d82ae3acc6736068408a0186c9efa575c | [
"Apache-2.0"
] | 276 | 2019-11-25T22:05:45.000Z | 2022-03-30T11:55:34.000Z | rigl/experimental/jax/pruning/pruning.py | vishalbelsare/rigl | f18abc7d82ae3acc6736068408a0186c9efa575c | [
"Apache-2.0"
] | 10 | 2020-02-26T14:53:50.000Z | 2021-09-08T16:27:28.000Z | rigl/experimental/jax/pruning/pruning.py | vishalbelsare/rigl | f18abc7d82ae3acc6736068408a0186c9efa575c | [
"Apache-2.0"
] | 54 | 2019-11-26T18:50:33.000Z | 2022-03-29T20:08:08.000Z | # coding=utf-8
# Copyright 2021 RigL Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Functions for pruning FLAX masked models."""
import collections
import collections.abc
from typing import Any, Callable, Mapping, Optional, Union

import flax
import jax.numpy as jnp

from rigl.experimental.jax.pruning import masked
def weight_magnitude(weights):
  """Weight-magnitude saliency: the absolute value of each weight."""
  return jnp.abs(weights)
def prune(
    model,
    pruning_rate,
    saliency_fn = weight_magnitude,
    mask = None,
    compare_fn = jnp.greater):
  """Returns a mask for a model where the params in each layer are pruned using a saliency function.

  Args:
    model: The model to create a pruning mask for.
    pruning_rate: The fraction of lowest magnitude saliency weights that are
      pruned. If a float, the same rate is used for all layers, otherwise if it
      is a mapping, it must contain a rate for all masked layers in the model.
    saliency_fn: A function that returns a float number used to rank
      the importance of individual weights in the layer.
    mask: If the model has an existing mask, the mask will be applied before
      pruning the model.
    compare_fn: A pairwise operator to compare saliency with threshold, and
      return True if the saliency indicates the value should not be masked.

  Returns:
    A pruned mask for the given model.
  """
  if not mask:
    mask = masked.simple_mask(model, jnp.ones, masked.WEIGHT_PARAM_NAMES)

  # BUG FIX: collections.Mapping (deprecated alias) was removed in
  # Python 3.10; use the collections.abc ABC instead.
  if not isinstance(pruning_rate, collections.abc.Mapping):
    # A scalar rate: expand it to a per-layer mapping.
    pruning_rate_dict = {}
    for param_name, _ in masked.iterate_mask(mask):
      # Get the layer name from the parameter's full name/path.
      layer_name = param_name.split('/')[-2]
      pruning_rate_dict[layer_name] = pruning_rate
    pruning_rate = pruning_rate_dict

  for param_path, param_mask in masked.iterate_mask(mask):
    split_param_path = param_path.split('/')
    layer_name = split_param_path[-2]
    param_name = split_param_path[-1]

    # If we don't have a pruning rate for the given layer, don't mask it.
    if layer_name in pruning_rate and mask[layer_name][param_name] is not None:
      param_value = model.params[layer_name][
          masked.MaskedModule.UNMASKED][param_name]

      # Here any existing mask is first applied to weight matrix.
      # Note: need to check explicitly is not None for np array.
      if param_mask is not None:
        saliencies = saliency_fn(param_mask * param_value)
      else:
        saliencies = saliency_fn(param_value)

      # TODO: Use partition here (partial sort) instead of sort,
      # since it's O(N), not O(N log N), however JAX doesn't support it.
      sorted_param = jnp.sort(jnp.abs(saliencies.flatten()))

      # Figure out the weight magnitude threshold.
      threshold_index = jnp.round(pruning_rate[layer_name] *
                                  sorted_param.size).astype(jnp.int32)
      threshold = sorted_param[threshold_index]

      mask[layer_name][param_name] = jnp.array(
          compare_fn(saliencies, threshold), dtype=jnp.int32)

  return mask
| 37.697917 | 100 | 0.716773 | 540 | 3,619 | 4.690741 | 0.368519 | 0.047769 | 0.017766 | 0.021319 | 0.053691 | 0 | 0 | 0 | 0 | 0 | 0 | 0.005957 | 0.211384 | 3,619 | 95 | 101 | 38.094737 | 0.88157 | 0.522244 | 0 | 0 | 0 | 0 | 0.001203 | 0 | 0 | 0 | 0 | 0.010526 | 0 | 1 | 0.051282 | false | 0 | 0.128205 | 0 | 0.230769 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
5333f0221e4e3679432478592dc87f3e97d8ca99 | 3,643 | py | Python | venv/Lib/site-packages/dash_bootstrap_components/_components/CardLink.py | hanzzhu/chadle | ac1d63b0410bb43f3fab362bb00abfc2e8790b9d | [
"Apache-2.0"
] | null | null | null | venv/Lib/site-packages/dash_bootstrap_components/_components/CardLink.py | hanzzhu/chadle | ac1d63b0410bb43f3fab362bb00abfc2e8790b9d | [
"Apache-2.0"
] | null | null | null | venv/Lib/site-packages/dash_bootstrap_components/_components/CardLink.py | hanzzhu/chadle | ac1d63b0410bb43f3fab362bb00abfc2e8790b9d | [
"Apache-2.0"
] | null | null | null | # AUTO GENERATED FILE - DO NOT EDIT
from dash.development.base_component import Component, _explicitize_args
class CardLink(Component):
    """A CardLink component.
    Use card link to add consistently styled links to your cards. Links can be
    used like buttons, external links, or internal Dash style links.

    Keyword arguments:
    - children (a list of or a singular dash component, string or number; optional):
        The children of this component.
    - id (string; optional):
        The ID of this component, used to identify dash components in
        callbacks. The ID needs to be unique across all of the components
        in an app.
    - className (string; optional):
        Often used with CSS to style elements with common properties.
    - external_link (boolean; optional):
        If True, the browser will treat this as an external link, forcing
        a page refresh at the new location. If False, this just changes
        the location without triggering a page refresh. Use this if you
        are observing dcc.Location, for instance. Defaults to True for
        absolute URLs and False otherwise.
    - href (string; optional):
        URL of the resource to link to.
    - key (string; optional):
        A unique identifier for the component, used to improve performance
        by React.js while rendering components See
        https://reactjs.org/docs/lists-and-keys.html for more info.
    - loading_state (dict; optional):
        Object that holds the loading state object coming from
        dash-renderer.
        `loading_state` is a dict with keys:
        - component_name (string; optional):
            Holds the name of the component that is loading.
        - is_loading (boolean; optional):
            Determines if the component is loading or not.
        - prop_name (string; optional):
            Holds which property is loading.
    - n_clicks (number; default 0):
        An integer that represents the number of times that this element
        has been clicked on.
    - n_clicks_timestamp (number; default -1):
        An integer that represents the time (in ms since 1970) at which
        n_clicks changed. This can be used to tell which button was
        changed most recently.
    - style (dict; optional):
        Defines CSS styles which will override styles previously set.
    - target (string; optional):
        Target attribute to pass on to the link. Only applies to external
        links."""
    @_explicitize_args
    def __init__(self, children=None, id=Component.UNDEFINED, style=Component.UNDEFINED, className=Component.UNDEFINED, key=Component.UNDEFINED, href=Component.UNDEFINED, external_link=Component.UNDEFINED, n_clicks=Component.UNDEFINED, n_clicks_timestamp=Component.UNDEFINED, loading_state=Component.UNDEFINED, target=Component.UNDEFINED, **kwargs):
        # Props recognised by the React-side component.
        self._prop_names = ['children', 'id', 'className', 'external_link', 'href', 'key', 'loading_state', 'n_clicks', 'n_clicks_timestamp', 'style', 'target']
        self._type = 'CardLink'
        self._namespace = 'dash_bootstrap_components'
        self._valid_wildcard_attributes = []
        self.available_properties = ['children', 'id', 'className', 'external_link', 'href', 'key', 'loading_state', 'n_clicks', 'n_clicks_timestamp', 'style', 'target']
        self.available_wildcard_properties = []
        # Forward only the props the caller explicitly supplied.
        _explicit_args = kwargs.pop('_explicit_args')
        _locals = locals()
        _locals.update(kwargs)  # For wildcard attrs
        args = {k: _locals[k] for k in _explicit_args if k != 'children'}
        # CardLink has no required props; empty loop emitted by the generator.
        for k in []:
            if k not in args:
                raise TypeError(
                    'Required argument `' + k + '` was not specified.')
        super(CardLink, self).__init__(children=children, **args)
| 42.360465 | 349 | 0.705188 | 485 | 3,643 | 5.175258 | 0.391753 | 0.071713 | 0.025498 | 0.018327 | 0.090837 | 0.07012 | 0.07012 | 0.07012 | 0.07012 | 0.07012 | 0 | 0.002084 | 0.209717 | 3,643 | 85 | 350 | 42.858824 | 0.869746 | 0.607192 | 0 | 0 | 1 | 0 | 0.191011 | 0.017556 | 0 | 0 | 0 | 0 | 0 | 1 | 0.052632 | false | 0 | 0.052632 | 0 | 0.157895 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
533866f2077fc08488ebf544ff7c3db315b050b5 | 283 | py | Python | src/marion/marion/urls/__init__.py | OmenApps/marion | f501674cafbd91f0bbad7454e4dcf3527cf4445e | [
"MIT"
] | 7 | 2021-04-06T20:33:31.000Z | 2021-09-30T23:29:24.000Z | src/marion/marion/urls/__init__.py | OmenApps/marion | f501674cafbd91f0bbad7454e4dcf3527cf4445e | [
"MIT"
] | 23 | 2020-09-09T15:01:50.000Z | 2022-01-03T08:58:36.000Z | src/marion/marion/urls/__init__.py | OmenApps/marion | f501674cafbd91f0bbad7454e4dcf3527cf4445e | [
"MIT"
] | 2 | 2020-12-14T10:07:07.000Z | 2021-06-29T00:20:43.000Z | """Urls for the marion application"""
from django.urls import include, path
from rest_framework import routers
from .. import views
# DRF router: generates the standard list/detail routes for the
# DocumentRequest viewset under the "requests/" prefix.
router = routers.DefaultRouter()
router.register(r"requests", views.DocumentRequestViewSet)

urlpatterns = [
    # Mount all router-generated routes at the application root.
    path("", include(router.urls)),
]
| 18.866667 | 58 | 0.749117 | 33 | 283 | 6.393939 | 0.636364 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.134276 | 283 | 14 | 59 | 20.214286 | 0.861224 | 0.109541 | 0 | 0 | 0 | 0 | 0.03252 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.375 | 0 | 0.375 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 1 |
533adf5d76752741ed41f712b78044d29524e61c | 2,455 | py | Python | tbx/core/migrations/0111_move_sign_up_form_into_new_app.py | arush15june/wagtail-torchbox | c4d06e096c72bd8007975dc016133024f9d27fab | [
"MIT"
] | null | null | null | tbx/core/migrations/0111_move_sign_up_form_into_new_app.py | arush15june/wagtail-torchbox | c4d06e096c72bd8007975dc016133024f9d27fab | [
"MIT"
] | null | null | null | tbx/core/migrations/0111_move_sign_up_form_into_new_app.py | arush15june/wagtail-torchbox | c4d06e096c72bd8007975dc016133024f9d27fab | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Generated by Django 1.11.16 on 2019-01-15 22:49
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
    """Move the sign-up-form models out of the ``torchbox`` app.

    The database side only renames the tables to the new
    ``sign_up_form_*`` names, while the state side removes the models
    from this app's in-memory model state.  SeparateDatabaseAndState
    keeps the two in sync without destroying any data.
    """

    dependencies = [
        ('wagtailsearchpromotions', '0002_capitalizeverbose'),
        ('wagtailcore', '0040_page_draft_title'),
        ('wagtailredirects', '0006_redirect_increase_max_length'),
        ('wagtailforms', '0003_capitalizeverbose'),
        ('torchbox', '0110_rename_blogpagetaglist_to_tag'),
    ]

    # Database side: rename each table to the new app's prefix.
    database_operations = [
        migrations.AlterModelTable('SignUpFormPageResponse', 'sign_up_form_signupformpageresponse'),
        migrations.AlterModelTable('SignUpFormPage', 'sign_up_form_signupformpage'),
        migrations.AlterModelTable('SignUpFormPageBullet', 'sign_up_form_signupformpagebullet'),
        migrations.AlterModelTable('SignUpFormPageLogo', 'sign_up_form_signupformpagelogo'),
        migrations.AlterModelTable('SignUpFormPageQuote', 'sign_up_form_signupformpagequote'),
    ]

    # State side: drop inter-model fields first, then the models
    # themselves, so Django forgets them in this app.
    state_operations = [
        migrations.RemoveField(
            model_name='signupformpage',
            name='call_to_action_image',
        ),
        migrations.RemoveField(
            model_name='signupformpage',
            name='email_attachment',
        ),
        migrations.RemoveField(
            model_name='signupformpage',
            name='page_ptr',
        ),
        migrations.RemoveField(
            model_name='signupformpagebullet',
            name='page',
        ),
        migrations.RemoveField(
            model_name='signupformpagelogo',
            name='logo',
        ),
        migrations.RemoveField(
            model_name='signupformpagelogo',
            name='page',
        ),
        migrations.RemoveField(
            model_name='signupformpagequote',
            name='page',
        ),
        migrations.DeleteModel(
            name='SignUpFormPageResponse',
        ),
        migrations.DeleteModel(
            name='SignUpFormPage',
        ),
        migrations.DeleteModel(
            name='SignUpFormPageBullet',
        ),
        migrations.DeleteModel(
            name='SignUpFormPageLogo',
        ),
        migrations.DeleteModel(
            name='SignUpFormPageQuote',
        ),
    ]

    operations = [
        # Apply the table renames to the database while recording only
        # the state operations in Django's migration state.
        migrations.SeparateDatabaseAndState(
            database_operations=database_operations,
            state_operations=state_operations,
        )
    ]
| 31.474359 | 100 | 0.619959 | 177 | 2,455 | 8.310734 | 0.40113 | 0.099932 | 0.123725 | 0.14276 | 0.197145 | 0.197145 | 0 | 0 | 0 | 0 | 0 | 0.021469 | 0.279022 | 2,455 | 77 | 101 | 31.883117 | 0.809605 | 0.028106 | 0 | 0.470588 | 1 | 0 | 0.303399 | 0.149811 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.029412 | 0 | 0.102941 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
534038a8e2bcedd293d3b518dec4b55832e33688 | 407 | py | Python | .history/List of Capstone Projects/FibonacciSequence_20200516134123.py | EvanthiosPapadopoulos/Python3 | ab773fd458e365c1510f98ecac65965234c881e8 | [
"MIT"
] | 1 | 2020-05-18T17:50:00.000Z | 2020-05-18T17:50:00.000Z | .history/List of Capstone Projects/FibonacciSequence_20200516134123.py | EvanthiosPapadopoulos/Python3 | ab773fd458e365c1510f98ecac65965234c881e8 | [
"MIT"
] | null | null | null | .history/List of Capstone Projects/FibonacciSequence_20200516134123.py | EvanthiosPapadopoulos/Python3 | ab773fd458e365c1510f98ecac65965234c881e8 | [
"MIT"
] | null | null | null | '''
Fibonacci Sequence
'''
import HeaderOfFiles
def fibonacciSeq(number):
    '''
    Yield the first ``number`` terms of the Fibonacci sequence,
    starting 1, 1, 2, 3, 5, ...
    '''
    current, following = 1, 1
    for _ in range(number):
        yield current
        current, following = following, current + following
# Prompt until the user supplies a valid integer, then print the
# first f terms of the Fibonacci sequence.
while True:
    try:
        f = int(input("Enter a number for Fibonacci: "))
        break
    except ValueError:
        # Only bad numeric input should retry; a bare except would
        # also swallow KeyboardInterrupt.
        print("Give me a number please!")

# Bug fix: fibonacciSeq is a generator, so calling it and discarding
# the result produced no output.  Consume it and print each term.
for term in fibonacciSeq(f):
    print(term)
5342baca137d0ce393a0884db4bee3c92fc045d0 | 1,503 | py | Python | tests/simple_gan_test.py | alanpeixinho/NiftyNet | 9a17022a71985974f9e5ca992c765d55860fdd7d | [
"Apache-2.0"
] | null | null | null | tests/simple_gan_test.py | alanpeixinho/NiftyNet | 9a17022a71985974f9e5ca992c765d55860fdd7d | [
"Apache-2.0"
] | null | null | null | tests/simple_gan_test.py | alanpeixinho/NiftyNet | 9a17022a71985974f9e5ca992c765d55860fdd7d | [
"Apache-2.0"
] | null | null | null | from __future__ import absolute_import, print_function
import unittest
import os
import tensorflow as tf
from tensorflow.keras import regularizers
from niftynet.network.simple_gan import SimpleGAN
from tests.niftynet_testcase import NiftyNetTestCase
class SimpleGANTest(NiftyNetTestCase):
    """Output-shape checks for SimpleGAN in 2D and 3D configurations."""

    def _check_output_shapes(self, input_shape):
        """Run SimpleGAN on all-ones image/noise tensors and verify that
        the generated image keeps ``input_shape`` while both remaining
        outputs have shape (batch, 1).

        Extracted because the 2D and 3D tests were byte-for-byte
        duplicates differing only in ``input_shape``.
        """
        noise_shape = (2, 512)
        x = tf.ones(input_shape)
        r = tf.ones(noise_shape)
        simple_gan_instance = SimpleGAN()
        out = simple_gan_instance(r, x, is_training=True)
        with self.cached_session() as sess:
            sess.run(tf.compat.v1.global_variables_initializer())
            out = sess.run(out)
        self.assertAllClose(input_shape, out[0].shape)
        self.assertAllClose((2, 1), out[1].shape)
        self.assertAllClose((2, 1), out[2].shape)

    def test_3d_reg_shape(self):
        # Volumetric input: (batch, x, y, z, channels)
        self._check_output_shapes((2, 32, 32, 32, 1))

    def test_2d_reg_shape(self):
        # Planar input: (batch, x, y, channels)
        self._check_output_shapes((2, 64, 64, 1))
if __name__ == "__main__":
    # Allow running this test module directly via TensorFlow's test runner.
    tf.test.main()
| 30.673469 | 65 | 0.642715 | 199 | 1,503 | 4.613065 | 0.301508 | 0.058824 | 0.074074 | 0.104575 | 0.679739 | 0.679739 | 0.62963 | 0.62963 | 0.62963 | 0.62963 | 0 | 0.035398 | 0.24817 | 1,503 | 48 | 66 | 31.3125 | 0.776991 | 0 | 0 | 0.611111 | 0 | 0 | 0.005323 | 0 | 0 | 0 | 0 | 0 | 0.166667 | 1 | 0.055556 | false | 0 | 0.194444 | 0 | 0.277778 | 0.027778 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
534549f2acefb6ab739ca7a1aa4369dcb66352ae | 5,771 | py | Python | snoopy/server/transforms/Maltego.py | aiddenkeli/Snoopy | dd76180145981b3574b419edce39dbb060bd8c8c | [
"MIT"
] | 432 | 2015-01-07T09:56:32.000Z | 2022-03-28T12:15:42.000Z | snoopy/server/transforms/Maltego.py | aiddenkeli/Snoopy | dd76180145981b3574b419edce39dbb060bd8c8c | [
"MIT"
] | 9 | 2015-01-31T10:07:28.000Z | 2021-09-10T08:13:47.000Z | snoopy/server/transforms/Maltego.py | aiddenkeli/Snoopy | dd76180145981b3574b419edce39dbb060bd8c8c | [
"MIT"
] | 135 | 2015-01-07T15:06:35.000Z | 2022-01-24T02:19:55.000Z | #!/usr/bin/python
#
# This might be horrible code...
# ...but it works
# Feel free to re-write in a better way
# And if you want to - send it to us, we'll update ;)
# maltego@paterva.com (2010/10/18)
#
import sys
from xml.dom import minidom
class MaltegoEntity(object):
    """One entity in a Maltego transform response (Python 2 code).

    Accumulates a type, value, weight, display information, additional
    fields and an icon URL, then serialises itself as Maltego XML on
    stdout via returnEntity().
    """
    # Class-level defaults; __init__ re-initialises them per instance.
    value = "";
    weight = 100;
    displayInformation = "";
    additionalFields = [];
    iconURL = "";
    entityType = "Phrase"

    def __init__(self,eT=None,v=None):
        """Optionally set the entity type (eT) and value (v)."""
        if (eT is not None):
            self.entityType = eT;
        if (v is not None):
            self.value = v;
        # Rebind to a fresh list so instances never share the class-level one.
        self.additionalFields = None;
        self.additionalFields = [];
        self.weight = 100;
        self.displayInformation = "";
        self.iconURL = "";

    def setType(self,eT=None):
        # Set the Maltego entity type (e.g. "Phrase").
        if (eT is not None):
            self.entityType = eT;

    def setValue(self,eV=None):
        # Set the entity's display value.
        if (eV is not None):
            self.value = eV;

    def setWeight(self,w=None):
        # Set the entity weight (default 100).
        if (w is not None):
            self.weight = w;

    def setDisplayInformation(self,di=None):
        # HTML fragment shown in Maltego's detail view (emitted as CDATA).
        if (di is not None):
            self.displayInformation = di;

    def addAdditionalFields(self,fieldName=None,displayName=None,matchingRule=False,value=None):
        # Each extra field is stored as [fieldName, displayName, matchingRule, value].
        self.additionalFields.append([fieldName,displayName,matchingRule,value]);

    def setIconURL(self,iU=None):
        # URL of the icon Maltego should display for this entity.
        if (iU is not None):
            self.iconURL = iU;

    def returnEntity(self):
        """Print this entity as a Maltego <Entity> XML fragment to stdout."""
        print "<Entity Type=\"" + str(self.entityType) + "\">";
        print "<Value>" + str(self.value) + "</Value>";
        print "<Weight>" + str(self.weight) + "</Weight>";
        if (self.displayInformation is not None):
            print "<DisplayInformation><Label Name=\"\" Type=\"text/html\"><![CDATA[" + str(self.displayInformation) + "]]></Label></DisplayInformation>";
        if (len(self.additionalFields) > 0):
            print "<AdditionalFields>";
            for i in range(len(self.additionalFields)):
                # "<>" is Python 2's inequality operator; MatchingRule is
                # only emitted when the field's rule is "strict".
                if (str(self.additionalFields[i][2]) <> "strict"):
                    print "<Field Name=\"" + str(self.additionalFields[i][0]) + "\" DisplayName=\"" + str(self.additionalFields[i][1]) + "\">" + str(self.additionalFields[i][3]) + "</Field>";
                else:
                    print "<Field MatchingRule=\"" + str(self.additionalFields[i][2]) + "\" Name=\"" + str(self.additionalFields[i][0]) + "\" DisplayName=\"" + str(self.additionalFields[i][1]) + "\">" + str(self.additionalFields[i][3]) + "</Field>";
            print "</AdditionalFields>";
        if (len(self.iconURL) > 0):
            print "<IconURL>" + self.iconURL + "</IconURL>";
        print "</Entity>";
class MaltegoTransform(object):
    """Collects entities, exceptions and UI messages for one transform
    run and serialises them as a Maltego response on stdout (Python 2).
    """
    # NOTE(review): class-level mutable lists are shared across all
    # instances; only safe if a single transform object is used per run.
    entities = []
    exceptions = []
    UIMessages = []

    #def __init__(self):
        #empty.

    def addEntity(self,enType,enValue):
        # Create an entity of the given type/value, queue it, and return it
        # so callers can keep decorating it (fields, weight, icon, ...).
        me = MaltegoEntity(enType,enValue);
        self.addEntityToMessage(me);
        return self.entities[len(self.entities)-1];

    def addEntityToMessage(self,maltegoEntity):
        # Queue an already-built entity for output.
        self.entities.append(maltegoEntity);

    def addUIMessage(self,message,messageType="Inform"):
        # Queue a message for Maltego's UI; stored as [type, text].
        self.UIMessages.append([messageType,message]);

    def addException(self,exceptionString):
        # Queue an exception string for an exception response.
        self.exceptions.append(exceptionString);

    def throwExceptions(self):
        """Print all queued exceptions as a MaltegoTransformExceptionMessage."""
        print "<MaltegoMessage>";
        print "<MaltegoTransformExceptionMessage>";
        print "<Exceptions>"
        for i in range(len(self.exceptions)):
            # NOTE(review): closing tag reads </Exceptions> rather than
            # </Exception> -- looks like a typo in the emitted XML; confirm
            # against the Maltego message schema before changing.
            print "<Exception>" + self.exceptions[i] + "</Exceptions>";
        print "</Exceptions>"
        print "</MaltegoTransformExceptionMessage>";
        print "</MaltegoMessage>";

    def returnOutput(self):
        """Print all queued entities and UI messages as a response message."""
        print "<MaltegoMessage>";
        print "<MaltegoTransformResponseMessage>";
        print "<Entities>"
        for i in range(len(self.entities)):
            self.entities[i].returnEntity();
        print "</Entities>"
        print "<UIMessages>"
        for i in range(len(self.UIMessages)):
            print "<UIMessage MessageType=\"" + self.UIMessages[i][0] + "\">" + self.UIMessages[i][1] + "</UIMessage>";
        print "</UIMessages>"
        print "</MaltegoTransformResponseMessage>";
        print "</MaltegoMessage>";

    def writeSTDERR(self,msg):
        # stderr is Maltego's side channel for progress/debug output.
        sys.stderr.write(str(msg));

    def heartbeat(self):
        # Keep-alive marker so Maltego knows the transform is still running.
        self.writeSTDERR("+");

    def progress(self,percent):
        # Progress indicator, e.g. "%50".
        self.writeSTDERR("%" + str(percent));

    def debug(self,msg):
        # Debug line, prefixed "D:".
        self.writeSTDERR("D:" + str(msg));
class MaltegoMsg:
    """Parse an incoming Maltego transform request (XML string) into
    convenient attributes: Value, Weight, Slider, Type, plus dicts of
    AdditionalFields and TransformSettings (Python 2 code).
    """

    def __init__(self,MaltegoXML=""):
        xmldoc = minidom.parseString(MaltegoXML)

        #read the easy stuff like value, limits etc
        self.Value = self.i_getNodeValue(xmldoc,"Value")
        self.Weight = self.i_getNodeValue(xmldoc,"Weight")
        self.Slider = self.i_getNodeAttributeValue(xmldoc,"Limits","SoftLimit")
        self.Type = self.i_getNodeAttributeValue(xmldoc,"Entity","Type")

        #read additional fields
        AdditionalFields = {}
        try:
            AFNodes= xmldoc.getElementsByTagName("AdditionalFields")[0]
            Settings = AFNodes.getElementsByTagName("Field")
            for node in Settings:
                AFName = node.attributes["Name"].value;
                AFValue = self.i_getText(node.childNodes);
                AdditionalFields[AFName] = AFValue
        except:
            # Missing <AdditionalFields> raises IndexError; treated as
            # "no additional fields".
            #sure this is not the right way...;)
            dontcare=1

        #parse transform settings
        TransformSettings = {}
        try:
            TSNodes= xmldoc.getElementsByTagName("TransformFields")[0]
            Settings = TSNodes.getElementsByTagName("Field")
            for node in Settings:
                TSName = node.attributes["Name"].value;
                TSValue = self.i_getText(node.childNodes);
                TransformSettings[TSName] = TSValue
        except:
            dontcare=1

        #load back into object
        self.AdditionalFields = AdditionalFields
        self.TransformSettings = TransformSettings

    def i_getText(self,nodelist):
        # Concatenate the text content of all TEXT_NODE children.
        rc = []
        for node in nodelist:
            if node.nodeType == node.TEXT_NODE:
                rc.append(node.data)
        return ''.join(rc)

    def i_getNodeValue(self,node,Tag):
        # Text content of the first element named Tag.
        return self.i_getText(node.getElementsByTagName(Tag)[0].childNodes)

    def i_getNodeAttributeValue(self,node,Tag,Attribute):
        # Attribute value on the first element named Tag.
        return node.getElementsByTagName(Tag)[0].attributes[Attribute].value;
| 28.428571 | 234 | 0.665916 | 648 | 5,771 | 5.895062 | 0.271605 | 0.073298 | 0.018848 | 0.050262 | 0.143979 | 0.107853 | 0.067016 | 0.067016 | 0.067016 | 0.049738 | 0 | 0.006943 | 0.176399 | 5,771 | 202 | 235 | 28.569307 | 0.79676 | 0.061688 | 0 | 0.117647 | 0 | 0 | 0.17877 | 0.06947 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0 | 0.014706 | null | null | 0.191176 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
53472d85e82afcf0ecb7050477e184968c938897 | 1,017 | py | Python | metadeploy/api/migrations/0050_add_clickthrough_agreement.py | sfdc-qbranch/MetaDeploy | d22547b3814dbec6aefa4d86b9f81c6f175c1b67 | [
"BSD-3-Clause"
] | 33 | 2019-03-20T15:34:39.000Z | 2022-03-30T15:59:40.000Z | metadeploy/api/migrations/0050_add_clickthrough_agreement.py | sfdc-qbranch/MetaDeploy | d22547b3814dbec6aefa4d86b9f81c6f175c1b67 | [
"BSD-3-Clause"
] | 2,718 | 2019-02-27T19:46:07.000Z | 2022-03-11T23:18:09.000Z | metadeploy/api/migrations/0050_add_clickthrough_agreement.py | sfdc-qbranch/MetaDeploy | d22547b3814dbec6aefa4d86b9f81c6f175c1b67 | [
"BSD-3-Clause"
] | 28 | 2019-03-28T04:57:16.000Z | 2022-02-04T16:49:25.000Z | # Generated by Django 2.1.5 on 2019-02-12 21:18
import django.db.models.deletion
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add the ClickThroughAgreement model and link jobs to it."""

    dependencies = [("api", "0049_add_all_other_translations")]

    operations = [
        # Stores the agreement text a user clicked through.
        migrations.CreateModel(
            name="ClickThroughAgreement",
            fields=[
                (
                    "id",
                    models.AutoField(
                        auto_created=True,
                        primary_key=True,
                        serialize=False,
                        verbose_name="ID",
                    ),
                ),
                ("text", models.TextField()),
            ],
        ),
        # Optional link from a job to the agreement accepted for it;
        # PROTECT prevents deleting an agreement still referenced by jobs.
        migrations.AddField(
            model_name="job",
            name="click_through_agreement",
            field=models.ForeignKey(
                null=True,
                on_delete=django.db.models.deletion.PROTECT,
                to="api.ClickThroughAgreement",
            ),
        ),
    ]
| 27.486486 | 63 | 0.474926 | 81 | 1,017 | 5.82716 | 0.691358 | 0.050847 | 0.059322 | 0.09322 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.032646 | 0.427729 | 1,017 | 36 | 64 | 28.25 | 0.778351 | 0.044248 | 0 | 0.166667 | 1 | 0 | 0.117526 | 0.103093 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.066667 | 0 | 0.166667 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
5347c20f1c46f18f1b8fe3023d484d25d1b6adf3 | 14,306 | py | Python | pub_ingest.py | mconlon17/vivo-pub-ingest | 7c03ecdd6dc5418121a6b92de1572d1cc63f5cb5 | [
"BSD-3-Clause"
] | null | null | null | pub_ingest.py | mconlon17/vivo-pub-ingest | 7c03ecdd6dc5418121a6b92de1572d1cc63f5cb5 | [
"BSD-3-Clause"
] | 1 | 2015-04-04T01:38:51.000Z | 2015-04-04T01:38:51.000Z | pubs/pub_ingest.py | mconlon17/vivo-1.5-improvement | 44d8335eb7bbe518374a53c0e1f9f39014023ee7 | [
"BSD-3-Clause"
] | null | null | null | #!/user/bin/env/python
"""
pub_ingest.py -- Read a bibtex file and make VIVO RDF
The following objects will be made as needed:
-- publisher
-- journal
-- information resource
-- timestamp for the information resource
-- people
-- authorships
-- concepts
The resulting ADD and SUB RDF file can then be read into VIVO
To Do
-- Complete refactor as an update process. Create reusable parts so that
a publication can be created from bibtex, doi or pmid
-- Improve DateTimeValue accuracy. Currently all publications are entered
as yearMonth precision. Sometimes we have more information, sometimes
we have less. We should use the information as presented by the
publisher, not overstate (yearMonth when there is only year) and not
understate (yearMonth when we know the day).
-- Reuse date objects -- only create dates when the appropriate date entity
is not already in VIVO
-- Update for VIVO-ISF
-- Update for vivofoundation and vivopubs
"""
__author__ = "Michael Conlon"
__copyright__ = "Copyright 2014, University of Florida"
__license__ = "BSD 3-Clause license"
__version__ = "1.3"
import os
import sys
from datetime import datetime, date

import tempita
import vivotools
from pybtex.database.input import bibtex
# NOTE(review): MAX_AUTHORS is not referenced in this section of the
# file -- confirm it is used elsewhere before removing.
MAX_AUTHORS = 50

# Run-wide report accumulators, filled while processing and dumped
# into the .rpt file at the end of the run.
publisher_report = {}
journal_report = {}
title_report = {}
author_report = {}
disambiguation_report = {}

# Lookup dictionaries built from VIVO before processing begins, so
# existing entities are reused instead of recreated.
dictionaries = []
journal_dictionary = {}
publisher_dictionary = {}
title_dictionary = {}
def open_files(bibtex_file_name):
    """
    Given the name of the bibtex file to be used as input, derive the
    base name and open companion output files for RDF, report and list
    output.

    :param bibtex_file_name: path of the bibtex input file
    :return: list of open file handles [rdf_file, rpt_file, lst_file]
    """
    # Bug fix: slicing at name.find('.') returned name[:-1] when the
    # name had no dot, and truncated at a dot inside a directory name.
    # os.path.splitext strips only the final extension, correctly.
    base = os.path.splitext(bibtex_file_name)[0]
    rpt_file = open(base + '.rpt', 'w')
    lst_file = open(base + '.lst', 'w')
    rdf_file = open(base + '.rdf', 'w')
    return [rdf_file, rpt_file, lst_file]
def update_disambiguation_report(authors, publication_uri):
    """
    Given the authors structure and the publication_uri, record in the
    module-level disambiguation_report every author whose action code
    (slot 8) is "Disambig".  Entries per publication are numbered from 1.
    """
    for author_value in authors.values():
        if author_value[8] != "Disambig":
            continue
        if publication_uri not in disambiguation_report:
            disambiguation_report[publication_uri] = {1: author_value}
        else:
            entry = disambiguation_report[publication_uri]
            entry[len(entry.keys()) + 1] = author_value
            disambiguation_report[publication_uri] = entry
    return
# start here. Create a parser for bibtex and use it to read the file of
# bibtex entries. open the output files
print datetime.now(), "Read the BibTex"
bibtex_file_name = sys.argv[1]
[rdf_file, rpt_file, lst_file] = open_files(bibtex_file_name)
parser = bibtex.Parser()
bib_data = parser.parse_file(bibtex_file_name)
# Sort entries by title so processing (and the reports) run alphabetically.
bib_sorted = sorted(bib_data.entries.items(),
                    key=lambda x: x[1].fields['title'])
print >>rdf_file, "<!--", len(bib_data.entries.keys()),\
    "publications to be processed -->"
print datetime.now(), len(bib_data.entries.keys()),\
    "publications to be processed."

# make dictionaries for people, papers, publishers, journals, concepts
# (each maps a name/title to a VIVO URI so existing entities are reused)
print datetime.now(), "Creating the dictionaries"
print datetime.now(), "Publishers"
publisher_dictionary = vivotools.make_publisher_dictionary()
print datetime.now(), "Journals"
journal_dictionary = vivotools.make_journal_dictionary()
print datetime.now(), "People"
# NOTE(review): make_people_dictionaries is not defined in this section
# of the file -- confirm it is provided elsewhere.
dictionaries = make_people_dictionaries()
print datetime.now(), "Titles"
title_dictionary = vivotools.make_title_dictionary()
print datetime.now(), "Concepts"
vivotools.make_concept_dictionary()

# process the papers
print >>rdf_file, vivotools.rdf_header()
# Main loop: for each bibtex entry, either find the publication already
# in VIVO (by title) or generate ADD RDF for the publication plus any
# publisher, journal, date, author and authorship entities it needs.
for key, value in bib_sorted:
    try:
        title = value.fields['title'].title() + " "
    except:
        title_report["No title"] = ["No Title", None, 1]
        print >>rdf_file, "<!-- No title found. No RDF necessary -->"
        continue
    title = abbrev_to_words(title)
    title = title[0:-1]
    if title in title_report:
        # Duplicate title within this bibtex run; count it and move on.
        print >>rdf_file, "<!-- Title", title,\
            "handled previously. No RDF necessary -->"
        title_report[title][2] = title_report[title][2] + 1
        continue
    else:
        print >>rdf_file, "<!-- Begin RDF for " + title + " -->"
        print datetime.now(), "<!-- Begin RDF for " + title + " -->"
        document = {}
        document['title'] = title
        title_report[title] = ["Start", None, 1]
        [found, uri] = vivotools.find_title(title, title_dictionary)
        if not found:
            title_report[title][0] = "Create" # Create
            # Authors
            [author_rdf, authors] = make_author_rdf(value)
            document['authors'] = make_document_authors(authors)
            if count_uf_authors(authors) == 0:
                # Only publications with at least one UF author are ingested.
                print >>rdf_file, "<!-- End RDF. No UF authors for " +\
                    title + " No RDF necessary -->"
                title_report[title][0] = "No UF Auth"
                continue
            update_author_report(authors)
            # Datetime
            [datetime_rdf, datetime_uri] = make_datetime_rdf(value, title)
            # Publisher
            [journal_create, journal_name, journal_uri] =\
                make_journal_uri(value)
            [publisher_create, publisher, publisher_uri, publisher_rdf] =\
                make_publisher_rdf(value)
            # Journal
            [journal_rdf, journal_uri] = make_journal_rdf(value,\
                journal_create, journal_name, journal_uri)
            # Publisher/Journal bi-directional links
            publisher_journal_rdf = ""
            if journal_uri != "" and publisher_uri != "" and\
               (journal_create or publisher_create):
                publisher_journal_rdf = \
                    make_publisher_journal_rdf(publisher_uri, journal_uri)
            # Authorships
            publication_uri = vivotools.get_vivo_uri()
            title_report[title][1] = publication_uri
            [authorship_rdf, authorship_uris] = make_authorship_rdf(authors,\
                publication_uri)
            # AuthorInAuthorships
            author_in_authorship_rdf = make_author_in_authorship_rdf(authors,\
                authorship_uris)
            # Journal/Publication bi-directional links
            # NOTE(review): journal_publication_rdf is only assigned when
            # this condition holds but is printed unconditionally below --
            # confirm journal_uri can never be empty at this point.
            if journal_uri != "" and publication_uri != "":
                journal_publication_rdf = \
                    make_journal_publication_rdf(journal_uri, publication_uri)
            # PubMed values
            pubmed_rdf = ""
            if 'doi' in value.fields:
                [pubmed_rdf, sub] = vivotools.update_pubmed(publication_uri,\
                    value.fields['doi'])
                if sub != "":
                    raise Exception("Non empty subtraction RDF"+\
                        "for Update PubMed")
            # Publication
            publication_rdf = make_publication_rdf(value,\
                title,publication_uri,datetime_uri,authorship_uris)
            print >>rdf_file, datetime_rdf, publisher_rdf, journal_rdf,\
                publisher_journal_rdf, author_rdf, authorship_rdf,\
                author_in_authorship_rdf, journal_publication_rdf,\
                publication_rdf, pubmed_rdf
            print >>rdf_file, "<!-- End RDF for " + title + " -->"
            print >>lst_file, vivotools.string_from_document(document),\
                'VIVO uri', publication_uri, '\n'
            update_disambiguation_report(authors, publication_uri)
        else:
            # Publication already exists in VIVO; record where.
            title_report[title][0] = "Found"
            title_report[title][1] = uri
            print >>rdf_file, "<!-- Found: " + title + " No RDF necessary -->"
print >>rdf_file, vivotools.rdf_footer()
#
# Reports
#
# Publisher report: action taken and URI per publisher seen in the bibtex.
print >>rpt_file,"""
Publisher Report
Lists the publishers that appear in the bibtex file in alphabetical order. For
each publisher, show the improved name, the number of papers in journals of this publisher,
the action to be taken for the publisher and the VIVO URI -- the URI is the new
URI to be created if Action is Create, otherwise it is the URI of the found publisher
in VIVO.
Publisher Papers Action VIVO URI
---------------------------------------------------------------------------------"""
publisher_count = 0
actions = {}
for publisher in sorted(publisher_report.keys()):
    publisher_count = publisher_count + 1
    [create,uri,count] = publisher_report[publisher]
    if create:
        result = "Create"
    else:
        result = "Found "
    actions[result] = actions.get(result,0) + 1
    print >>rpt_file, "{0:40}".format(publisher[0:40]),"{0:>3}".format(count),result,uri
print >>rpt_file,""
print >>rpt_file, "Publisher count by action"
print >>rpt_file, ""
for action in sorted(actions):
    print >>rpt_file, action,actions[action]
print >>rpt_file, publisher_count,"publisher(s)"
# Journal report: same layout as the publisher report.
print >>rpt_file, """
Journal Report
Lists the journals that appear in the bibtex file in alphabetical order. For
each journal, show the improved name, the number of papers t be linked to the journal,
the action to be taken for the journal and the VIVO URI -- the URI is the new
URI to be created if Action is Create, otherwise it is the URI of the found journal
in VIVO.
Journal Papers Action VIVO URI
---------------------------------------------------------------------------------"""
journal_count = 0
actions = {}
for journal in sorted(journal_report.keys()):
    journal_count = journal_count + 1
    [create,uri,count] = journal_report[journal]
    if create:
        result = "Create"
    else:
        result = "Found "
    actions[result] = actions.get(result,0) + 1
    print >>rpt_file, "{0:40}".format(journal[0:40]),"{0:>3}".format(count),result,uri
print >>rpt_file, ""
print >>rpt_file, "Journal count by action"
print >>rpt_file, ""
for action in sorted(actions):
    print >>rpt_file, action,actions[action]
print >>rpt_file, journal_count,"journal(s)"
# Title report: action per title (Create/Found/No UF Auth/...).
print >>rpt_file, """
Title Report
Lists the titles that appear in the bibtex file in alphabetical order. For
each title, show the action to be taken, the number of times the title appears in
the bibtex, the improved title and the VIVO URI of the publication -- the URI is the new
URI to be created if action is Create, otherwise it is the URI of the found publication
in VIVO.
Action # Title and VIVO URI
---------------------------------------------------------------------------------"""
title_count = 0
actions = {}
for title in sorted(title_report.keys()):
    title_count = title_count +1
    [action,uri,count] = title_report[title]
    actions[action] = actions.get(action,0) + 1
    print >>rpt_file, "{0:>10}".format(action),title,uri
print >>rpt_file, ""
print >>rpt_file, "Title count by action"
print >>rpt_file, ""
for action in sorted(actions):
    print >>rpt_file, action,actions[action]
print >>rpt_file, title_count,"title(s)"
# Author report: one line per author; each appearance contributes an
# (action, URI) pair taken from slots 8 and 9 of the author record.
print >>rpt_file, """
Author Report
For each author found in the bibtex file, show the author's name followed by the number of papers
for the author in the bibtex to be entered, followed by
a pair of results for each time the author appears on a paper in the bibtex. The result
pair contains an action and a URI. The action is "non UF" if a non-UF author stub will be
be created, the URI is the URI of the new author stub. Action "Make UF" if a new UF author
stub will be created with the URI of the new author stub. "Found UF" indicate the author was
found at the URI. "Disambig" if multiple UF people were found with the given name. The URI
is the URI of one of the found people. Follow-up is needed to determine if correct and
reassign author if not correct.
Author Action URI Action URI
----------------------------------------------------------------------------------------------"""
author_count = 0
actions = {}
for author in sorted(author_report.keys()):
    author_count = author_count + 1
    results = ""
    papers = len(author_report[author])
    action = author_report[author][1][8] # 1st report, 8th value is action
    actions[action] = actions.get(action,0) + 1
    for key in author_report[author].keys():
        value = author_report[author][key]
        results = results + value[8] + " " + "{0:45}".format(value[9])
    print >>rpt_file, "{0:25}".format(author),"{0:>3}".format(papers),results
print >>rpt_file, ""
print >>rpt_file, "Author count by action"
print >>rpt_file, ""
for action in sorted(actions):
    print >>rpt_file, action,actions[action]
print >>rpt_file, author_count,"authors(s)"
# Disambiguation report: for each publication with ambiguous UF authors,
# list every candidate VIVO person (slot 9 holds ";"-separated URIs).
print >>rpt_file, """
Disambiguation Report
For each publication with one or more authors to disambiguate, list the paper, and
then the authors in question with each of the possible URIs to be disambiguated, show the URI
of the paper, and then for each author that needs to be disambiguated on the paper, show
the last name, first name and middle initial and the all the URIs in VIVO for UF persons
with the same names.
"""
for uri in disambiguation_report.keys():
    print >>rpt_file,"The publication at",uri,"has one or more authors in question"
    for key,value in disambiguation_report[uri].items():
        uris = value[9].split(";")
        print >>rpt_file," ",value[4],value[5],value[6],":"
        for u in uris:
            person = vivotools.get_person(u)
            # Fill in placeholder text for any attribute VIVO lacks.
            if 'last_name' not in person:
                person['last_name'] = "No last name"
            if 'middle_name' not in person:
                person['middle_name'] = "No middle name"
            if 'first_name' not in person:
                person['first_name'] = "No first name"
            if 'home_department_name' not in person:
                person['home_department_name'] = "No home department"
            npubs = len(person['authorship_uris'])
            print >>rpt_file," ",u,person['last_name'], \
                person['first_name'],person['middle_name'], \
                person['home_department_name'],"Number of pubs = ",npubs
    # NOTE(review): indentation reconstructed -- blank line after each
    # publication, one more at the end of the report; confirm intent.
    print >>rpt_file
print >>rpt_file
#
# Close the files, we're done
#
rpt_file.close()
rdf_file.close()
lst_file.close()
| 37.255208 | 97 | 0.636306 | 1,843 | 14,306 | 4.781877 | 0.157895 | 0.030183 | 0.046295 | 0.01634 | 0.249404 | 0.196982 | 0.157381 | 0.136389 | 0.12822 | 0.117781 | 0 | 0.007377 | 0.241996 | 14,306 | 383 | 98 | 37.35248 | 0.80533 | 0.032853 | 0 | 0.189591 | 0 | 0.018587 | 0.315857 | 0.027016 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0 | 0.018587 | null | null | 0.200743 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
534843a13bac167037ca6701e9e5332c6dec3235 | 2,986 | py | Python | src/vanilla_pytorch/prune_model.py | f2010126/LTH_Master | 709472e7e7962fbf3a56a620c536fb03d359734f | [
"MIT"
] | null | null | null | src/vanilla_pytorch/prune_model.py | f2010126/LTH_Master | 709472e7e7962fbf3a56a620c536fb03d359734f | [
"MIT"
] | 1 | 2021-06-30T13:35:32.000Z | 2021-06-30T13:35:32.000Z | src/vanilla_pytorch/prune_model.py | f2010126/LTH_Master | 709472e7e7962fbf3a56a620c536fb03d359734f | [
"MIT"
] | 1 | 2021-06-30T13:22:15.000Z | 2021-06-30T13:22:15.000Z | import torch.nn.utils.prune as prune
import torch
from src.vanilla_pytorch.utils import count_rem_weights
from src.vanilla_pytorch.models.linearnets import LeNet, init_weights
from src.vanilla_pytorch.models.resnets import Resnets
def remove_pruning(model):
    """Make pruning permanent by folding each mask into its layer's weight.

    Calls ``prune.remove`` on every Conv2d/Linear layer, which deletes the
    ``weight_orig`` parameter and ``weight_mask`` buffer and leaves the
    (masked) weight behind as a plain parameter.

    :param model: model whose pruning reparametrizations should be removed
        (modified in place)
    """
    for module in model.modules():
        if isinstance(module, (torch.nn.Conv2d, torch.nn.Linear)):
            # Guard: prune.remove raises ValueError on a layer that was
            # never pruned, which made this function unsafe to call twice
            # (or on a fresh model).  Only strip layers that carry a mask.
            if prune.is_pruned(module):
                prune.remove(module, 'weight')
def get_masks(model, prune_amts=None):
    """Prune the lowest-magnitude weights per layer and return the masks.

    Applies L1-unstructured pruning to every Conv2d and Linear layer,
    snapshots the resulting ``*_mask`` buffers, then makes the pruning
    permanent on the model (the model has served its purpose; only the
    masks are needed by the caller).

    :param model: model to prune (modified in place)
    :param prune_amts: optional dict of per-layer-type prune rates keyed by
        ``"conv"``, ``"linear"`` and ``"last"``; defaults to 0.2 each, the
        rate used in the Lottery Ticket paper.  NOTE: ``"last"`` is
        currently unused — see TODO below.
    :return: list of ``(buffer_name, mask_tensor)`` pairs.
    """
    # TODO: Adjust pruning with output layer (apply the "last" rate).
    if prune_amts is None:  # i.e. dict is empty, use the default rate = 0.2
        prune_amts = {"linear": 0.2, "conv": 0.2, "last": 0.2}

    for module in model.modules():
        # prune the configured fraction of connections per layer type
        if isinstance(module, torch.nn.Conv2d):
            prune.l1_unstructured(module, name='weight', amount=prune_amts['conv'])
        elif isinstance(module, torch.nn.Linear):
            prune.l1_unstructured(module, name='weight', amount=prune_amts['linear'])

    # Snapshot the mask buffers before remove_pruning() deletes them from
    # the model; the mask tensors themselves stay alive in this list.
    masks = list(model.named_buffers())
    remove_pruning(model)
    return masks
def update_apply_masks(model, masks):
    """Re-apply previously captured pruning masks to a model.

    Every Conv2d/Linear layer is reparametrized via
    ``prune.custom_from_mask`` using the mask stored under
    ``"<layer name>.weight_mask"`` in *masks*.

    :param model: model whose layers receive the masks (modified in place)
    :param masks: mapping from buffer name to mask tensor
    :return: the same model, for call chaining
    """
    prunable_types = (torch.nn.Conv2d, torch.nn.Linear)
    for layer_name, layer in model.named_modules():
        if isinstance(layer, prunable_types):
            prune.custom_from_mask(
                layer, name='weight', mask=masks[layer_name + ".weight_mask"])
    return model
def prune_random(model, prune_amts=None):
    """Randomly prune connections in every Conv2d/Linear layer, then bake
    the result in permanently via ``remove_pruning``.

    :param model: model to prune (modified in place)
    :param prune_amts: optional dict of rates keyed by "conv"/"linear"/"last";
        each defaults to 0.2 when omitted.
    """
    if prune_amts is None:  # i.e. dict is empty, use the default prune rate = 0.2
        prune_amts = {"linear": 0.2, "conv": 0.2, "last": 0.2}

    for _, layer in model.named_modules():
        # randomly drop the configured fraction of connections per layer type
        if isinstance(layer, torch.nn.Conv2d):
            prune.random_unstructured(layer, name='weight', amount=prune_amts['conv'])
        elif isinstance(layer, torch.nn.Linear):
            prune.random_unstructured(layer, name='weight', amount=prune_amts['linear'])

    remove_pruning(model)
if __name__ == '__main__':
    # Smoke test: repeatedly prune a freshly initialized ResNet and report
    # how many weights remain after each round.
    net = Resnets(in_channels=3)
    net.apply(init_weights)
    prune_rate = 0.8
    prune_custom = {"linear": 0.2, "conv": 0.2, "last": 0.1}
    for _ in range(3):
        masks = get_masks(net, prune_amts=prune_custom)
        print(f"Count zero : {count_rem_weights(net)}")
| 38.779221 | 101 | 0.662425 | 432 | 2,986 | 4.449074 | 0.259259 | 0.056191 | 0.041623 | 0.041623 | 0.547347 | 0.541623 | 0.506244 | 0.483351 | 0.467222 | 0.433923 | 0 | 0.019239 | 0.216678 | 2,986 | 76 | 102 | 39.289474 | 0.80248 | 0.244809 | 0 | 0.380952 | 0 | 0 | 0.070136 | 0.01086 | 0 | 0 | 0 | 0.013158 | 0 | 1 | 0.095238 | false | 0 | 0.119048 | 0 | 0.261905 | 0.02381 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
53487b0b2e562895d1a372a23c376324cd33f385 | 3,484 | py | Python | tensorflow_federated/python/research/utils/checkpoint_utils_test.py | mcognetta/federated | fa0c1a00b5d77768bc2f38f503f3ef1a65693945 | [
"Apache-2.0"
] | null | null | null | tensorflow_federated/python/research/utils/checkpoint_utils_test.py | mcognetta/federated | fa0c1a00b5d77768bc2f38f503f3ef1a65693945 | [
"Apache-2.0"
] | null | null | null | tensorflow_federated/python/research/utils/checkpoint_utils_test.py | mcognetta/federated | fa0c1a00b5d77768bc2f38f503f3ef1a65693945 | [
"Apache-2.0"
] | null | null | null | # Lint as: python3
# Copyright 2019, The TensorFlow Federated Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for ServerState save."""
import functools
import os
import attr
import tensorflow as tf
import tensorflow_federated as tff
from tensorflow_federated.python.examples.mnist import models
from tensorflow_federated.python.research.utils import checkpoint_utils
@attr.s(cmp=False, frozen=False)
class Obj(object):
  """Container for all state that need to be stored in the checkpoint.

  Attributes:
    model: A ModelWeights structure, containing Tensors or Variables.
    optimizer_state: A list of Tensors or Variables, in the order returned by
      optimizer.variables().
    round_num: Training round_num.
  """
  model = attr.ib()
  optimizer_state = attr.ib()
  round_num = attr.ib()

  @classmethod
  def from_anon_tuple(cls, anon_tuple, round_num):
    """Builds an `Obj` from a TFF server-state anonymous tuple.

    Args:
      anon_tuple: Server state with `model` and `optimizer_state` attributes,
        as returned by `iterative_process.initialize()`.
      round_num: Training round number to store alongside the state.

    Returns:
      A new `Obj` with the converted state.
    """
    # TODO(b/130724878): These conversions should not be needed.
    return cls(
        model=anon_tuple.model._asdict(recursive=True),
        optimizer_state=list(anon_tuple.optimizer_state),
        round_num=round_num)
class SavedStateTest(tf.test.TestCase):
  """Round-trip tests for checkpoint_utils save/load/latest_checkpoint."""

  def _initial_server_state(self):
    """Builds a fresh federated-averaging server state for testing."""
    optimizer_fn = functools.partial(
        tf.keras.optimizers.SGD, learning_rate=0.1, momentum=0.9)
    process = tff.learning.build_federated_averaging_process(
        models.model_fn, server_optimizer_fn=optimizer_fn)
    return process.initialize()

  def test_save_and_load(self):
    server_state = self._initial_server_state()
    # TODO(b/130724878): These conversions should not be needed.
    checkpointable = Obj.from_anon_tuple(server_state, 1)
    ckpt_dir = os.path.join(self.get_temp_dir(), 'ckpt_1')
    checkpoint_utils.save(checkpointable, ckpt_dir)

    restored = checkpoint_utils.load(ckpt_dir, checkpointable)

    self.assertAllClose(
        tf.nest.flatten(checkpointable), tf.nest.flatten(restored))

  def test_load_latest_state(self):
    server_state = self._initial_server_state()
    # Write two checkpoints; latest_checkpoint should pick ckpt_2.
    # TODO(b/130724878): These conversions should not be needed.
    round_1 = Obj.from_anon_tuple(server_state, 1)
    checkpoint_utils.save(round_1, os.path.join(self.get_temp_dir(), 'ckpt_1'))
    # TODO(b/130724878): These conversions should not be needed.
    round_2 = Obj.from_anon_tuple(server_state, 2)
    checkpoint_utils.save(round_2, os.path.join(self.get_temp_dir(), 'ckpt_2'))

    latest_dir = checkpoint_utils.latest_checkpoint(self.get_temp_dir())
    restored = checkpoint_utils.load(latest_dir, round_1)

    self.assertEqual(os.path.join(self.get_temp_dir(), 'ckpt_2'), latest_dir)
    self.assertAllClose(tf.nest.flatten(round_2), tf.nest.flatten(restored))
if __name__ == '__main__':
  # Enable TF2 behavior (eager execution etc.) before running the tests.
  tf.compat.v1.enable_v2_behavior()
  tf.test.main()
| 35.55102 | 77 | 0.751148 | 505 | 3,484 | 4.956436 | 0.336634 | 0.035957 | 0.040751 | 0.027966 | 0.443068 | 0.425489 | 0.387535 | 0.387535 | 0.387535 | 0.322413 | 0 | 0.023074 | 0.154133 | 3,484 | 97 | 78 | 35.917526 | 0.826264 | 0.326349 | 0 | 0.244898 | 0 | 0 | 0.013877 | 0 | 0 | 0 | 0 | 0.010309 | 0.061224 | 1 | 0.061224 | false | 0 | 0.142857 | 0.020408 | 0.326531 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
534883bea976b0a78d54a9c4ba718667cfc4884f | 2,923 | py | Python | website/models/user.py | alexli0707/pyforum | 4f5ea4a0b07e094e24410ae699016590b9c20d59 | [
"Apache-2.0"
] | 4 | 2016-10-13T02:03:55.000Z | 2017-04-05T03:21:46.000Z | website/models/user.py | alexli0707/pyforum | 4f5ea4a0b07e094e24410ae699016590b9c20d59 | [
"Apache-2.0"
] | null | null | null | website/models/user.py | alexli0707/pyforum | 4f5ea4a0b07e094e24410ae699016590b9c20d59 | [
"Apache-2.0"
] | 1 | 2019-01-01T09:36:28.000Z | 2019-01-01T09:36:28.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import peewee
from flask import current_app,abort
from flask.ext.login import AnonymousUserMixin, UserMixin
from itsdangerous import TimedJSONWebSignatureSerializer as Serializer
from peewee import Model, IntegerField, CharField,PrimaryKeyField
from website.app import db_wrapper, login_manager
from website.http.main_exception import MainException
from werkzeug.security import check_password_hash,generate_password_hash
class User(UserMixin, db_wrapper.Model):
    """Forum user backed by the ``users`` table."""

    id = PrimaryKeyField()
    email = CharField(index=True)
    username = CharField(index=True)
    password_hash = CharField()  # werkzeug hash; the plain password is never stored
    role_id = IntegerField()
    confirmed = IntegerField()

    class Meta:
        db_table = 'users'

    def register(self, email, password, username):
        """Create and persist a new user with a hashed password.

        :raises MainException.DUPLICATE_EMAIL: email already registered
        :raises MainException.DUPLICATE_USERNAME: username already taken
        :return: the saved ``User`` instance
        """
        user = User(email=email, username=username,
                    password_hash=generate_password_hash(password))
        try:
            user.save()
        except peewee.IntegrityError as err:
            # MySQL error 1062 = duplicate entry; the message names the
            # unique index that was violated.
            if err.args[0] == 1062:
                if 'ix_users_email' in err.args[1]:
                    raise MainException.DUPLICATE_EMAIL
                if 'ix_users_username' in err.args[1]:
                    raise MainException.DUPLICATE_USERNAME
            # Any other integrity problem must not be swallowed silently
            # (the original code fell through and returned an unsaved user).
            raise
        return user

    def verify_password(self, password):
        """Check a plain password against the stored hash."""
        return check_password_hash(self.password_hash, password)

    def generate_confirmation_token(self, expiration=3600):
        """Generate an email-confirmation token valid for `expiration` seconds."""
        s = Serializer(current_app.config['SECRET_KEY'], expiration)
        return s.dumps({'confirm': self.id})

    def confirm(self, token):
        """Validate an email-confirmation token and mark the account confirmed.

        :return: True on success, False if the token is invalid/expired or
            was issued for a different user.
        """
        s = Serializer(current_app.config['SECRET_KEY'])
        try:
            data = s.loads(token)
        except Exception:  # bad signature or expired token
            return False
        if data.get('confirm') != self.id:
            return False
        # Token accepted: persist the confirmation.
        self.confirmed = True
        self.save()
        return True

    def generate_reset_token(self, expiration=3600):
        """Generate a password-reset token valid for `expiration` seconds."""
        s = Serializer(current_app.config['SECRET_KEY'], expiration)
        return s.dumps({'reset': self.id})

    def reset_password(self, token, new_password):
        """Reset the password if `token` is a valid reset token for this user.

        :return: True on success, False if the token is invalid/expired or
            was issued for a different user.
        """
        s = Serializer(current_app.config['SECRET_KEY'])
        try:
            data = s.loads(token)
        except Exception:  # bad signature or expired token
            return False
        if data.get('reset') != self.id:
            return False
        # BUG FIX: the original assigned the *plain* password to a
        # nonexistent `password` attribute, so the reset never took effect
        # and would have stored cleartext if it had.  Hash into the real
        # `password_hash` column instead.
        self.password_hash = generate_password_hash(new_password)
        self.save()
        return True
"""
匿名用户
"""
class AnonymousUser(AnonymousUserMixin):
def can(self, permissions):
return False
def is_administrator(self):
return False
login_manager.anonymous_user = AnonymousUser
@login_manager.user_loader
def load_user(user_id):
    """Flask-Login user loader: fetch a user by primary key, or 404.

    BUG FIX: peewee's ``Model.get`` raises ``DoesNotExist`` instead of
    returning a falsy value, so the old ``if not user: abort(404)`` branch
    was dead code and a missing user surfaced as a 500 error.  Catch the
    exception and abort with 404 as originally intended.
    """
    try:
        return User.get(User.id == int(user_id))
    except User.DoesNotExist:
        abort(404)
| 27.317757 | 99 | 0.63599 | 332 | 2,923 | 5.454819 | 0.322289 | 0.046383 | 0.039757 | 0.046383 | 0.261734 | 0.226394 | 0.197681 | 0.123689 | 0.123689 | 0.123689 | 0 | 0.008858 | 0.266165 | 2,923 | 106 | 100 | 27.575472 | 0.835431 | 0.034211 | 0 | 0.315068 | 0 | 0 | 0.03592 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.123288 | false | 0.109589 | 0.109589 | 0.041096 | 0.534247 | 0.027397 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 1 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.