| column | dtype |
|---|---|
| hexsha | string |
| size | int64 |
| ext | string |
| lang | string |
| max_stars_repo_path | string |
| max_stars_repo_name | string |
| max_stars_repo_head_hexsha | string |
| max_stars_repo_licenses | list |
| max_stars_count | int64 |
| max_stars_repo_stars_event_min_datetime | string |
| max_stars_repo_stars_event_max_datetime | string |
| max_issues_repo_path | string |
| max_issues_repo_name | string |
| max_issues_repo_head_hexsha | string |
| max_issues_repo_licenses | list |
| max_issues_count | int64 |
| max_issues_repo_issues_event_min_datetime | string |
| max_issues_repo_issues_event_max_datetime | string |
| max_forks_repo_path | string |
| max_forks_repo_name | string |
| max_forks_repo_head_hexsha | string |
| max_forks_repo_licenses | list |
| max_forks_count | int64 |
| max_forks_repo_forks_event_min_datetime | string |
| max_forks_repo_forks_event_max_datetime | string |
| content | string |
| avg_line_length | float64 |
| max_line_length | int64 |
| alphanum_fraction | float64 |
| qsc_code_num_words_quality_signal | int64 |
| qsc_code_num_chars_quality_signal | float64 |
| qsc_code_mean_word_length_quality_signal | float64 |
| qsc_code_frac_words_unique_quality_signal | float64 |
| qsc_code_frac_chars_top_2grams_quality_signal | float64 |
| qsc_code_frac_chars_top_3grams_quality_signal | float64 |
| qsc_code_frac_chars_top_4grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_5grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_6grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_7grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_8grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_9grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_10grams_quality_signal | float64 |
| qsc_code_frac_chars_replacement_symbols_quality_signal | float64 |
| qsc_code_frac_chars_digital_quality_signal | float64 |
| qsc_code_frac_chars_whitespace_quality_signal | float64 |
| qsc_code_size_file_byte_quality_signal | float64 |
| qsc_code_num_lines_quality_signal | float64 |
| qsc_code_num_chars_line_max_quality_signal | float64 |
| qsc_code_num_chars_line_mean_quality_signal | float64 |
| qsc_code_frac_chars_alphabet_quality_signal | float64 |
| qsc_code_frac_chars_comments_quality_signal | float64 |
| qsc_code_cate_xml_start_quality_signal | float64 |
| qsc_code_frac_lines_dupe_lines_quality_signal | float64 |
| qsc_code_cate_autogen_quality_signal | float64 |
| qsc_code_frac_lines_long_string_quality_signal | float64 |
| qsc_code_frac_chars_string_length_quality_signal | float64 |
| qsc_code_frac_chars_long_word_length_quality_signal | float64 |
| qsc_code_frac_lines_string_concat_quality_signal | float64 |
| qsc_code_cate_encoded_data_quality_signal | float64 |
| qsc_code_frac_chars_hex_words_quality_signal | float64 |
| qsc_code_frac_lines_prompt_comments_quality_signal | float64 |
| qsc_code_frac_lines_assert_quality_signal | float64 |
| qsc_codepython_cate_ast_quality_signal | float64 |
| qsc_codepython_frac_lines_func_ratio_quality_signal | float64 |
| qsc_codepython_cate_var_zero_quality_signal | bool |
| qsc_codepython_frac_lines_pass_quality_signal | float64 |
| qsc_codepython_frac_lines_import_quality_signal | float64 |
| qsc_codepython_frac_lines_simplefunc_quality_signal | float64 |
| qsc_codepython_score_lines_no_logic_quality_signal | float64 |
| qsc_codepython_frac_lines_print_quality_signal | float64 |
| qsc_code_num_words | int64 |
| qsc_code_num_chars | int64 |
| qsc_code_mean_word_length | int64 |
| qsc_code_frac_words_unique | null |
| qsc_code_frac_chars_top_2grams | int64 |
| qsc_code_frac_chars_top_3grams | int64 |
| qsc_code_frac_chars_top_4grams | int64 |
| qsc_code_frac_chars_dupe_5grams | int64 |
| qsc_code_frac_chars_dupe_6grams | int64 |
| qsc_code_frac_chars_dupe_7grams | int64 |
| qsc_code_frac_chars_dupe_8grams | int64 |
| qsc_code_frac_chars_dupe_9grams | int64 |
| qsc_code_frac_chars_dupe_10grams | int64 |
| qsc_code_frac_chars_replacement_symbols | int64 |
| qsc_code_frac_chars_digital | int64 |
| qsc_code_frac_chars_whitespace | int64 |
| qsc_code_size_file_byte | int64 |
| qsc_code_num_lines | int64 |
| qsc_code_num_chars_line_max | int64 |
| qsc_code_num_chars_line_mean | int64 |
| qsc_code_frac_chars_alphabet | int64 |
| qsc_code_frac_chars_comments | int64 |
| qsc_code_cate_xml_start | int64 |
| qsc_code_frac_lines_dupe_lines | int64 |
| qsc_code_cate_autogen | int64 |
| qsc_code_frac_lines_long_string | int64 |
| qsc_code_frac_chars_string_length | int64 |
| qsc_code_frac_chars_long_word_length | int64 |
| qsc_code_frac_lines_string_concat | null |
| qsc_code_cate_encoded_data | int64 |
| qsc_code_frac_chars_hex_words | int64 |
| qsc_code_frac_lines_prompt_comments | int64 |
| qsc_code_frac_lines_assert | int64 |
| qsc_codepython_cate_ast | int64 |
| qsc_codepython_frac_lines_func_ratio | int64 |
| qsc_codepython_cate_var_zero | int64 |
| qsc_codepython_frac_lines_pass | int64 |
| qsc_codepython_frac_lines_import | int64 |
| qsc_codepython_frac_lines_simplefunc | int64 |
| qsc_codepython_score_lines_no_logic | int64 |
| qsc_codepython_frac_lines_print | int64 |
| effective | string |
| hits | int64 |

Each example row below lists its metadata fields, the file content, and then the remaining numeric columns (avg_line_length through hits) in the order given above.
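As a quick sanity check, the dtypes can be compared against a local shard. A minimal sketch, assuming one shard of the dataset is available locally as a parquet file; the file name `shard.parquet` and the 0.5 threshold are hypothetical:

```python
import pandas as pd

# Read one locally downloaded shard (hypothetical file name).
df = pd.read_parquet("shard.parquet")

# Compare the actual dtypes against the schema table above.
print(df.dtypes)

# Example filter on one of the quality signals: keep files whose
# duplicated 5-gram character fraction is below a chosen threshold.
mask = df["qsc_code_frac_chars_dupe_5grams_quality_signal"] < 0.5
print(f"{mask.sum()} of {len(df)} rows pass the threshold")
```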
hexsha: bc332f46bf5fa376fd785a4d78a379066d15cba7 | size: 9,201 | ext: py | lang: Python
max_stars_repo: OpenCloudNeXt/trio2o @ f4d2d5458fbba71414edebf5e9f69b98abd2d080, path: trio2o/nova_apigw/controllers/flavor.py, licenses: ["Apache-2.0"], max_stars_count: 1, stars events: 2021-03-19T16:48:55.000Z to 2021-03-19T16:48:55.000Z
max_issues_repo: OpenCloudNeXt/trio2o @ f4d2d5458fbba71414edebf5e9f69b98abd2d080, path: trio2o/nova_apigw/controllers/flavor.py, licenses: ["Apache-2.0"], max_issues_count: null, issues events: null
max_forks_repo: OpenCloudNeXt/trio2o @ f4d2d5458fbba71414edebf5e9f69b98abd2d080, path: trio2o/nova_apigw/controllers/flavor.py, licenses: ["Apache-2.0"], max_forks_count: null, forks events: null
content:
```python
# Copyright (c) 2015 Huawei Tech. Co., Ltd.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import pecan
from pecan import expose
from pecan import rest

import oslo_db.exception as db_exc

import trio2o.common.context as t_context
from trio2o.common.i18n import _
from trio2o.common import utils
from trio2o.db import core
from trio2o.db import models


class FlavorManageController(rest.RestController):
    # NOTE(zhiyuan) according to nova API reference, flavor creating and
    # deleting should use '/flavors/os-flavor-manage' path, but '/flavors/'
    # also supports this two operations to keep compatible with nova client

    def __init__(self, project_id):
        self.project_id = project_id

    @expose(generic=True, template='json')
    def post(self, **kw):
        context = t_context.extract_context_from_environ()
        if not context.is_admin:
            return utils.format_nova_error(
                403, _("Policy doesn't allow os_compute_api:os-flavor-manage "
                       "to be performed."))

        required_fields = ['name', 'ram', 'vcpus', 'disk']
        if 'flavor' not in kw:
            utils.format_nova_error(400, _('flavor is not set'))
        if not utils.validate_required_fields_set(kw['flavor'],
                                                  required_fields):
            utils.format_nova_error(
                400, _('Invalid input for field/attribute flavor.'))

        flavor_dict = {
            'name': kw['flavor']['name'],
            'flavorid': kw['flavor'].get('id'),
            'memory_mb': kw['flavor']['ram'],
            'vcpus': kw['flavor']['vcpus'],
            'root_gb': kw['flavor']['disk'],
            'ephemeral_gb': kw['flavor'].get('OS-FLV-EXT-DATA:ephemeral', 0),
            'swap': kw['flavor'].get('swap', 0),
            'rxtx_factor': kw['flavor'].get('rxtx_factor', 1.0),
            'is_public': kw['flavor'].get('os-flavor-access:is_public', True),
        }

        try:
            with context.session.begin():
                flavor = core.create_resource(
                    context, models.InstanceTypes, flavor_dict)
        except db_exc.DBDuplicateEntry as e:
            if 'flavorid' in e.columns:
                return utils.format_nova_error(
                    409, _('Flavor with ID %s already '
                           'exists.') % flavor_dict['flavorid'])
            else:
                return utils.format_nova_error(
                    409, _('Flavor with name %s already '
                           'exists.') % flavor_dict['name'])
        except Exception:
            return utils.format_nova_error(500, _('Failed to create flavor'))

        return {'flavor': flavor}

    @expose(generic=True, template='json')
    def delete(self, _id):
        context = t_context.extract_context_from_environ()
        try:
            with context.session.begin():
                flavors = core.query_resource(context, models.InstanceTypes,
                                              [{'key': 'flavorid',
                                                'comparator': 'eq',
                                                'value': _id}], [])
                if not flavors:
                    return utils.format_nova_error(
                        404, _('Flavor %s could not be found') % _id)
                core.delete_resource(context, models.InstanceTypes,
                                     flavors[0]['id'])
        except Exception:
            return utils.format_nova_error(500, _('Failed to delete flavor'))
        pecan.response.status = 202
        return


class FlavorController(rest.RestController):

    def __init__(self, project_id):
        self.project_id = project_id

    @pecan.expose()
    def _lookup(self, action, *remainder):
        if action == 'os-flavor-manage':
            return FlavorManageController(self.project_id), remainder

    @expose(generic=True, template='json')
    def post(self, **kw):
        context = t_context.extract_context_from_environ()
        if not context.is_admin:
            return utils.format_nova_error(
                403, _("Policy doesn't allow os_compute_api:os-flavor-manage "
                       "to be performed."))

        required_fields = ['name', 'ram', 'vcpus', 'disk']
        if 'flavor' not in kw:
            utils.format_nova_error(400, _('flavor is not set'))
        if not utils.validate_required_fields_set(kw['flavor'],
                                                  required_fields):
            utils.format_nova_error(
                400, _('Invalid input for field/attribute flavor.'))

        flavor_dict = {
            'name': kw['flavor']['name'],
            'flavorid': kw['flavor'].get('id'),
            'memory_mb': kw['flavor']['ram'],
            'vcpus': kw['flavor']['vcpus'],
            'root_gb': kw['flavor']['disk'],
            'ephemeral_gb': kw['flavor'].get('OS-FLV-EXT-DATA:ephemeral', 0),
            'swap': kw['flavor'].get('swap', 0),
            'rxtx_factor': kw['flavor'].get('rxtx_factor', 1.0),
            'is_public': kw['flavor'].get('os-flavor-access:is_public', True),
        }

        try:
            with context.session.begin():
                flavor = core.create_resource(
                    context, models.InstanceTypes, flavor_dict)
        except db_exc.DBDuplicateEntry as e:
            if 'flavorid' in e.columns:
                return utils.format_nova_error(
                    409, _('Flavor with ID %s already '
                           'exists.') % flavor_dict['flavorid'])
            else:
                return utils.format_nova_error(
                    409, _('Flavor with name %s already '
                           'exists.') % flavor_dict['name'])
        except Exception:
            utils.format_nova_error(500, _('Failed to create flavor'))

        flavor['id'] = flavor['flavorid']
        del flavor['flavorid']
        return {'flavor': flavor}

    @expose(generic=True, template='json')
    def get_one(self, _id):
        # NOTE(zhiyuan) this function handles two kinds of requests
        # GET /flavors/flavor_id
        # GET /flavors/detail
        context = t_context.extract_context_from_environ()
        if _id == 'detail':
            with context.session.begin():
                flavors = core.query_resource(context, models.InstanceTypes,
                                              [], [])
                for flavor in flavors:
                    flavor['id'] = flavor['flavorid']
                    del flavor['flavorid']
                return {'flavors': flavors}
        else:
            with context.session.begin():
                flavors = core.query_resource(context, models.InstanceTypes,
                                              [{'key': 'flavorid',
                                                'comparator': 'eq',
                                                'value': _id}], [])
                if not flavors:
                    return utils.format_nova_error(
                        404, _('Flavor %s could not be found') % _id)
                flavor = flavors[0]
                flavor['id'] = flavor['flavorid']
                del flavor['flavorid']
                return {'flavor': flavor}

    @expose(generic=True, template='json')
    def get_all(self):
        context = t_context.extract_context_from_environ()
        with context.session.begin():
            flavors = core.query_resource(context, models.InstanceTypes,
                                          [], [])
            return {'flavors': [dict(
                [('id', flavor['flavorid']),
                 ('name', flavor['name'])]) for flavor in flavors]}

    @expose(generic=True, template='json')
    def delete(self, _id):
        # TODO(zhiyuan) handle foreign key constraint
        context = t_context.extract_context_from_environ()
        try:
            with context.session.begin():
                flavors = core.query_resource(context, models.InstanceTypes,
                                              [{'key': 'flavorid',
                                                'comparator': 'eq',
                                                'value': _id}], [])
                if not flavors:
                    return utils.format_nova_error(
                        404, _('Flavor %s could not be found') % _id)
                core.delete_resource(context,
                                     models.InstanceTypes, flavors[0]['id'])
        except Exception:
            return utils.format_nova_error(500, _('Failed to delete flavor'))
        pecan.response.status = 202
        return
```
row values (avg_line_length through hits, schema order): 42.206422 | 78 | 0.535159 | 959 | 9,201 | 4.970803 | 0.216893 | 0.033564 | 0.053493 | 0.071324 | 0.72855 | 0.72855 | 0.72855 | 0.720159 | 0.701909 | 0.668135 | 0 | 0.013908 | 0.351375 | 9,201 | 217 | 79 | 42.400922 | 0.784853 | 0.104771 | 0 | 0.813953 | 0 | 0 | 0.159951 | 0.019963 | 0 | 0 | 0 | 0.004608 | 0 | 1 | 0.052326 | false | 0 | 0.052326 | 0 | 0.232558 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6
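The duplicate-line signals are easy to approximate from the content column alone. A minimal sketch of one plausible reading of qsc_code_frac_lines_dupe_lines_quality_signal (an assumption; the dataset's exact definition may normalize lines differently):

```python
from collections import Counter


def frac_lines_dupe_lines(text: str) -> float:
    """Fraction of non-empty lines that occur more than once."""
    lines = [line.strip() for line in text.splitlines() if line.strip()]
    if not lines:
        return 0.0
    counts = Counter(lines)
    duped = sum(count for count in counts.values() if count > 1)
    return duped / len(lines)
```

For boilerplate-heavy files like the controller above, repeated blocks push this ratio up even when the code is perfectly valid.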
hexsha: 70af8d86c493e026536844ac77648476d945168d | size: 578 | ext: py | lang: Python
max_stars_repo: kostyaby/fc-helper @ 727b4418a832cad73fbf3a76b07973ec7dd8f49f, path: lib/tracked_entity.py, licenses: ["MIT"], max_stars_count: null, stars events: null
max_issues_repo: kostyaby/fc-helper @ 727b4418a832cad73fbf3a76b07973ec7dd8f49f, path: lib/tracked_entity.py, licenses: ["MIT"], max_issues_count: null, issues events: null
max_forks_repo: kostyaby/fc-helper @ 727b4418a832cad73fbf3a76b07973ec7dd8f49f, path: lib/tracked_entity.py, licenses: ["MIT"], max_forks_count: null, forks events: null
content:
```python
from . import Constant

import os


class TrackedEntity:
    def __init__(self, id, tracked_directory_path, related_path, created_at):
        self.id = id
        self.tracked_directory_path = tracked_directory_path
        self.related_path = related_path
        self.created_at = created_at

    def get_absolute_path(self):
        return os.path.join(self.tracked_directory_path, self.related_path)

    def __str__(self):
        return "id: {}; tracked_directory_path: {}; related_path: {}; created_at: {}".format(\
            self.id, self.tracked_directory_path, self.related_path, self.created_at)
```
row values (avg_line_length through hits, schema order): 27.52381 | 90 | 0.742215 | 79 | 578 | 5.012658 | 0.278481 | 0.242424 | 0.30303 | 0.181818 | 0.661616 | 0.497475 | 0.409091 | 0.212121 | 0 | 0 | 0 | 0 | 0.157439 | 578 | 20 | 91 | 28.9 | 0.813142 | 0 | 0 | 0 | 0 | 0 | 0.117647 | 0.039792 | 0 | 0 | 0 | 0 | 0 | 1 | 0.230769 | false | 0 | 0.153846 | 0.153846 | 0.615385 | 0 | 0 | 0 | 0 | null | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 1 | 1 | 0 | 0 | 6
hexsha: 70c4a76793b990585e6b90605023577d1eeb4269 | size: 4,433 | ext: py | lang: Python
max_stars_repo: steptan/indy-plenum @ 488bf63c82753a74a92ac6952da784825ffd4a3d, path: plenum/test/cli/test_phrase_word_completer.py, licenses: ["Apache-2.0"], max_stars_count: null, stars events: null
max_issues_repo: steptan/indy-plenum @ 488bf63c82753a74a92ac6952da784825ffd4a3d, path: plenum/test/cli/test_phrase_word_completer.py, licenses: ["Apache-2.0"], max_issues_count: null, issues events: null
max_forks_repo: steptan/indy-plenum @ 488bf63c82753a74a92ac6952da784825ffd4a3d, path: plenum/test/cli/test_phrase_word_completer.py, licenses: ["Apache-2.0"], max_forks_count: 2, forks events: 2017-12-13T21:14:54.000Z to 2021-06-06T15:48:03.000Z
content:
```python
from prompt_toolkit.completion import CompleteEvent, Completion
from prompt_toolkit.document import Document

from plenum.cli.phrase_word_completer import PhraseWordCompleter


def test_next_phrase_word_is_suggested_for_typed_word_being_its_prefix():
    completer = PhraseWordCompleter('add genesis transaction')
    document = Document('add genesis tra')
    complete_event = CompleteEvent(text_inserted=True)
    completions = list(completer.get_completions(document, complete_event))
    assert [Completion('transaction', -3)] == completions


def test_none_is_suggested_for_typed_word_not_being_next_phrase_word_prefix():
    completer = PhraseWordCompleter('add genesis transaction')
    document = Document('add tr')
    complete_event = CompleteEvent(text_inserted=True)
    completions = list(completer.get_completions(document, complete_event))
    assert [] == completions


def test_next_phrase_word_is_suggested_for_space():
    completer = PhraseWordCompleter('add genesis transaction')
    document = Document('add ')
    complete_event = CompleteEvent(text_inserted=True)
    completions = list(completer.get_completions(document, complete_event))
    assert [Completion('genesis')] == completions


def test_next_phrase_word_is_suggested_for_space_typed_inside_input():
    completer = PhraseWordCompleter('add genesis transaction')
    document = Document('add transaction', 4)
    complete_event = CompleteEvent(text_inserted=True)
    completions = list(completer.get_completions(document, complete_event))
    assert [Completion('genesis')] == completions


def test_next_phrase_word_is_suggested_for_its_prefix_typed_inside_input():
    completer = PhraseWordCompleter('add genesis transaction')
    document = Document('add gtransaction', 5)
    complete_event = CompleteEvent(text_inserted=True)
    completions = list(completer.get_completions(document, complete_event))
    assert [Completion('genesis', -1)] == completions


def test_typed_word_is_suggested_for_itself_if_it_is_next_phrase_word():
    completer = PhraseWordCompleter('add genesis transaction')
    document = Document('add genesis transaction')
    complete_event = CompleteEvent(text_inserted=True)
    completions = list(completer.get_completions(document, complete_event))
    assert [Completion('transaction', -11)] == completions


def test_first_phrase_word_is_suggested_for_empty_input():
    completer = PhraseWordCompleter('add genesis transaction')
    document = Document('')
    complete_event = CompleteEvent(completion_requested=True)
    completions = list(completer.get_completions(document, complete_event))
    assert [Completion('add')] == completions


def test_none_is_suggested_for_space_after_all_phrase():
    completer = PhraseWordCompleter('add genesis transaction')
    document = Document('add genesis transaction ')
    complete_event = CompleteEvent(completion_requested=True)
    completions = list(completer.get_completions(document, complete_event))
    assert [] == completions


def test_none_is_suggested_for_word_typed_after_all_phrase():
    completer = PhraseWordCompleter('add genesis transaction')
    document = Document('add genesis transaction new')
    complete_event = CompleteEvent(completion_requested=True)
    completions = list(completer.get_completions(document, complete_event))
    assert [] == completions


def test_none_is_suggested_for_any_input_after_all_phrase_and_space():
    completer = PhraseWordCompleter('add genesis transaction')
    document = Document('add genesis transaction new tr')
    complete_event = CompleteEvent(completion_requested=True)
    completions = list(completer.get_completions(document, complete_event))
    assert [] == completions


def test_first_phrase_word_is_suggested_for_only_space_typed():
    completer = PhraseWordCompleter('add genesis transaction')
    document = Document(' ')
    complete_event = CompleteEvent(text_inserted=True)
    completions = list(completer.get_completions(document, complete_event))
    assert [Completion('add')] == completions


def test_next_phrase_word_is_suggested_for_redundant_space():
    completer = PhraseWordCompleter('add genesis transaction')
    document = Document(' add genesis ')
    complete_event = CompleteEvent(text_inserted=True)
    completions = list(completer.get_completions(document, complete_event))
    assert [Completion('transaction')] == completions
```
row values (avg_line_length through hits, schema order): 35.464 | 78 | 0.781863 | 485 | 4,433 | 6.8 | 0.125773 | 0.094603 | 0.10188 | 0.138266 | 0.887204 | 0.881747 | 0.881747 | 0.865373 | 0.853244 | 0.754093 | 0 | 0.00156 | 0.13219 | 4,433 | 124 | 79 | 35.75 | 0.855732 | 0 | 0 | 0.64 | 0 | 0 | 0.116174 | 0 | 0 | 0 | 0 | 0 | 0.16 | 1 | 0.16 | false | 0 | 0.04 | 0 | 0.2 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6
hexsha: 70cd95bc30b9bfcb1a5920edb8befabb92f263fd | size: 32 | ext: py | lang: Python
max_stars_repo: yandex-cloud/ydb-python-sdk @ 0df2dce2d77fc41ad3020072740f51dd91630177, path: kikimr/public/sdk/python/client/table.py, licenses: ["Apache-2.0"], max_stars_count: 19, stars events: 2019-07-01T08:25:29.000Z to 2022-01-26T14:46:51.000Z
max_issues_repo: yandex-cloud/ydb-python-sdk @ 0df2dce2d77fc41ad3020072740f51dd91630177, path: kikimr/public/sdk/python/client/table.py, licenses: ["Apache-2.0"], max_issues_count: 5, issues events: 2019-07-02T13:36:42.000Z to 2021-09-14T06:46:48.000Z
max_forks_repo: yandex-cloud/ydb-python-sdk @ 0df2dce2d77fc41ad3020072740f51dd91630177, path: kikimr/public/sdk/python/client/table.py, licenses: ["Apache-2.0"], max_forks_count: 10, forks events: 2019-06-07T10:36:19.000Z to 2021-10-15T08:58:11.000Z
content:
```python
from ydb.table import * # noqa
```
row values (avg_line_length through hits, schema order): 16 | 31 | 0.6875 | 5 | 32 | 4.4 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.21875 | 32 | 1 | 32 | 32 | 0.88 | 0.125 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6
hexsha: cb19f3adc8f4c7f8852e263dd1aeb0c00be61f1a | size: 140 | ext: py | lang: Python
max_stars_repo: uehara1414/lang-processing-play @ 5d7a48bda3671250607c94d9008a9606c99512d0, path: lpp_index/views.py, licenses: ["MIT"], max_stars_count: null, stars events: null
max_issues_repo: uehara1414/lang-processing-play @ 5d7a48bda3671250607c94d9008a9606c99512d0, path: lpp_index/views.py, licenses: ["MIT"], max_issues_count: null, issues events: null
max_forks_repo: uehara1414/lang-processing-play @ 5d7a48bda3671250607c94d9008a9606c99512d0, path: lpp_index/views.py, licenses: ["MIT"], max_forks_count: null, forks events: null
content:
```python
from django.shortcuts import render
from django.http import JsonResponse


def index(reqeust):
    return JsonResponse({"Hello": "World!"})
```
row values (avg_line_length through hits, schema order): 20 | 44 | 0.757143 | 17 | 140 | 6.235294 | 0.764706 | 0.188679 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.135714 | 140 | 6 | 45 | 23.333333 | 0.876033 | 0 | 0 | 0 | 0 | 0 | 0.078571 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.25 | false | 0 | 0.5 | 0.25 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 | 1 | 1 | 0 | 0 | 6
hexsha: cb1c70e77b8b15996a88690033e441699a5f58c1 | size: 155 | ext: py | lang: Python
max_stars_repo: finbyz/nobleweb @ 397df5084edf52c4824504c0908c8692b61d99c5, path: nobleweb/www/about.py, licenses: ["MIT"], max_stars_count: null, stars events: null
max_issues_repo: finbyz/nobleweb @ 397df5084edf52c4824504c0908c8692b61d99c5, path: nobleweb/www/about.py, licenses: ["MIT"], max_issues_count: null, issues events: null
max_forks_repo: finbyz/nobleweb @ 397df5084edf52c4824504c0908c8692b61d99c5, path: nobleweb/www/about.py, licenses: ["MIT"], max_forks_count: null, forks events: null
content:
```python
from __future__ import unicode_literals
import frappe


def get_context(context):
    return { "doc": frappe.get_doc("About Us Settings","About Us Settings")}
```
row values (avg_line_length through hits, schema order): 25.833333 | 73 | 0.780645 | 22 | 155 | 5.181818 | 0.636364 | 0.122807 | 0.263158 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.116129 | 155 | 6 | 73 | 25.833333 | 0.832117 | 0 | 0 | 0 | 0 | 0 | 0.23871 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.25 | false | 0 | 0.5 | 0.25 | 1 | 0 | 1 | 0 | 0 | null | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 | 1 | 0 | 0 | 0 | 6
hexsha: cb3210d72c34b3041bf34aa5b2421fce176d2718 | size: 244 | ext: py | lang: Python
max_stars_repo: trapeze/transurlvania @ 3ee16d52ea44d086dc873fb184e95194ace50403, path: transurlvania/views.py, licenses: ["BSD-3-Clause"], max_stars_count: 15, stars events: 2015-01-07T14:59:18.000Z to 2020-01-29T21:48:45.000Z
max_issues_repo: trapeze/transurlvania @ 3ee16d52ea44d086dc873fb184e95194ace50403, path: transurlvania/views.py, licenses: ["BSD-3-Clause"], max_issues_count: 1, issues events: 2015-02-02T19:39:10.000Z to 2015-02-02T21:45:22.000Z
max_forks_repo: trapeze/transurlvania @ 3ee16d52ea44d086dc873fb184e95194ace50403, path: transurlvania/views.py, licenses: ["BSD-3-Clause"], max_forks_count: 2, forks events: 2015-03-20T20:43:37.000Z to 2017-08-01T15:30:03.000Z
content:
```python
from django.http import HttpResponseRedirect
from django.utils.translation import get_language_from_request


def detect_language_and_redirect(request):
    return HttpResponseRedirect(
        '/%s/' % get_language_from_request(request)
    )
```
row values (avg_line_length through hits, schema order): 27.111111 | 62 | 0.795082 | 28 | 244 | 6.607143 | 0.571429 | 0.108108 | 0.162162 | 0.237838 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.139344 | 244 | 8 | 63 | 30.5 | 0.880952 | 0 | 0 | 0 | 0 | 0 | 0.016393 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.166667 | false | 0 | 0.333333 | 0.166667 | 0.666667 | 0 | 1 | 0 | 0 | null | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 1 | 1 | 0 | 0 | 6
hexsha: cb77ba056a500fa5dbd53ddf9bde455a670a9707 | size: 12,982 | ext: py | lang: Python
max_stars_repo: MulliganFunding/simple-salesforce @ 6d43d252683e688eb50faab46c6030afc0aa9838, path: simple_salesforce/tests/test_aio/test_bulk.py, licenses: ["Apache-2.0"], max_stars_count: null, stars events: null
max_issues_repo: MulliganFunding/simple-salesforce @ 6d43d252683e688eb50faab46c6030afc0aa9838, path: simple_salesforce/tests/test_aio/test_bulk.py, licenses: ["Apache-2.0"], max_issues_count: null, issues events: null
max_forks_repo: MulliganFunding/simple-salesforce @ 6d43d252683e688eb50faab46c6030afc0aa9838, path: simple_salesforce/tests/test_aio/test_bulk.py, licenses: ["Apache-2.0"], max_forks_count: null, forks events: null
content:
"""Test for bulk.py"""
import copy
import json
from unittest import mock
import httpx
import pytest
from simple_salesforce.exceptions import SalesforceGeneralError
def test_bulk_handler(sf_client, constants):
"""Test that BulkHandler Loads Properly"""
bulk_handler = sf_client.bulk
assert bulk_handler.session_id == sf_client.session_id
assert bulk_handler.bulk_url == sf_client.bulk_url
assert constants["BULK_HEADERS"] == bulk_handler.headers
def test_bulk_type(sf_client, constants):
"""Test bulk type creation"""
contact = sf_client.bulk.Contact
assert contact.bulk_url == sf_client.bulk_url
assert constants["BULK_HEADERS"] == contact.headers
assert "Contact" == contact.object_name
EXPECTED_RESULT = [
{"success": True, "created": True, "id": "001xx000003DHP0AAO", "errors": []},
{"success": True, "created": True, "id": "001xx000003DHP1AAO", "errors": []},
]
EXPECTED_QUERY = [
{
"Id": "001xx000003DHP0AAO",
"AccountId": "ID-13",
"Email": "contact1@example.com",
"FirstName": "Bob",
"LastName": "x",
},
{
"Id": "001xx000003DHP1AAO",
"AccountId": "ID-24",
"Email": "contact2@example.com",
"FirstName": "Alice",
"LastName": "y",
},
{
"Id": "001xx000003DHP0AAO",
"AccountId": "ID-13",
"Email": "contact1@example.com",
"FirstName": "Bob",
"LastName": "x",
},
{
"Id": "001xx000003DHP1AAO",
"AccountId": "ID-24",
"Email": "contact2@example.com",
"FirstName": "Alice",
"LastName": "y",
},
]
@pytest.mark.asyncio
@pytest.mark.parametrize(
"operation,method_name",
(
("delete", "delete"),
("insert", "insert"),
("update", "update"),
("hardDelete", "hard_delete"),
),
)
async def test_insert(operation, method_name, sf_client, mock_httpx_client):
"""Test bulk insert records"""
_, mock_client, _ = mock_httpx_client
body1 = {
"apiVersion": 42.0,
"concurrencyMode": "Parallel",
"contentType": "JSON",
"id": "Job-1",
"object": "Contact",
"operation": operation,
"state": "Open",
}
body2 = {"id": "Batch-1", "jobId": "Job-1", "state": "Queued"}
body3 = copy.deepcopy(body1)
body3["state"] = "Closed"
body4 = copy.deepcopy(body2)
body4["state"] = "InProgress"
body5 = copy.deepcopy(body2)
body5["state"] = "Completed"
body6 = [
{"success": True, "created": True, "id": "001xx000003DHP0AAO", "errors": []},
{"success": True, "created": True, "id": "001xx000003DHP1AAO", "errors": []},
]
body7 = {}
all_bodies = [body1, body2, body3, body4, body5, body6, body7]
responses = [httpx.Response(200, content=json.dumps(body)) for body in all_bodies]
mock_client.request.side_effect = mock.AsyncMock(side_effect=responses)
data = [
{
"AccountId": "ID-1",
"Email": "contact1@example.com",
"FirstName": "Bob",
"LastName": "x",
},
{
"AccountId": "ID-2",
"Email": "contact2@example.com",
"FirstName": "Alice",
"LastName": "y",
},
]
function = getattr(sf_client.bulk.Contact, method_name)
result = await function(data, wait=0.1)
assert EXPECTED_RESULT == result
@pytest.mark.asyncio
async def test_upsert(sf_client, mock_httpx_client):
"""Test bulk upsert records"""
_, mock_client, _ = mock_httpx_client
operation = "delete"
body1 = {
"apiVersion": 42.0,
"concurrencyMode": "Parallel",
"contentType": "JSON",
"id": "Job-1",
"object": "Contact",
"operation": operation,
"state": "Open",
}
body2 = {"id": "Batch-1", "jobId": "Job-1", "state": "Queued"}
body3 = copy.deepcopy(body1)
body3["state"] = "Closed"
body4 = copy.deepcopy(body2)
body4["state"] = "InProgress"
body5 = copy.deepcopy(body2)
body5["state"] = "Completed"
body6 = [
{"success": True, "created": True, "id": "001xx000003DHP0AAO", "errors": []},
{"success": True, "created": True, "id": "001xx000003DHP1AAO", "errors": []},
]
body7 = {}
all_bodies = [body1, body2, body3, body4, body5, body6, body7]
responses = [httpx.Response(200, content=json.dumps(body)) for body in all_bodies]
mock_client.request.side_effect = mock.AsyncMock(side_effect=responses)
data = [{"id": "ID-1"}, {"id": "ID-2"}]
result = await sf_client.bulk.Contact.upsert(data, "some-field", wait=0.1)
assert EXPECTED_RESULT == result
@pytest.mark.asyncio
async def test_query(mock_httpx_client, sf_client):
"""Test bulk query"""
_, mock_client, _ = mock_httpx_client
operation = "query"
body1 = {
"apiVersion": 42.0,
"concurrencyMode": "Parallel",
"contentType": "JSON",
"id": "Job-1",
"object": "Contact",
"operation": operation,
"state": "Open",
}
body2 = {"id": "Batch-1", "jobId": "Job-1", "state": "Queued"}
body3 = copy.deepcopy(body1)
body3["state"] = "Closed"
body4 = copy.deepcopy(body2)
body4["state"] = "InProgress"
body5 = copy.deepcopy(body2)
body5["state"] = "Completed"
body6 = ["752x000000000F1", "752x000000000F2"]
body7 = [
{
"Id": "001xx000003DHP0AAO",
"AccountId": "ID-13",
"Email": "contact1@example.com",
"FirstName": "Bob",
"LastName": "x",
},
{
"Id": "001xx000003DHP1AAO",
"AccountId": "ID-24",
"Email": "contact2@example.com",
"FirstName": "Alice",
"LastName": "y",
},
]
body8 = [
{
"Id": "001xx000003DHP0AAO",
"AccountId": "ID-13",
"Email": "contact1@example.com",
"FirstName": "Bob",
"LastName": "x",
},
{
"Id": "001xx000003DHP1AAO",
"AccountId": "ID-24",
"Email": "contact2@example.com",
"FirstName": "Alice",
"LastName": "y",
},
]
all_bodies = [body1, body2, body3, body4, body5, body6, body7, body8]
responses = [httpx.Response(200, content=json.dumps(body)) for body in all_bodies]
mock_client.request.side_effect = mock.AsyncMock(side_effect=responses)
data = "SELECT Id,AccountId,Email,FirstName,LastName FROM Contact"
result = await sf_client.bulk.Contact.query(data, wait=0.1, lazy_operation=False)
assert body7[0] in result
assert body7[1] in result
assert body8[0] in result
assert body8[1] in result
@pytest.mark.asyncio
async def test_query_all(mock_httpx_client, sf_client):
"""Test bulk query all"""
_, mock_client, _ = mock_httpx_client
operation = "queryAll"
body1 = {
"apiVersion": 42.0,
"concurrencyMode": "Parallel",
"contentType": "JSON",
"id": "Job-1",
"object": "Contact",
"operation": operation,
"state": "Open",
}
body2 = {"id": "Batch-1", "jobId": "Job-1", "state": "Queued"}
body3 = copy.deepcopy(body1)
body3["state"] = "Closed"
body4 = copy.deepcopy(body2)
body4["state"] = "InProgress"
body5 = copy.deepcopy(body2)
body5["state"] = "Completed"
body6 = ["752x000000000F1", "752x000000000F2"]
body7 = [
{
"Id": "001xx000003DHP0AAO",
"AccountId": "ID-13",
"Email": "contact1@example.com",
"FirstName": "Bob",
"LastName": "x",
},
{
"Id": "001xx000003DHP1AAO",
"AccountId": "ID-24",
"Email": "contact2@example.com",
"FirstName": "Alice",
"LastName": "y",
},
]
body8 = [
{
"Id": "001xx000003DHP0AAO",
"AccountId": "ID-13",
"Email": "contact1@example.com",
"FirstName": "Bob",
"LastName": "x",
},
{
"Id": "001xx000003DHP1AAO",
"AccountId": "ID-24",
"Email": "contact2@example.com",
"FirstName": "Alice",
"LastName": "y",
},
]
all_bodies = [body1, body2, body3, body4, body5, body6, body7, body8]
responses = [httpx.Response(200, content=json.dumps(body)) for body in all_bodies]
mock_client.request.side_effect = mock.AsyncMock(side_effect=responses)
data = "SELECT Id,AccountId,Email,FirstName,LastName FROM Contact"
result = await sf_client.bulk.Contact.query_all(
data, wait=0.1, lazy_operation=False
)
assert body7[0] in result
assert body7[1] in result
assert body8[0] in result
assert body8[1] in result
@pytest.mark.asyncio
async def test_query_lazy(mock_httpx_client, sf_client):
"""Test lazy bulk query"""
_, mock_client, _ = mock_httpx_client
operation = "queryAll"
body1 = {
"apiVersion": 42.0,
"concurrencyMode": "Parallel",
"contentType": "JSON",
"id": "Job-1",
"object": "Contact",
"operation": operation,
"state": "Open",
}
body2 = {"id": "Batch-1", "jobId": "Job-1", "state": "Queued"}
body3 = copy.deepcopy(body1)
body3["state"] = "Closed"
body4 = copy.deepcopy(body2)
body4["state"] = "InProgress"
body5 = copy.deepcopy(body2)
body5["state"] = "Completed"
body6 = ["752x000000000F1", "752x000000000F2"]
body7 = [
{
"Id": "001xx000003DHP0AAO",
"AccountId": "ID-13",
"Email": "contact1@example.com",
"FirstName": "Bob",
"LastName": "x",
},
{
"Id": "001xx000003DHP1AAO",
"AccountId": "ID-24",
"Email": "contact2@example.com",
"FirstName": "Alice",
"LastName": "y",
},
]
body8 = [
{
"Id": "001xx000003DHP0AAO",
"AccountId": "ID-15",
"Email": "contact1@example.com",
"FirstName": "Bob",
"LastName": "x",
},
{
"Id": "001xx000003DHP1AAO",
"AccountId": "ID-26",
"Email": "contact2@example.com",
"FirstName": "Alice",
"LastName": "y",
},
]
all_bodies = [body1, body2, body3, body4, body5, body6, body7, body8]
responses = [httpx.Response(200, content=json.dumps(body)) for body in all_bodies]
mock_client.request.side_effect = mock.AsyncMock(side_effect=responses)
data = "SELECT Id,AccountId,Email,FirstName,LastName FROM Contact"
result = await sf_client.bulk.Contact.query_all(data, wait=0.1, lazy_operation=True)
assert body7[0] in result[0]
assert body7[1] in result[0]
assert body8[0] in result[1]
assert body8[1] in result[1]
# [[{'Id': '001xx000003DHP0AAO', 'AccountId': 'ID-13',
# 'Email': 'contact1@example.com', 'FirstName': 'Bob',
# 'LastName': 'x'}, {'Id': '001xx000003DHP1AAO',
# 'AccountId': 'ID-24', 'Email': 'contact2@example.com',
# 'FirstName': 'Alice', 'LastName': 'y'}],
# [{'Id': '001xx000003DHP0AAO', 'AccountId': 'ID-13',
# 'Email': 'contact1@example.com', 'FirstName': 'Bob',
# 'LastName': 'x'}, {'Id': '001xx000003DHP1AAO',
# 'AccountId': 'ID-24', 'Email': 'contact2@example.com',
# 'FirstName': 'Alice', 'LastName': 'y'}]]
@pytest.mark.asyncio
async def test_query_fail(mock_httpx_client, sf_client):
"""Test bulk query records failure"""
_, mock_client, _ = mock_httpx_client
operation = "query"
body1 = {
"apiVersion": 42.0,
"concurrencyMode": "Parallel",
"contentType": "JSON",
"id": "Job-1",
"object": "Contact",
"operation": operation,
"state": "Open",
}
body2 = {"id": "Batch-1", "jobId": "Job-1", "state": "Queued"}
body3 = {
"apiVersion": 42.0,
"concurrencyMode": "Parallel",
"contentType": "JSON",
"id": "Job-1",
"object": "Contact",
"operation": operation,
"state": "Closed",
}
body4 = {"id": "Batch-1", "jobId": "Job-1", "state": "InProgress"}
body5 = {
"id": "Batch-1",
"jobId": "Job-1",
"state": "Failed",
"stateMessage": "InvalidBatch : Failed to process query",
}
all_bodies = [body1, body2, body3, body4, body5]
responses = [httpx.Response(200, content=json.dumps(body)) for body in all_bodies]
mock_client.request.side_effect = mock.AsyncMock(side_effect=responses)
data = "SELECT ASDFASfgsds FROM Contact"
with pytest.raises(SalesforceGeneralError) as exc:
await sf_client.bulk.Contact.query(data, wait=0.1)
assert exc.status == body5["state"]
assert exc.resource_name == body5["jobId"]
assert exc.content == body5["stateMessage"]
row values (avg_line_length through hits, schema order): 32.293532 | 88 | 0.559082 | 1,314 | 12,982 | 5.423135 | 0.105784 | 0.03396 | 0.058658 | 0.035504 | 0.858687 | 0.846057 | 0.827814 | 0.805782 | 0.776031 | 0.776031 | 0 | 0.066145 | 0.272146 | 12,982 | 401 | 89 | 32.374065 | 0.688009 | 0.044292 | 0 | 0.66205 | 0 | 0 | 0.264554 | 0.010808 | 0 | 0 | 0 | 0 | 0.063712 | 1 | 0.00554 | false | 0 | 0.016621 | 0 | 0.022161 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6
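The dupe-n-gram fractions behave similarly. A minimal sketch of one plausible reading of the qsc_code_frac_chars_dupe_{n}grams family (the whitespace tokenization here is an assumption, not the dataset's confirmed implementation):

```python
from collections import Counter


def frac_chars_dupe_ngrams(text: str, n: int) -> float:
    """Fraction of word characters covered by word n-grams seen more than once."""
    words = text.split()
    if len(words) < n:
        return 0.0
    ngrams = [tuple(words[i:i + n]) for i in range(len(words) - n + 1)]
    counts = Counter(ngrams)
    # Mark word positions covered by any duplicated n-gram.
    covered = [False] * len(words)
    for i, gram in enumerate(ngrams):
        if counts[gram] > 1:
            for j in range(i, i + n):
                covered[j] = True
    total = sum(len(word) for word in words)
    duped = sum(len(word) for word, hit in zip(words, covered) if hit)
    return duped / total if total else 0.0
```

Test files like the one above, which repeat near-identical setup blocks, score high on these signals without being low-quality code.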
hexsha: cb808e04dd2f841b591d9238c57e86b8e54c861a | size: 126 | ext: py | lang: Python
max_stars_repo: LamWS/ArknightsAutoHelper @ 7e3231aceaa23728851e90ba1e8937d9b7dabb35, path: GUI/Frames/__init__.py, licenses: ["MIT"], max_stars_count: 2, stars events: 2021-07-14T04:03:57.000Z to 2022-03-17T03:23:19.000Z
max_issues_repo: AlvISsReimu/ArknightsAutoHelper @ 7112b73c01fe381b20314342ba0dfa2f7e01805d, path: GUI/Frames/__init__.py, licenses: ["MIT"], max_issues_count: 1, issues events: 2019-09-10T13:58:24.000Z to 2019-09-10T13:58:24.000Z
max_forks_repo: AlaricGilbert/ArknightsAutoHelper @ 9e2db0c4e0d1be30856df731ab192da396121d94, path: GUI/Frames/__init__.py, licenses: ["MIT"], max_forks_count: null, forks events: null
content:
```python
from GUI.Frames.Index import Index
from GUI.Frames.Dialog import MessageDialog_OK, MessageDialog_CANCEL, MessageDialog_Yes_No
```
row values (avg_line_length through hits, schema order): 42 | 90 | 0.873016 | 18 | 126 | 5.888889 | 0.611111 | 0.132075 | 0.245283 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.079365 | 126 | 2 | 91 | 63 | 0.913793 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 0 | 0 | null | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6
hexsha: cba67531bd2aff80ddcf46ddcdf5853a6455dc49 | size: 39 | ext: py | lang: Python
max_stars_repo: ramadevim/Crud-Operations @ ac89701c1cdefe088fb165b90c6f2629615e43da, path: crud operations/11.py, licenses: ["MIT"], max_stars_count: null, stars events: null
max_issues_repo: ramadevim/Crud-Operations @ ac89701c1cdefe088fb165b90c6f2629615e43da, path: crud operations/11.py, licenses: ["MIT"], max_issues_count: null, issues events: null
max_forks_repo: ramadevim/Crud-Operations @ ac89701c1cdefe088fb165b90c6f2629615e43da, path: crud operations/11.py, licenses: ["MIT"], max_forks_count: null, forks events: null
content:
```python
import random
print (random.randint(4))
```
row values (avg_line_length through hits, schema order): 19.5 | 25 | 0.794872 | 6 | 39 | 5.166667 | 0.833333 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.027778 | 0.076923 | 39 | 2 | 25 | 19.5 | 0.833333 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 0.5 | 0 | 0.5 | 0.5 | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 0 | 1 | 0 | 6
hexsha: cbbd0ef0fb806c2c20f14a8d503ace8c852aab38 | size: 6,044 | ext: py | lang: Python
max_stars_repo: haojunliu/OpenFPGA @ b0c4f27077f698aae59bbcbd3ca002f22ba2a5a1, path: archipelago/src/lut_tile/config/sweep_gen.py, licenses: ["BSD-2-Clause"], max_stars_count: 31, stars events: 2016-02-15T02:57:28.000Z to 2021-06-02T10:40:25.000Z
max_issues_repo: haojunliu/OpenFPGA @ b0c4f27077f698aae59bbcbd3ca002f22ba2a5a1, path: archipelago/src/lut_tile/config/sweep_gen.py, licenses: ["BSD-2-Clause"], max_issues_count: null, issues events: null
max_forks_repo: haojunliu/OpenFPGA @ b0c4f27077f698aae59bbcbd3ca002f22ba2a5a1, path: archipelago/src/lut_tile/config/sweep_gen.py, licenses: ["BSD-2-Clause"], max_forks_count: 6, forks events: 2017-02-08T21:51:51.000Z to 2021-06-02T10:40:40.000Z
content:
```python
import sys, os, math


def main(argv=None):
    for lut_size in range (4, 6+1):
        for clb_size in range (4, 10+1):
            for ipin_w in [8, 12, 16]:
                for chanxy_w in [8, 12, 16]:
                    for chanxy_num in [80, 120, 160]:
                        config_name = str(lut_size) + '_' + str(clb_size) + '_' + str(ipin_w) + '_' + str(chanxy_w) + '_' + str(chanxy_num)
                        gen_config_file(config_name, lut_size, clb_size, ipin_w, chanxy_w, chanxy_num)


def gen_config_file(config_name, lut_size, clb_size, ipin_w, chanxy_w, chanxy_num):
    config_file_name = 'lut_tile_config_' + config_name
    config_fp = open (config_file_name, 'w')
    line_to_print = 'package fpga_components\n'
    config_fp.write(line_to_print)
    line_to_print = '{\n'
    config_fp.write(line_to_print)
    line_to_print = '\n'
    config_fp.write(line_to_print)
    line_to_print = 'import Chisel._\nimport scala.math._\n\nobject LutConstants {\n'
    config_fp.write(line_to_print)
    line_to_print = ' // LUT CONFIG\n'
    config_fp.write(line_to_print)
    line_to_print = ' var VAR_NUM_INPUTS_PER_LUT = ' + str(lut_size) + '\n'
    config_fp.write(line_to_print)
    line_to_print = ' var VAR_NUM_OUTPUTS_PER_LUT = ' + str(1) + '\n'
    config_fp.write(line_to_print)
    line_to_print = ' var VAR_NUM_CONFIGS_PER_LUT = ' + str(1<<lut_size) + '\n'
    config_fp.write(line_to_print)
    line_to_print = ' var VAR_NUM_MUXES_PER_LUT = ' + str(1) + '\n'
    config_fp.write(line_to_print)
    line_to_print = ' // CLB CONFIG\n'
    config_fp.write(line_to_print)
    line_to_print = ' var VAR_NUM_CLB_IN = ' + str(lut_size*clb_size) + '\n'
    config_fp.write(line_to_print)
    line_to_print = ' var VAR_NUM_CLB_OUT = ' + str(clb_size) + '\n'
    config_fp.write(line_to_print)
    line_to_print = ' var VAR_NUM_LUTS_PER_CLB = ' + str(clb_size) + '\n'
    config_fp.write(line_to_print)
    line_to_print = ' var VAR_NUM_CLB_LUT_CONFIGS = ' + str((1<<lut_size)*clb_size) + '\n'
    config_fp.write(line_to_print)
    line_to_print = ' var VAR_NUM_CLB_MUX_CONFIGS = ' + str(clb_size) + '\n'
    config_fp.write(line_to_print)
    line_to_print = ' var VAR_TOTAL_MUX_CONFIGS = ' + str(clb_size) + '\n'
    config_fp.write(line_to_print)
    line_to_print = ' var VAR_NUM_IPIN_PER_TILE = ' + str(int(lut_size*clb_size*0.5)+3) + '\n'
    config_fp.write(line_to_print)
    line_to_print = ' var VAR_NUM_OPIN_PER_TILE = ' + str(clb_size) + '\n'
    config_fp.write(line_to_print)
    line_to_print = ' var VAR_NUM_XBAR_INPUTS = ' + str(int(lut_size*clb_size*0.5)+3+clb_size) + '\n'
    config_fp.write(line_to_print)
    line_to_print = ' var VAR_NUM_XBAR_OUTPUTS = ' + str(lut_size*clb_size) + '\n'
    config_fp.write(line_to_print)
    per_mux_config = int(math.floor(math.log((int(lut_size*clb_size*0.5)+3+clb_size)- 0.000001, 2) + 1))
    line_to_print = ' var VAR_NUM_XBAR_PER_MUX_CONFIGS = ' + str(per_mux_config) + '\n'
    config_fp.write(line_to_print)
    line_to_print = ' var VAR_NUM_XBAR_CONFIGS = ' + str(per_mux_config*lut_size*clb_size) + '\n'
    config_fp.write(line_to_print)
    line_to_print = ' var VAR_TOTAL_XBAR_CONFIGS = VAR_NUM_XBAR_CONFIGS\n'
    config_fp.write(line_to_print)
    line_to_print = ' var VAR_IPIN_INPUT_WIDTH = ' + str(ipin_w) + '\n'
    config_fp.write(line_to_print)
    line_to_print = ' var VAR_IPIN_CONFIG_WIDTH = ' + str(int(math.floor(math.log(ipin_w - 0.00001, 2) + 1))) + '\n'
    config_fp.write(line_to_print)
    line_to_print = ' var VAR_NUM_CHANXY_PER_TILE = ' + str(int(chanxy_num*0.5)) + '\n'
    config_fp.write(line_to_print)
    line_to_print = ' var VAR_CHANXY_INPUT_WIDTH = ' + str(chanxy_w) + '\n'
    config_fp.write(line_to_print)
    line_to_print = ' var VAR_CHANXY_CONFIG_WIDTH = ' + str(int(math.floor(math.log(chanxy_w - 0.00001, 2) + 1))) + '\n'
    config_fp.write(line_to_print)
    line_to_print = ' var VAR_LUT_CONFIGS_START = 0\n'
    line_to_print = line_to_print + ' var VAR_LUT_CONFIGS_END = VAR_LUT_CONFIGS_START + VAR_NUM_CLB_LUT_CONFIGS - 1\n'
    line_to_print = line_to_print + ' var VAR_MUX_CONFIGS_START = VAR_NUM_CLB_LUT_CONFIGS\n'
    line_to_print = line_to_print + ' var VAR_MUX_CONFIGS_END = VAR_MUX_CONFIGS_START + VAR_TOTAL_MUX_CONFIGS - 1\n'
    line_to_print = line_to_print + ' var VAR_XBAR_CONFIGS_START = VAR_MUX_CONFIGS_END + 1\n'
    line_to_print = line_to_print + ' var VAR_XBAR_CONFIGS_END = VAR_XBAR_CONFIGS_START + VAR_TOTAL_XBAR_CONFIGS - 1\n'
    num_ipin_config = (int(lut_size*clb_size*0.5)+3)*(int(math.floor(math.log(ipin_w - 0.00001, 2) + 1)))
    line_to_print = line_to_print + ' var VAR_TOTAL_IPIN_CONFIGS = ' + str(num_ipin_config) + '\n'
    line_to_print = line_to_print + ' var VAR_SBCB_IPIN_CONFIGS_START = VAR_XBAR_CONFIGS_END + 1\n'
    line_to_print = line_to_print + ' var VAR_SBCB_IPIN_CONFIGS_END = VAR_SBCB_IPIN_CONFIGS_START + VAR_TOTAL_IPIN_CONFIGS - 1\n'
    num_chanxy_config = int(chanxy_num*0.5)*(int(math.floor(math.log(chanxy_w - 0.00001, 2) + 1)))
    line_to_print = line_to_print + ' var VAR_TOTAL_CHANXY_CONFIGS = ' + str(num_chanxy_config) + '\n'
    line_to_print = line_to_print + ' var VAR_SBCB_CHANXY_CONFIGS_START = VAR_SBCB_IPIN_CONFIGS_END + 1\n'
    line_to_print = line_to_print + ' var VAR_SBCB_CHANXY_CONFIGS_END = VAR_SBCB_CHANXY_CONFIGS_START + VAR_TOTAL_CHANXY_CONFIGS - 1\n'
    total_num_config = (1<<lut_size)*clb_size + clb_size + per_mux_config*lut_size*clb_size + num_ipin_config + num_chanxy_config
    line_to_print = line_to_print + ' var VAR_TOTAL_NUM_CONFIGS = ' + str(total_num_config) + '\n'
    level_of_config_depth = 1 + total_num_config/32
    line_to_print = line_to_print + ' var VAR_CONFIGS_FILE_DEPTH = ' + str(level_of_config_depth) + '\n'
    config_fp.write(line_to_print)
    line_to_print = '}\n\n}\n'
    config_fp.write(line_to_print)
    config_fp.close()


if __name__ == "__main__":
    sys.exit(main())
```
row values (avg_line_length through hits, schema order): 55.449541 | 139 | 0.688451 | 1,017 | 6,044 | 3.60177 | 0.080629 | 0.140868 | 0.258258 | 0.167895 | 0.765493 | 0.720994 | 0.701884 | 0.655747 | 0.634999 | 0.618619 | 0 | 0.020287 | 0.192588 | 6,044 | 108 | 140 | 55.962963 | 0.730328 | 0 | 0 | 0.322581 | 0 | 0 | 0.298974 | 0.152879 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.021505 | false | 0 | 0.021505 | 0 | 0.043011 | 0.784946 | 0 | 0 | 0 | null | 0 | 1 | 1 | 0 | 1 | 1 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 6
hexsha: cbd4bf08f4c4d54885cd6032677d54da6bf081a5 | size: 163 | ext: py | lang: Python
max_stars_repo: yokosyun/instance-segmentation @ 5779ae864b24c28300b0ddc4c314e63490215606, path: pytorch_mask_rcnn/__init__.py, licenses: ["MIT"], max_stars_count: null, stars events: null
max_issues_repo: yokosyun/instance-segmentation @ 5779ae864b24c28300b0ddc4c314e63490215606, path: pytorch_mask_rcnn/__init__.py, licenses: ["MIT"], max_issues_count: null, issues events: null
max_forks_repo: yokosyun/instance-segmentation @ 5779ae864b24c28300b0ddc4c314e63490215606, path: pytorch_mask_rcnn/__init__.py, licenses: ["MIT"], max_forks_count: null, forks events: null
content:
```python
from .model import maskrcnn_resnet50
from .datasets import *
from .engine import train_one_epoch

try:
    from .visualize import show
except ImportError:
    pass
```
row values (avg_line_length through hits, schema order): 20.375 | 36 | 0.785276 | 22 | 163 | 5.681818 | 0.727273 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.014815 | 0.171779 | 163 | 8 | 37 | 20.375 | 0.911111 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0.142857 | 0.714286 | 0 | 0.714286 | 0 | 1 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 1 | 1 | 0 | 1 | 0 | 0 | 6
hexsha: cbdb22545e68620f931b9d5f6f61a973f5b4bd93 | size: 40 | ext: py | lang: Python
max_stars_repo: jiportilla/ontology @ 8a66bb7f76f805c64fc76cfc40ab7dfbc1146f40, path: python/cendalytics/wikipedia/ingest/bp/__init__.py, licenses: ["MIT"], max_stars_count: null, stars events: null
max_issues_repo: jiportilla/ontology @ 8a66bb7f76f805c64fc76cfc40ab7dfbc1146f40, path: python/cendalytics/wikipedia/ingest/bp/__init__.py, licenses: ["MIT"], max_issues_count: null, issues events: null
max_forks_repo: jiportilla/ontology @ 8a66bb7f76f805c64fc76cfc40ab7dfbc1146f40, path: python/cendalytics/wikipedia/ingest/bp/__init__.py, licenses: ["MIT"], max_forks_count: null, forks events: null
content:
```python
from .wikipedia_api import WikipediaAPI
```
row values (avg_line_length through hits, schema order): 20 | 39 | 0.875 | 5 | 40 | 6.8 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.1 | 40 | 1 | 40 | 40 | 0.944444 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6
hexsha: 38341d165fa256f3909581a9d9e970a2d884cf2f | size: 20 | ext: py | lang: Python
max_stars_repo: uktrade/evemansys @ 43a1f034a55a4b9dc9594d13eb7a1a530efeb479, path: evemansys/backend/helpers.py, licenses: ["MIT"], max_stars_count: null, stars events: null
max_issues_repo: uktrade/evemansys @ 43a1f034a55a4b9dc9594d13eb7a1a530efeb479, path: evemansys/backend/helpers.py, licenses: ["MIT"], max_issues_count: null, issues events: null
max_forks_repo: uktrade/evemansys @ 43a1f034a55a4b9dc9594d13eb7a1a530efeb479, path: evemansys/backend/helpers.py, licenses: ["MIT"], max_forks_count: null, forks events: null
content:
```python
import functools
```
row values (avg_line_length through hits, schema order): 4 | 16 | 0.75 | 2 | 20 | 7.5 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.25 | 20 | 4 | 17 | 5 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 6
hexsha: 3848e7470c0b214c206fc6d2030f9d8011818f78 | size: 191 | ext: py | lang: Python
max_stars_repo: Rojanson/stepik-dl-nlp @ e32748fbccd0868a8e4a131e4749188935ed524f, path: dlnlputils/data/__init__.py, licenses: ["MIT"], max_stars_count: null, stars events: null
max_issues_repo: Rojanson/stepik-dl-nlp @ e32748fbccd0868a8e4a131e4749188935ed524f, path: dlnlputils/data/__init__.py, licenses: ["MIT"], max_issues_count: null, issues events: null
max_forks_repo: Rojanson/stepik-dl-nlp @ e32748fbccd0868a8e4a131e4749188935ed524f, path: dlnlputils/data/__init__.py, licenses: ["MIT"], max_forks_count: null, forks events: null
content:
```python
from .base import *
from .bag_of_words import *
from .embeddings import *
from .nnets import *
from .pos import *
from .lemmatize import *
from .poetry import *
from .ngrams_handler import *
```
row values (avg_line_length through hits, schema order): 21.222222 | 29 | 0.748691 | 27 | 191 | 5.185185 | 0.481481 | 0.5 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.167539 | 191 | 8 | 30 | 23.875 | 0.880503 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | null | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6
hexsha: 69989f82068adbed0ab73ca99f23d2142d9760c4 | size: 34,662 | ext: py | lang: Python
max_stars_repo: gkodukula/public-datasets-pipelines @ 4f4c87edae252059062ba479b80559e7675a885f, path: datasets/google_dei/pipelines/diversity_annual_report/diversity_annual_report_dag.py, licenses: ["Apache-2.0"], max_stars_count: null, stars events: null
max_issues_repo: gkodukula/public-datasets-pipelines @ 4f4c87edae252059062ba479b80559e7675a885f, path: datasets/google_dei/pipelines/diversity_annual_report/diversity_annual_report_dag.py, licenses: ["Apache-2.0"], max_issues_count: null, issues events: null
max_forks_repo: gkodukula/public-datasets-pipelines @ 4f4c87edae252059062ba479b80559e7675a885f, path: datasets/google_dei/pipelines/diversity_annual_report/diversity_annual_report_dag.py, licenses: ["Apache-2.0"], max_forks_count: null, forks events: null
content (truncated in the source):
```python
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from airflow import DAG
from airflow.providers.google.cloud.transfers import gcs_to_bigquery

default_args = {
    "owner": "Google",
    "depends_on_past": False,
    "start_date": "2021-05-01",
}

with DAG(
    dag_id="google_dei.diversity_annual_report",
    default_args=default_args,
    max_active_runs=1,
    schedule_interval="@once",
    catchup=False,
    default_view="graph",
) as dag:

    # Task to load CSV data to a BigQuery table
    load_intersectional_attrition_index_to_bq = gcs_to_bigquery.GCSToBigQueryOperator(
        task_id="load_intersectional_attrition_index_to_bq",
        bucket="{{ var.json.google_dei.storage_bucket }}",
        source_objects=["DAR/2022/intersectional_attrition_index.csv"],
        source_format="CSV",
        destination_project_dataset_table="google_dei.dar_intersectional_attrition_index",
        skip_leading_rows=1,
        write_disposition="WRITE_TRUNCATE",
        schema_fields=[
            {
                "name": "workforce",
                "description": "Overall and sub-categories",
                "type": "string",
                "mode": "required",
            },
            {
                "name": "report_year",
                "description": "The year the report was published",
                "type": "integer",
                "mode": "required",
            },
            {
                "name": "gender_us",
                "description": "Gender of Googler exits in the U.S.",
                "type": "string",
                "mode": "required",
            },
            {
                "name": "race_asian",
                "description": "The attrition index score of Googlers in the U.S. who identify as Asian and zero or more other races",
                "type": "integer",
                "mode": "nullable",
            },
            {
                "name": "race_black",
                "description": "The attrition index score of Googlers in the U.S. who identify as Black and zero or more other races",
                "type": "integer",
                "mode": "nullable",
            },
            {
                "name": "race_hispanic_latinx",
                "description": "The attrition index score of Googlers in the U.S. who identify as Hispanic or Latinx and zero or more other races",
                "type": "integer",
                "mode": "nullable",
            },
            {
                "name": "race_native_american",
                "description": "The attrition index score of Googlers in the U.S. who identify as Native American and zero or more other races",
                "type": "integer",
                "mode": "nullable",
            },
            {
                "name": "race_white",
                "description": "The attrition index score of Googlers in the U.S. who identify as White and zero or more other races",
                "type": "integer",
                "mode": "nullable",
            },
        ],
    )

    # Task to load CSV data to a BigQuery table
    load_intersectional_hiring_to_bq = gcs_to_bigquery.GCSToBigQueryOperator(
        task_id="load_intersectional_hiring_to_bq",
        bucket="{{ var.json.google_dei.storage_bucket }}",
        source_objects=["DAR/2022/intersectional_hiring.csv"],
        source_format="CSV",
        destination_project_dataset_table="google_dei.dar_intersectional_hiring",
        skip_leading_rows=1,
        write_disposition="WRITE_TRUNCATE",
        schema_fields=[
            {
                "name": "workforce",
                "description": "Overall and sub-categories",
                "type": "string",
                "mode": "required",
            },
            {
                "name": "report_year",
                "description": "The year the report was published",
                "type": "integer",
                "mode": "required",
            },
            {
                "name": "gender_us",
                "description": "Gender of Googlers hired in the U.S.",
                "type": "string",
                "mode": "required",
            },
            {
                "name": "race_asian",
                "description": "The percentage of Googlers hired in the U.S. who identify as Asian and zero or more other races",
                "type": "float",
                "mode": "nullable",
            },
            {
                "name": "race_black",
                "description": "The percentage of Googlers hired in the U.S. who identify as Black and zero or more other races",
                "type": "float",
                "mode": "nullable",
            },
            {
                "name": "race_hispanic_latinx",
                "description": "The percentage of Googlers hired in the U.S. who identify as Hispanic or Latinx and zero or more other races",
                "type": "float",
                "mode": "nullable",
            },
            {
                "name": "race_native_american",
                "description": "The percentage of Googlers hired in the U.S. who identify as Native American and zero or more other races",
                "type": "float",
                "mode": "nullable",
            },
            {
                "name": "race_white",
                "description": "The percentage of Googlers hired in the U.S. who identify as White and zero or more other races",
                "type": "float",
                "mode": "nullable",
            },
        ],
    )

    # Task to load CSV data to a BigQuery table
    load_intersectional_representation_to_bq = gcs_to_bigquery.GCSToBigQueryOperator(
        task_id="load_intersectional_representation_to_bq",
        bucket="{{ var.json.google_dei.storage_bucket }}",
        source_objects=["DAR/2022/intersectional_representation.csv"],
        source_format="CSV",
        destination_project_dataset_table="google_dei.dar_intersectional_representation",
        skip_leading_rows=1,
        write_disposition="WRITE_TRUNCATE",
        schema_fields=[
            {
                "name": "workforce",
                "description": "Overall and sub-categories",
                "type": "string",
                "mode": "required",
            },
            {
                "name": "report_year",
                "description": "The year the report was published",
                "type": "integer",
                "mode": "required",
            },
            {
                "name": "gender_us",
                "description": "Gender of Googlers in the U.S.",
                "type": "string",
                "mode": "required",
            },
            {
                "name": "race_asian",
                "description": "The percentage of Googlers in the U.S. who identify as Asian and zero or more other races",
                "type": "float",
                "mode": "nullable",
            },
            {
                "name": "race_black",
                "description": "The percentage of Googlers in the U.S. who identify as Black and zero or more other races",
                "type": "float",
                "mode": "nullable",
            },
            {
                "name": "race_hispanic_latinx",
                "description": "The percentage of Googlers in the U.S. who identify as Hispanic or Latinx and zero or more other races",
                "type": "float",
                "mode": "nullable",
            },
            {
                "name": "race_native_american",
                "description": "The percentage of Googlers in the U.S. who identify as Native American and zero or more other races",
                "type": "float",
                "mode": "nullable",
            },
            {
                "name": "race_white",
                "description": "The percentage of Googlers in the U.S. who identify as White and zero or more other races",
                "type": "float",
                "mode": "nullable",
            },
        ],
    )

    # Task to load CSV data to a BigQuery table
    load_intersectional_exits_representation_to_bq = gcs_to_bigquery.GCSToBigQueryOperator(
        task_id="load_intersectional_exits_representation_to_bq",
        bucket="{{ var.json.google_dei.storage_bucket }}",
        source_objects=["DAR/2022/intersectional_exits_representation.csv"],
        source_format="CSV",
        destination_project_dataset_table="google_dei.dar_intersectional_exits_representation",
        skip_leading_rows=1,
        write_disposition="WRITE_TRUNCATE",
        schema_fields=[
            {
                "name": "workforce",
                "description": "Overall and sub-categories",
                "type": "string",
                "mode": "required",
            },
            {
                "name": "report_year",
                "description": "The year the report was published",
                "type": "integer",
                "mode": "required",
            },
            {
                "name": "gender_us",
                "description": "Gender of Googler exits in the U.S.",
                "type": "string",
                "mode": "required",
            },
            {
                "name": "race_asian",
                "description": "The percentage of Googler exits in the U.S. who identify as Asian and zero or more other races",
                "type": "float",
                "mode": "nullable",
            },
            {
                "name": "race_black",
                "description": "The percentage of Googler exits in the U.S. who identify as Black and zero or more other races",
                "type": "float",
                "mode": "nullable",
            },
            {
                "name": "race_hispanic_latinx",
                "description": "The percentage of Googler exits in the U.S. who identify as Hispanic or Latinx and zero or more other races",
                "type": "float",
                "mode": "nullable",
            },
            {
                "name": "race_native_american",
                "description": "The percentage of Googler exits in the U.S. who identify as Native American and zero or more other races",
                "type": "float",
                "mode": "nullable",
            },
            {
                "name": "race_white",
                "description": "The percentage of Googler exits in the U.S. who identify as White and zero or more other races",
                "type": "float",
                "mode": "nullable",
            },
        ],
    )

    # Task to load CSV data to a BigQuery table
    load_non_intersectional_representation_to_bq = gcs_to_bigquery.GCSToBigQueryOperator(
        task_id="load_non_intersectional_representation_to_bq",
        bucket="{{ var.json.google_dei.storage_bucket }}",
        source_objects=["DAR/2022/non_intersectional_representation.csv"],
        source_format="CSV",
        destination_project_dataset_table="google_dei.dar_non_intersectional_representation",
        skip_leading_rows=1,
        write_disposition="WRITE_TRUNCATE",
        schema_fields=[
            {
                "name": "workforce",
                "description": "Overall and sub-categories",
                "type": "string",
                "mode": "required",
            },
            {
                "name": "report_year",
                "description": "The year the report was published",
                "type": "integer",
                "mode": "required",
            },
            {
                "name": "race_asian",
                "description": "The percentage of Googlers in the U.S. who identify as Asian and zero or more other races",
                "type": "float",
                "mode": "nullable",
            },
            {
                "name": "race_black",
                "description": "The percentage of Googlers in the U.S. who identify as Black and zero or more other races",
                "type": "float",
                "mode": "nullable",
            },
            {
                "name": "race_hispanic_latinx",
                "description": "The percentage of Googlers in the U.S. who identify as Hispanic or Latinx and zero or more other races",
                "type": "float",
                "mode": "nullable",
            },
            {
                "name": "race_native_american",
                "description": "The percentage of Googlers in the U.S. who identify as Native American and zero or more other races",
                "type": "float",
                "mode": "nullable",
            },
            {
                "name": "race_white",
                "description": "The percentage of Googlers in the U.S. who identify as White and zero or more other races",
                "type": "float",
                "mode": "nullable",
            },
            {
                "name": "gender_us_women",
                "description": "The percentage of Googlers in the U.S. who identify as women",
                "type": "float",
                "mode": "nullable",
            },
            {
                "name": "gender_us_men",
                "description": "The percentage of Googlers in the U.S. who identify as men",
                "type": "float",
                "mode": "nullable",
            },
            {
                "name": "gender_global_women",
                "description": "The percentage of global Googlers who identify as women",
                "type": "float",
                "mode": "nullable",
            },
            {
                "name": "gender_global_men",
                "description": "The percentage of global Googlers who identify as men",
                "type": "float",
                "mode": "nullable",
            },
        ],
    )

    # Task to load CSV data to a BigQuery table
    load_non_intersectional_exits_representation_to_bq = gcs_to_bigquery.GCSToBigQueryOperator(
        task_id="load_non_intersectional_exits_representation_to_bq",
        bucket="{{ var.json.google_dei.storage_bucket }}",
        source_objects=["DAR/2022/non_intersectional_exits_representation.csv"],
        source_format="CSV",
        destination_project_dataset_table="google_dei.dar_non_intersectional_exits_representation",
        skip_leading_rows=1,
        write_disposition="WRITE_TRUNCATE",
        schema_fields=[
            {
                "name": "workforce",
                "description": "Overall and sub-categories",
                "type": "string",
                "mode": "required",
            },
            {
                "name": "report_year",
                "description": "The year the report was published",
                "type": "integer",
                "mode": "required",
            },
            {
                "name": "race_asian",
                "description": "The percentage of Googler exits in the U.S. who identify as Asian and zero or more other races",
                "type": "float",
                "mode": "nullable",
            },
            {
                "name": "race_black",
                "description": "The percentage of Googler exits in the U.S. who identify as Black and zero or more other races",
                "type": "float",
                "mode": "nullable",
            },
            {
                "name": "race_hispanic_latinx",
                "description": "The percentage of Googler exits in the U.S. who identify as Hispanic or Latinx and zero or more other races",
                "type": "float",
                "mode": "nullable",
            },
            {
                "name": "race_native_american",
                "description": "The percentage of Googler exits in the U.S. who identify as Native American and zero or more other races",
                "type": "float",
                "mode": "nullable",
```
},
{
"name": "race_white",
"description": "The percentage of Googler exits in the U.S. who identify as White and zero or more other races",
"type": "float",
"mode": "nullable",
},
{
"name": "gender_us_women",
"description": "The percentage of Googler exits in the U.S. who identify as women",
"type": "float",
"mode": "nullable",
},
{
"name": "gender_us_men",
"description": "The percentage of Googler exits in the U.S. who identify as men",
"type": "float",
"mode": "nullable",
},
{
"name": "gender_global_women",
"description": "The percentage of global Googler exits who identify as women",
"type": "float",
"mode": "nullable",
},
{
"name": "gender_global_men",
"description": "The percentage of global Googler exits who identify as men",
"type": "float",
"mode": "nullable",
},
],
)
# Task to load CSV data to a BigQuery table
load_non_intersectional_attrition_index_to_bq = gcs_to_bigquery.GCSToBigQueryOperator(
task_id="load_non_intersectional_attrition_index_to_bq",
bucket="{{ var.json.google_dei.storage_bucket }}",
source_objects=["DAR/2022/non_intersectional_attrition_index.csv"],
source_format="CSV",
destination_project_dataset_table="google_dei.dar_non_intersectional_attrition_index",
skip_leading_rows=1,
write_disposition="WRITE_TRUNCATE",
schema_fields=[
{
"name": "workforce",
"description": "Overall and sub-categories",
"type": "string",
"mode": "required",
},
{
"name": "report_year",
"description": "The year the report was published",
"type": "integer",
"mode": "required",
},
{
"name": "race_asian",
"description": "The attrition index score of Googlers in the U.S. who identify as Asian and zero or more other races",
"type": "integer",
"mode": "nullable",
},
{
"name": "race_black",
"description": "The attrition index score of Googlers in the U.S. who identify as Black and zero or more other races",
"type": "integer",
"mode": "nullable",
},
{
"name": "race_hispanic_latinx",
"description": "The attrition index score of Googlers in the U.S. who identify as Hispanic or Latinx and zero or more other races",
"type": "integer",
"mode": "nullable",
},
{
"name": "race_native_american",
"description": "The attrition index score of Googlers in the U.S. who identify as Native American and zero or more other races",
"type": "integer",
"mode": "nullable",
},
{
"name": "race_white",
"description": "The attrition index score of Googlers in the U.S. who identify as White and zero or more other races",
"type": "integer",
"mode": "nullable",
},
{
"name": "gender_us_women",
"description": "The attrition index score of Googlers in the U.S. who are women",
"type": "integer",
"mode": "nullable",
},
{
"name": "gender_us_men",
"description": "The attrition index score of Googlers in the U.S. who are men",
"type": "integer",
"mode": "nullable",
},
{
"name": "gender_global_women",
"description": "The attrition index score of global Googlers who are women",
"type": "integer",
"mode": "nullable",
},
{
"name": "gender_global_men",
"description": "The attrition index score of global Googlers who are men",
"type": "integer",
"mode": "nullable",
},
],
)
# Task to load CSV data to a BigQuery table
load_non_intersectional_hiring_to_bq = gcs_to_bigquery.GCSToBigQueryOperator(
task_id="load_non_intersectional_hiring_to_bq",
bucket="{{ var.json.google_dei.storage_bucket }}",
source_objects=["DAR/2022/non_intersectional_hiring.csv"],
source_format="CSV",
destination_project_dataset_table="google_dei.dar_non_intersectional_hiring",
skip_leading_rows=1,
write_disposition="WRITE_TRUNCATE",
schema_fields=[
{
"name": "workforce",
"description": "Overall and sub-categories",
"type": "string",
"mode": "required",
},
{
"name": "report_year",
"description": "The year the report was published",
"type": "integer",
"mode": "required",
},
{
"name": "race_asian",
"description": "The percentage of Googlers hired in the U.S. who identify as Asian and zero or more other races",
"type": "float",
"mode": "nullable",
},
{
"name": "race_black",
"description": "The percentage of Googlers hired in the U.S. who identify as Black and zero or more other races",
"type": "float",
"mode": "nullable",
},
{
"name": "race_hispanic_latinx",
"description": "The percentage of Googlers hired in the U.S. who identify as Hispanic or Latinx and zero or more other races",
"type": "float",
"mode": "nullable",
},
{
"name": "race_native_american",
"description": "The percentage of Googlers hired in the U.S. who identify as Native American and zero or more other races",
"type": "float",
"mode": "nullable",
},
{
"name": "race_white",
"description": "The percentage of Googlers hired in the U.S. who identify as White and zero or more other races",
"type": "float",
"mode": "nullable",
},
{
"name": "gender_us_women",
"description": "The percentage of Googlers hired in the U.S. who are women",
"type": "float",
"mode": "nullable",
},
{
"name": "gender_us_men",
"description": "The percentage of Googlers hired in the U.S. who are men",
"type": "float",
"mode": "nullable",
},
{
"name": "gender_global_women",
"description": "The percentage of global Googlers hired who are women",
"type": "float",
"mode": "nullable",
},
{
"name": "gender_global_men",
"description": "The percentage of global Googlers hired who are men",
"type": "float",
"mode": "nullable",
},
],
)
# Task to load CSV data to a BigQuery table
load_region_non_intersectional_attrition_index_to_bq = gcs_to_bigquery.GCSToBigQueryOperator(
task_id="load_region_non_intersectional_attrition_index_to_bq",
bucket="{{ var.json.google_dei.storage_bucket }}",
source_objects=["DAR/2022/region_non_intersectional_attrition_index.csv"],
source_format="CSV",
destination_project_dataset_table="google_dei.dar_region_non_intersectional_attrition_index",
skip_leading_rows=1,
write_disposition="WRITE_TRUNCATE",
schema_fields=[
{
"name": "workforce",
"description": "Overall and sub-categories",
"type": "string",
"mode": "required",
},
{
"name": "report_year",
"description": "The year the report was published",
"type": "integer",
"mode": "required",
},
{
"name": "region",
"description": "Region",
"type": "string",
"mode": "required",
},
{
"name": "gender_women",
"description": "The attrition index score of Googlers in the region who are women",
"type": "integer",
"mode": "nullable",
},
{
"name": "gender_men",
"description": "The attrition index score of Googlers in the region who are men",
"type": "integer",
"mode": "nullable",
},
],
)
# Task to load CSV data to a BigQuery table
load_region_non_intersectional_hiring_to_bq = gcs_to_bigquery.GCSToBigQueryOperator(
task_id="load_region_non_intersectional_hiring_to_bq",
bucket="{{ var.json.google_dei.storage_bucket }}",
source_objects=["DAR/2022/region_non_intersectional_hiring.csv"],
source_format="CSV",
destination_project_dataset_table="google_dei.dar_region_non_intersectional_hiring",
skip_leading_rows=1,
write_disposition="WRITE_TRUNCATE",
schema_fields=[
{
"name": "workforce",
"description": "Overall and sub-categories",
"type": "string",
"mode": "required",
},
{
"name": "report_year",
"description": "The year the report was published",
"type": "integer",
"mode": "required",
},
{
"name": "region",
"description": "Region",
"type": "string",
"mode": "required",
},
{
"name": "gender_women",
"description": "The percentage of Googlers hired in the region who are women",
"type": "float",
"mode": "nullable",
},
{
"name": "gender_men",
"description": "The percentage of Googlers hired in the region who are men",
"type": "float",
"mode": "nullable",
},
],
)
# Task to load CSV data to a BigQuery table
load_region_non_intersectional_representation_to_bq = gcs_to_bigquery.GCSToBigQueryOperator(
task_id="load_region_non_intersectional_representation_to_bq",
bucket="{{ var.json.google_dei.storage_bucket }}",
source_objects=["DAR/2022/region_non_intersectional_representation.csv"],
source_format="CSV",
destination_project_dataset_table="google_dei.dar_region_non_intersectional_representation",
skip_leading_rows=1,
write_disposition="WRITE_TRUNCATE",
schema_fields=[
{
"name": "workforce",
"description": "Overall and sub-categories",
"type": "string",
"mode": "required",
},
{
"name": "report_year",
"description": "The year the report was published",
"type": "integer",
"mode": "required",
},
{
"name": "region",
"description": "Region",
"type": "string",
"mode": "required",
},
{
"name": "race_asian",
"description": "The percentage of Googlers in the region who identify as Asian and zero or more other races",
"type": "float",
"mode": "nullable",
},
{
"name": "race_black_african",
"description": "The percentage of Googlers in the region who identify as Black African and zero or more other races",
"type": "float",
"mode": "nullable",
},
{
"name": "race_hispanic_latino_latinx",
"description": "The percentage of Googlers in the region who identify as Hispanic, Latino, or Latinx and zero or more other races",
"type": "float",
"mode": "nullable",
},
{
"name": "race_indigenous",
"description": "The percentage of Googlers in the region who identify as Indigenous and zero or more other races",
"type": "float",
"mode": "nullable",
},
{
"name": "race_mena",
"description": "The percentage of Googlers in the region who identify as Middle Eastern or North African and zero or more other races",
"type": "float",
"mode": "nullable",
},
{
"name": "race_white_european",
"description": "The percentage of Googlers in the region who identify as White or European and zero or more other races",
"type": "float",
"mode": "nullable",
},
{
"name": "gender_women",
"description": "The percentage of Googlers in the region who are women",
"type": "float",
"mode": "nullable",
},
{
"name": "gender_men",
"description": "The percentage of Googlers in the region who are men",
"type": "float",
"mode": "nullable",
},
],
)
# Task to load CSV data to a BigQuery table
load_region_non_intersectional_exits_representation_to_bq = gcs_to_bigquery.GCSToBigQueryOperator(
task_id="load_region_non_intersectional_exits_representation_to_bq",
bucket="{{ var.json.google_dei.storage_bucket }}",
source_objects=["DAR/2022/region_non_intersectional_exits_representation.csv"],
source_format="CSV",
destination_project_dataset_table="google_dei.dar_region_non_intersectional_exits_representation",
skip_leading_rows=1,
write_disposition="WRITE_TRUNCATE",
schema_fields=[
{
"name": "workforce",
"description": "Overall and sub-categories",
"type": "string",
"mode": "required",
},
{
"name": "report_year",
"description": "The year the report was published",
"type": "integer",
"mode": "required",
},
{
"name": "region",
"description": "Region",
"type": "string",
"mode": "required",
},
{
"name": "gender_women",
"description": "The percentage of Googler exits in the region who are women",
"type": "float",
"mode": "nullable",
},
{
"name": "gender_men",
"description": "The percentage of Googler exits in the region who are men",
"type": "float",
"mode": "nullable",
},
],
)
# Task to load CSV data to a BigQuery table
load_selfid_representation_to_bq = gcs_to_bigquery.GCSToBigQueryOperator(
task_id="load_selfid_representation_to_bq",
bucket="{{ var.json.google_dei.storage_bucket }}",
source_objects=["DAR/2022/selfid_representation.csv"],
source_format="CSV",
destination_project_dataset_table="google_dei.dar_selfid_representation",
skip_leading_rows=1,
write_disposition="WRITE_TRUNCATE",
schema_fields=[
{
"name": "workforce",
"description": "Self-identification category. lgbtq: Googlers who self-identify as LGBQ+ and/or Trans+; disability: Googlers who self-identify as having a disability; military: Googlers who self-identify as being or having been members of the military; nonbinary: Googlers who self-identify as non-binary",
"type": "string",
"mode": "required",
},
{
"name": "report_year",
"description": "The year the report was published",
"type": "integer",
"mode": "required",
},
{
"name": "global",
"description": 'The percentage of global Googlers who identify as being part of the self-identification category (i.e., "workforce" type)',
"type": "float",
"mode": "nullable",
},
],
)
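# The bare task references below register each task with the DAG; no
# dependencies are declared, so all thirteen loads run independently.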
load_intersectional_attrition_index_to_bq
load_intersectional_hiring_to_bq
load_intersectional_representation_to_bq
load_intersectional_exits_representation_to_bq
load_non_intersectional_attrition_index_to_bq
load_non_intersectional_hiring_to_bq
load_non_intersectional_representation_to_bq
load_non_intersectional_exits_representation_to_bq
load_region_non_intersectional_attrition_index_to_bq
load_region_non_intersectional_hiring_to_bq
load_region_non_intersectional_representation_to_bq
load_region_non_intersectional_exits_representation_to_bq
load_selfid_representation_to_bq
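If sequencing were ever needed, for example a single download step gating every load, the dependency would be declared on these same references. A minimal sketch, assuming a hypothetical upstream task named download_dar_csvs defined in the same DAG (it does not appear in this file):

    download_dar_csvs >> [
        load_intersectional_representation_to_bq,
        load_non_intersectional_representation_to_bq,
        load_selfid_representation_to_bq,
    ]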
| 40.923259 | 322 | 0.513906 | 3,334 | 34,662 | 5.153269 | 0.059988 | 0.068448 | 0.054013 | 0.083231 | 0.939875 | 0.928467 | 0.919912 | 0.909144 | 0.89954 | 0.888889 | 0 | 0.003814 | 0.379724 | 34,662 | 846 | 323 | 40.971631 | 0.795302 | 0.031562 | 0 | 0.571785 | 0 | 0.047441 | 0.449957 | 0.068215 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.002497 | 0 | 0.002497 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
69ccbbe4e2cff4191b3bc1c6a7456873f659d37a | 174 | py | Python | contact_rest_api/admin.py | tekrajchhetri/contact-rest-api | 2ec93c74cdb30f35c2c73de2ccc7f24d49537362 | ["MIT"] | null | null | null | contact_rest_api/admin.py | tekrajchhetri/contact-rest-api | 2ec93c74cdb30f35c2c73de2ccc7f24d49537362 | ["MIT"] | null | null | null | contact_rest_api/admin.py | tekrajchhetri/contact-rest-api | 2ec93c74cdb30f35c2c73de2ccc7f24d49537362 | ["MIT"] | null | null | null |
from django.contrib import admin
from contact_rest_api import models
# Register your models here.
admin.site.register(models.UserProfile)
admin.site.register(models.Contact)
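A hedged sketch of how the same registrations could be upgraded to a ModelAdmin for a richer changelist; only the pattern is asserted here, and the list_display columns would have to match real model fields ("id" exists on default Django models):

    from django.contrib import admin
    from contact_rest_api import models

    @admin.register(models.Contact)
    class ContactAdmin(admin.ModelAdmin):
        list_display = ("id",)  # extend with actual Contact field names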
| 29 | 39 | 0.83908 | 25 | 174 | 5.76 | 0.56 | 0.125 | 0.236111 | 0.319444 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.086207 | 174 | 6 | 40 | 29 | 0.90566 | 0.149425 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 0.5 | 0 | 0.5 | 0 | 1 | 0 | 0 | null | 0 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 0 | 0 | 0 | 6 |
69f89911db9423b0ea8a51f2e380e1228e8c5927 | 208 | py | Python | tronx/helpers/__init__.py | beastzx18/Tron | 92207b841c80311e484e8f350b96f7df8a76d3b9 | ["MIT"] | 8 | 2021-08-22T06:43:34.000Z | 2022-02-24T17:09:49.000Z | tronx/helpers/__init__.py | beastzx18/Tron | 92207b841c80311e484e8f350b96f7df8a76d3b9 | ["MIT"] | 61 | 2021-09-12T11:05:33.000Z | 2021-12-07T15:26:18.000Z | tronx/helpers/__init__.py | beastzx18/Tron | 92207b841c80311e484e8f350b96f7df8a76d3b9 | ["MIT"] | 6 | 2021-09-08T08:43:04.000Z | 2022-02-24T17:09:50.000Z |
from .bots import *
from .filters import *
from .functions import *
from .user import *
from .utils import *
from .decorators import *
from .variables import *
from .constants import *
from .strings import *
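Each star import re-exports the public names of one submodule; a submodule can pin down exactly what escapes through such an import by declaring __all__. A minimal, hypothetical illustration (not taken from this package):

    # hypothetical tronx/helpers/example.py
    __all__ = ["visible_helper"]  # the only name exported by `from .example import *`

    def visible_helper():
        return "exported"

    def _internal_helper():  # underscore-prefixed names are skipped by star imports anyway
        return "hidden"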
| 20.8 | 25 | 0.740385 | 27 | 208 | 5.703704 | 0.407407 | 0.519481 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.173077 | 208 | 9 | 26 | 23.111111 | 0.895349 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | null | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
38e7b233e4e1b39c1fcf8037b5b69e1eb84451fe | 191 | py | Python | tapispy/clients/__init__.py | tapis-project/tapispy | fc7d5e79f8b5a73fa0517e6129f737dd753c2561 | ["Python-2.0", "OLDAP-2.3"] | null | null | null | tapispy/clients/__init__.py | tapis-project/tapispy | fc7d5e79f8b5a73fa0517e6129f737dd753c2561 | ["Python-2.0", "OLDAP-2.3"] | null | null | null | tapispy/clients/__init__.py | tapis-project/tapispy | fc7d5e79f8b5a73fa0517e6129f737dd753c2561 | ["Python-2.0", "OLDAP-2.3"] | null | null | null |
from .create import clients_create
from .delete import clients_delete
from .list import clients_list
from .subscribe import clients_subscribe
from .subscribtions import clients_subscribtions
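The effect of this __init__ is a flat namespace: each verb lives in its own module but is importable from the package root (the subscribtions spelling follows the upstream module name), e.g.:

    from tapispy.clients import clients_list, clients_create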
| 31.833333 | 48 | 0.86911 | 25 | 191 | 6.44 | 0.32 | 0.403727 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.104712 | 191 | 5 | 49 | 38.2 | 0.94152 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | null | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
2a23fe8ff7dd9a77896fa57f4be9a2086507054c | 312 | py | Python | nighres/microscopy/__init__.py | marcobarilari/nighres | e503bb96a6a73f73020c5d9d7b540bc5f17699a8 | ["Apache-2.0"] | 2 | 2020-08-05T18:05:38.000Z | 2022-03-28T12:22:14.000Z | nighres/microscopy/__init__.py | marcobarilari/nighres | e503bb96a6a73f73020c5d9d7b540bc5f17699a8 | ["Apache-2.0"] | 23 | 2017-07-17T12:53:22.000Z | 2017-07-24T21:31:16.000Z | nighres/microscopy/__init__.py | marcobarilari/nighres | e503bb96a6a73f73020c5d9d7b540bc5f17699a8 | ["Apache-2.0"] | 8 | 2017-10-31T13:57:06.000Z | 2021-03-11T16:17:44.000Z |
from nighres.microscopy.mgdm_cells import mgdm_cells
from nighres.microscopy.stack_intensity_regularisation import stack_intensity_regularisation
from nighres.microscopy.stack_intensity_mapping import stack_intensity_mapping
from nighres.microscopy.directional_line_clustering import directional_line_clustering
| 62.4 | 92 | 0.923077 | 38 | 312 | 7.210526 | 0.342105 | 0.160584 | 0.306569 | 0.189781 | 0.255474 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.051282 | 312 | 4 | 93 | 78 | 0.925676 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
2a54f96f4fcc51c71b8634048fbf72d46af90d54 | 95 | py | Python | numbalsoda/__init__.py | Nicholaswogan/numblsoda | 69fafdc7753e8f3273283a0b21e2eb3523d5f3aa | ["MIT"] | 4 | 2022-02-28T21:17:40.000Z | 2022-03-31T05:51:11.000Z | numbalsoda/__init__.py | Nicholaswogan/numblsoda | 69fafdc7753e8f3273283a0b21e2eb3523d5f3aa | ["MIT"] | 2 | 2022-03-08T08:57:40.000Z | 2022-03-31T05:05:41.000Z | numbalsoda/__init__.py | Nicholaswogan/numblsoda | 69fafdc7753e8f3273283a0b21e2eb3523d5f3aa | ["MIT"] | null | null | null |
from .driver import lsoda_sig, lsoda, address_as_void_pointer
from .driver_dop853 import dop853
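A sketch of how the re-exported names fit together, following numbalsoda's documented pattern; the ODE system itself is illustrative:

    import numpy as np
    from numba import cfunc
    from numbalsoda import lsoda, lsoda_sig

    @cfunc(lsoda_sig)
    def rhs(t, u, du, p):
        # predator-prey style right-hand side, purely for illustration
        du[0] = u[0] - u[0] * u[1]
        du[1] = u[0] * u[1] - u[1]

    u0 = np.array([5.0, 0.8])              # initial state
    t_eval = np.linspace(0.0, 50.0, 1000)  # output grid
    usol, success = lsoda(rhs.address, u0, t_eval)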
| 47.5 | 61 | 0.863158 | 15 | 95 | 5.133333 | 0.666667 | 0.25974 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.069767 | 0.094737 | 95 | 2 | 62 | 47.5 | 0.825581 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | null | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
aa5cd370680f4e4b66644e5f1f83fe3680ce9421 | 27,433 | py | Python | tests/test_asymmetric.py | bhumikapaharia/oscrypto | e166b40ee5fa47eb231c7cd78734004b123253b8 | ["MIT"] | 309 | 2015-07-22T02:42:45.000Z | 2022-03-28T23:54:47.000Z | tests/test_asymmetric.py | bhumikapaharia/oscrypto | e166b40ee5fa47eb231c7cd78734004b123253b8 | ["MIT"] | 58 | 2015-08-21T23:30:29.000Z | 2022-03-18T12:05:56.000Z | tests/test_asymmetric.py | bhumikapaharia/oscrypto | e166b40ee5fa47eb231c7cd78734004b123253b8 | ["MIT"] | 55 | 2015-10-10T04:45:30.000Z | 2022-03-20T21:05:53.000Z |
# coding: utf-8
from __future__ import unicode_literals, division, absolute_import, print_function
import unittest
import sys
import os
from asn1crypto import pem, algos, keys, core
from oscrypto import asymmetric, errors, backend
from ._unittest_compat import patch
patch()
if sys.version_info < (3,):
byte_cls = str
int_types = (int, long) # noqa
else:
byte_cls = bytes
int_types = (int,)
_backend = backend()
if _backend == 'openssl':
from oscrypto._openssl._libcrypto import libcrypto_version_info
openssl_098 = libcrypto_version_info < (1, 0, 0)
else:
openssl_098 = False
tests_root = os.path.dirname(__file__)
fixtures_dir = os.path.join(tests_root, 'fixtures')
def _win_version_pair():
ver_info = sys.getwindowsversion()
return (ver_info[0], ver_info[1])
def _should_support_sha2():
if _backend == 'mac':
return False
if _backend == 'winlegacy':
return False
if _backend == 'win' and _win_version_pair() < (6, 2):
return False
if openssl_098:
return False
return True
class AsymmetricTests(unittest.TestCase):
def test_load_incomplete_dsa_cert(self):
with self.assertRaises(errors.IncompleteAsymmetricKeyError):
asymmetric.load_public_key(os.path.join(fixtures_dir, 'DSAParametersInheritedCACert.crt'))
def test_cert_attributes(self):
cert = asymmetric.load_certificate(os.path.join(fixtures_dir, 'keys/test.crt'))
self.assertEqual(2048, cert.bit_size)
self.assertEqual(256, cert.byte_size)
self.assertEqual('rsa', cert.algorithm)
def test_public_key_attributes(self):
pub_key = asymmetric.load_public_key(os.path.join(fixtures_dir, 'keys/test-public-rsa.key'))
self.assertEqual(2048, pub_key.bit_size)
self.assertEqual(256, pub_key.byte_size)
self.assertEqual('rsa', pub_key.algorithm)
def test_private_key_attributes(self):
private_key = asymmetric.load_private_key(os.path.join(fixtures_dir, 'keys/test.key'))
self.assertEqual(2048, private_key.bit_size)
self.assertEqual(256, private_key.byte_size)
self.assertEqual('rsa', private_key.algorithm)
def test_cert_ec_attributes(self):
cert = asymmetric.load_certificate(os.path.join(fixtures_dir, 'keys/test-ec-named.crt'))
self.assertEqual(256, cert.bit_size)
self.assertEqual(32, cert.byte_size)
self.assertEqual('secp256r1', cert.curve)
self.assertEqual('ec', cert.algorithm)
def test_public_key_ec_attributes(self):
pub_key = asymmetric.load_public_key(os.path.join(fixtures_dir, 'keys/test-public-ec-named.key'))
self.assertEqual(256, pub_key.bit_size)
self.assertEqual(32, pub_key.byte_size)
self.assertEqual('secp256r1', pub_key.curve)
self.assertEqual('ec', pub_key.algorithm)
def test_private_key_ec_attributes(self):
private_key = asymmetric.load_private_key(os.path.join(fixtures_dir, 'keys/test-ec-named.key'))
self.assertEqual(256, private_key.bit_size)
self.assertEqual(32, private_key.byte_size)
self.assertEqual('secp256r1', private_key.curve)
self.assertEqual('ec', private_key.algorithm)
def test_dump_public(self):
public = asymmetric.load_public_key(os.path.join(fixtures_dir, 'keys/test.crt'))
pem_serialized = asymmetric.dump_public_key(public)
public_reloaded = asymmetric.load_public_key(pem_serialized)
self.assertIsInstance(public_reloaded, asymmetric.PublicKey)
self.assertEqual('rsa', public_reloaded.algorithm)
def test_dump_certificate(self):
cert = asymmetric.load_certificate(os.path.join(fixtures_dir, 'keys/test.crt'))
pem_serialized = asymmetric.dump_certificate(cert)
cert_reloaded = asymmetric.load_certificate(pem_serialized)
self.assertIsInstance(cert_reloaded, asymmetric.Certificate)
self.assertEqual('rsa', cert_reloaded.algorithm)
def test_dump_private(self):
def do_run():
private = asymmetric.load_private_key(os.path.join(fixtures_dir, 'keys/test.key'))
for password in [None, 'password123']:
pem_serialized = asymmetric.dump_private_key(private, password, target_ms=20)
private_reloaded = asymmetric.load_private_key(pem_serialized, password)
self.assertTrue(pem.detect(pem_serialized))
self.assertIsInstance(private_reloaded, asymmetric.PrivateKey)
self.assertEqual('rsa', private_reloaded.algorithm)
# OpenSSL 0.9.8 and Windows CryptoAPI don't have PBKDF2 implemented in
# C, thus the dump operation fails since there is no reasonable way to
# ensure we are using a good number of iterations of PBKDF2
if openssl_098 or _backend == 'winlegacy':
with self.assertRaises(OSError):
do_run()
else:
do_run()
def test_dump_private_openssl(self):
private = asymmetric.load_private_key(os.path.join(fixtures_dir, 'keys/test.key'))
pem_serialized = asymmetric.dump_openssl_private_key(private, 'password123')
private_reloaded = asymmetric.load_private_key(pem_serialized, 'password123')
self.assertIsInstance(private_reloaded, asymmetric.PrivateKey)
self.assertEqual('rsa', private_reloaded.algorithm)
def test_dh_generate(self):
dh_parameters = asymmetric.generate_dh_parameters(512)
self.assertIsInstance(dh_parameters, algos.DHParameters)
self.assertIsInstance(dh_parameters['p'].native, int_types)
self.assertIsInstance(dh_parameters['g'].native, int_types)
self.assertEqual(2, dh_parameters['g'].native)
def test_rsa_generate(self):
public, private = asymmetric.generate_pair('rsa', bit_size=2048)
self.assertEqual('rsa', public.algorithm)
self.assertEqual(2048, public.bit_size)
original_data = b'This is data to sign'
signature = asymmetric.rsa_pkcs1v15_sign(private, original_data, 'sha1')
self.assertIsInstance(signature, byte_cls)
asymmetric.rsa_pkcs1v15_verify(public, signature, original_data, 'sha1')
raw_public = asymmetric.dump_public_key(public)
asymmetric.load_public_key(raw_public)
raw_private = asymmetric.dump_private_key(private, None)
asymmetric.load_private_key(raw_private, None)
self.assertIsInstance(private.fingerprint, byte_cls)
self.assertIsInstance(public.fingerprint, byte_cls)
self.assertEqual(private.fingerprint, public.fingerprint)
def test_dsa_generate(self):
public, private = asymmetric.generate_pair('dsa', bit_size=1024)
self.assertEqual('dsa', public.algorithm)
self.assertEqual(1024, public.bit_size)
original_data = b'This is data to sign'
signature = asymmetric.dsa_sign(private, original_data, 'sha1')
self.assertIsInstance(signature, byte_cls)
asymmetric.dsa_verify(public, signature, original_data, 'sha1')
raw_public = asymmetric.dump_public_key(public)
asymmetric.load_public_key(raw_public)
raw_private = asymmetric.dump_private_key(private, None)
asymmetric.load_private_key(raw_private, None)
self.assertIsInstance(private.fingerprint, byte_cls)
self.assertIsInstance(public.fingerprint, byte_cls)
self.assertEqual(private.fingerprint, public.fingerprint)
def test_ec_generate(self):
public, private = asymmetric.generate_pair('ec', curve='secp256r1')
self.assertEqual('ec', public.algorithm)
self.assertEqual('secp256r1', public.asn1.curve[1])
original_data = b'This is data to sign'
signature = asymmetric.ecdsa_sign(private, original_data, 'sha1')
self.assertIsInstance(signature, byte_cls)
asymmetric.ecdsa_verify(public, signature, original_data, 'sha1')
raw_public = asymmetric.dump_public_key(public)
asymmetric.load_public_key(raw_public)
raw_private = asymmetric.dump_private_key(private, None)
asymmetric.load_private_key(raw_private, None)
self.assertIsInstance(private.fingerprint, byte_cls)
self.assertIsInstance(public.fingerprint, byte_cls)
self.assertEqual(private.fingerprint, public.fingerprint)
def test_rsa_verify(self):
with open(os.path.join(fixtures_dir, 'message.txt'), 'rb') as f:
original_data = f.read()
with open(os.path.join(fixtures_dir, 'rsa_signature'), 'rb') as f:
signature = f.read()
public = asymmetric.load_public_key(os.path.join(fixtures_dir, 'keys/test.crt'))
asymmetric.rsa_pkcs1v15_verify(public, signature, original_data, 'sha1')
def test_rsa_verify_key_size_mismatch(self):
with open(os.path.join(fixtures_dir, 'message.txt'), 'rb') as f:
original_data = f.read()
with open(os.path.join(fixtures_dir, 'rsa_signature'), 'rb') as f:
signature = f.read()
public = asymmetric.load_public_key(os.path.join(fixtures_dir, 'keys/test-4096.crt'))
with self.assertRaises(errors.SignatureError):
asymmetric.rsa_pkcs1v15_verify(public, signature, original_data, 'sha1')
def test_rsa_verify_fail(self):
with open(os.path.join(fixtures_dir, 'message.txt'), 'rb') as f:
original_data = f.read()
with open(os.path.join(fixtures_dir, 'rsa_signature'), 'rb') as f:
signature = f.read()
public = asymmetric.load_public_key(os.path.join(fixtures_dir, 'keys/test.crt'))
with self.assertRaises(errors.SignatureError):
asymmetric.rsa_pkcs1v15_verify(public, signature, original_data + b'1', 'sha1')
def test_rsa_verify_fail_each_byte(self):
with open(os.path.join(fixtures_dir, 'message.txt'), 'rb') as f:
original_data = f.read()
with open(os.path.join(fixtures_dir, 'rsa_signature'), 'rb') as f:
original_signature = f.read()
public = asymmetric.load_public_key(os.path.join(fixtures_dir, 'keys/test.crt'))
for i in range(0, len(original_signature)):
if i == 0:
signature = b'\xab' + original_signature[1:]
elif i == len(original_signature) - 1:
signature = original_signature[0:-1] + b'\xab'
else:
signature = original_signature[0:i] + b'\xab' + original_signature[i + 1:]
with self.assertRaises(errors.SignatureError):
asymmetric.rsa_pkcs1v15_verify(public, signature, original_data + b'1', 'sha1')
def test_rsa_pss_verify(self):
with open(os.path.join(fixtures_dir, 'message.txt'), 'rb') as f:
original_data = f.read()
with open(os.path.join(fixtures_dir, 'rsa_pss_signature'), 'rb') as f:
signature = f.read()
public = asymmetric.load_public_key(os.path.join(fixtures_dir, 'keys/test.crt'))
asymmetric.rsa_pss_verify(public, signature, original_data, 'sha1')
def test_rsa_pss_verify_fail(self):
with open(os.path.join(fixtures_dir, 'message.txt'), 'rb') as f:
original_data = f.read()
with open(os.path.join(fixtures_dir, 'rsa_pss_signature'), 'rb') as f:
signature = f.read()
public = asymmetric.load_public_key(os.path.join(fixtures_dir, 'keys/test.crt'))
with self.assertRaises(errors.SignatureError):
asymmetric.rsa_pss_verify(public, signature, original_data + b'1', 'sha1')
def test_rsa_raw_verify(self):
with open(os.path.join(fixtures_dir, 'message.txt'), 'rb') as f:
original_data = f.read()
with open(os.path.join(fixtures_dir, 'rsa_signature_raw'), 'rb') as f:
signature = f.read()
public = asymmetric.load_public_key(os.path.join(fixtures_dir, 'keys/test.crt'))
asymmetric.rsa_pkcs1v15_verify(public, signature, original_data, 'raw')
def test_rsa_raw_verify_fail(self):
with open(os.path.join(fixtures_dir, 'message.txt'), 'rb') as f:
original_data = f.read()
with open(os.path.join(fixtures_dir, 'rsa_signature_raw'), 'rb') as f:
signature = f.read()
public = asymmetric.load_public_key(os.path.join(fixtures_dir, 'keys/test.crt'))
with self.assertRaises(errors.SignatureError):
asymmetric.rsa_pkcs1v15_verify(public, signature, original_data + b'1', 'raw')
def test_dsa_verify(self):
with open(os.path.join(fixtures_dir, 'message.txt'), 'rb') as f:
original_data = f.read()
with open(os.path.join(fixtures_dir, 'dsa_signature'), 'rb') as f:
signature = f.read()
public = asymmetric.load_public_key(os.path.join(fixtures_dir, 'keys/test-dsa-1024.crt'))
asymmetric.dsa_verify(public, signature, original_data, 'sha1')
def test_dsa_verify_key_size_mismatch(self):
with open(os.path.join(fixtures_dir, 'message.txt'), 'rb') as f:
original_data = f.read()
with open(os.path.join(fixtures_dir, 'dsa_signature'), 'rb') as f:
signature = f.read()
public = asymmetric.load_public_key(os.path.join(fixtures_dir, 'keys/test-dsa-512.crt'))
with self.assertRaises(errors.SignatureError):
asymmetric.dsa_verify(public, signature, original_data, 'sha1')
def test_dsa_verify_fail(self):
with open(os.path.join(fixtures_dir, 'message.txt'), 'rb') as f:
original_data = f.read()
with open(os.path.join(fixtures_dir, 'dsa_signature'), 'rb') as f:
signature = f.read()
public = asymmetric.load_public_key(os.path.join(fixtures_dir, 'keys/test-dsa-1024.crt'))
with self.assertRaises(errors.SignatureError):
asymmetric.dsa_verify(public, signature, original_data + b'1', 'sha1')
def test_dsa_verify_fail_each_byte(self):
with open(os.path.join(fixtures_dir, 'message.txt'), 'rb') as f:
original_data = f.read()
with open(os.path.join(fixtures_dir, 'dsa_signature'), 'rb') as f:
original_signature = f.read()
public = asymmetric.load_public_key(os.path.join(fixtures_dir, 'keys/test-dsa-1024.crt'))
for i in range(0, len(original_signature)):
if i == 0:
signature = b'\xab' + original_signature[1:]
elif i == len(original_signature) - 1:
signature = original_signature[0:-1] + b'\xab'
else:
signature = original_signature[0:i] + b'\xab' + original_signature[i+1:]
with self.assertRaises(errors.SignatureError):
asymmetric.dsa_verify(public, signature, original_data + b'1', 'sha1')
def test_ecdsa_verify(self):
with open(os.path.join(fixtures_dir, 'message.txt'), 'rb') as f:
original_data = f.read()
with open(os.path.join(fixtures_dir, 'ecdsa_signature'), 'rb') as f:
signature = f.read()
public = asymmetric.load_public_key(os.path.join(fixtures_dir, 'keys/test-public-ec-named.key'))
asymmetric.ecdsa_verify(public, signature, original_data, 'sha1')
def test_ecdsa_verify_fail_each_byte(self):
with open(os.path.join(fixtures_dir, 'message.txt'), 'rb') as f:
original_data = f.read()
with open(os.path.join(fixtures_dir, 'ecdsa_signature'), 'rb') as f:
original_signature = f.read()
public = asymmetric.load_public_key(os.path.join(fixtures_dir, 'keys/test-public-ec-named.key'))
for i in range(0, len(original_signature)):
if i == 0:
signature = b'\xab' + original_signature[1:]
elif i == len(original_signature) - 1:
signature = original_signature[0:-1] + b'\xab'
else:
signature = original_signature[0:i] + b'\xab' + original_signature[i+1:]
with self.assertRaises(errors.SignatureError):
asymmetric.ecdsa_verify(public, signature, original_data + b'1', 'sha1')
def test_rsa_pkcs1v15_encrypt(self):
original_data = b'This is data to encrypt'
private = asymmetric.load_private_key(os.path.join(fixtures_dir, 'keys/test.key'))
public = asymmetric.load_public_key(os.path.join(fixtures_dir, 'keys/test.crt'))
ciphertext = asymmetric.rsa_pkcs1v15_encrypt(public, original_data)
self.assertIsInstance(ciphertext, byte_cls)
plaintext = asymmetric.rsa_pkcs1v15_decrypt(private, ciphertext)
self.assertEqual(original_data, plaintext)
def test_rsa_oaep_encrypt(self):
original_data = b'This is data to encrypt'
private = asymmetric.load_private_key(os.path.join(fixtures_dir, 'keys/test.key'))
public = asymmetric.load_public_key(os.path.join(fixtures_dir, 'keys/test.crt'))
ciphertext = asymmetric.rsa_oaep_encrypt(public, original_data)
self.assertIsInstance(ciphertext, byte_cls)
plaintext = asymmetric.rsa_oaep_decrypt(private, ciphertext)
self.assertEqual(original_data, plaintext)
def test_rsa_private_pkcs1v15_decrypt(self):
original_data = b'This is the message to sign'
private = asymmetric.load_private_key(os.path.join(fixtures_dir, 'keys/test.key'))
with open(os.path.join(fixtures_dir, 'rsa_public_encrypted'), 'rb') as f:
plaintext = asymmetric.rsa_pkcs1v15_decrypt(private, f.read())
self.assertEqual(original_data, plaintext)
def test_rsa_private_oaep_decrypt(self):
original_data = b'This is the message to sign'
private = asymmetric.load_private_key(os.path.join(fixtures_dir, 'keys/test.key'))
with open(os.path.join(fixtures_dir, 'rsa_public_encrypted_oaep'), 'rb') as f:
plaintext = asymmetric.rsa_oaep_decrypt(private, f.read())
self.assertEqual(original_data, plaintext)
def test_rsa_sign(self):
original_data = b'This is data to sign'
private = asymmetric.load_private_key(os.path.join(fixtures_dir, 'keys/test.key'))
public = asymmetric.load_public_key(os.path.join(fixtures_dir, 'keys/test.crt'))
signature = asymmetric.rsa_pkcs1v15_sign(private, original_data, 'sha1')
self.assertIsInstance(signature, byte_cls)
asymmetric.rsa_pkcs1v15_verify(public, signature, original_data, 'sha1')
def test_rsa_fingerprint(self):
private = asymmetric.load_private_key(os.path.join(fixtures_dir, 'keys/test.key'))
public = asymmetric.load_public_key(os.path.join(fixtures_dir, 'keys/test.crt'))
self.assertIsInstance(private.fingerprint, byte_cls)
self.assertIsInstance(public.fingerprint, byte_cls)
self.assertEqual(private.fingerprint, public.fingerprint)
def test_rsa_public_key_attr(self):
private = asymmetric.load_private_key(os.path.join(fixtures_dir, 'keys/test.key'))
public = asymmetric.load_public_key(os.path.join(fixtures_dir, 'keys/test.crt'))
computed_public = private.public_key
self.assertEqual(public.asn1.dump(), computed_public.asn1.dump())
def test_rsa_private_key_unwrap(self):
private = asymmetric.load_private_key(os.path.join(fixtures_dir, 'keys/test.key'))
self.assertIsInstance(private.unwrap(), keys.RSAPrivateKey)
def test_rsa_public_key_unwrap(self):
public = asymmetric.load_public_key(os.path.join(fixtures_dir, 'keys/test.crt'))
self.assertIsInstance(public.unwrap(), keys.RSAPublicKey)
def test_rsa_pss_sign(self):
original_data = b'This is data to sign'
private = asymmetric.load_private_key(os.path.join(fixtures_dir, 'keys/test.key'))
public = asymmetric.load_public_key(os.path.join(fixtures_dir, 'keys/test.crt'))
signature = asymmetric.rsa_pss_sign(private, original_data, 'sha1')
self.assertIsInstance(signature, byte_cls)
asymmetric.rsa_pss_verify(public, signature, original_data, 'sha1')
def test_rsa_pss_sha256_sign(self):
original_data = b'This is data to sign'
private = asymmetric.load_private_key(os.path.join(fixtures_dir, 'keys/test.key'))
public = asymmetric.load_public_key(os.path.join(fixtures_dir, 'keys/test.crt'))
signature = asymmetric.rsa_pss_sign(private, original_data, 'sha256')
self.assertIsInstance(signature, byte_cls)
asymmetric.rsa_pss_verify(public, signature, original_data, 'sha256')
def test_rsa_raw_sign(self):
original_data = b'This is data to sign!'
private = asymmetric.load_private_key(os.path.join(fixtures_dir, 'keys/test.key'))
public = asymmetric.load_public_key(os.path.join(fixtures_dir, 'keys/test.crt'))
signature = asymmetric.rsa_pkcs1v15_sign(private, original_data, 'raw')
self.assertIsInstance(signature, byte_cls)
asymmetric.rsa_pkcs1v15_verify(public, signature, original_data, 'raw')
def test_dsa_sign(self):
original_data = b'This is data to sign'
private = asymmetric.load_private_key(os.path.join(fixtures_dir, 'keys/test-dsa-1024.key'))
public = asymmetric.load_public_key(os.path.join(fixtures_dir, 'keys/test-dsa-1024.crt'))
signature = asymmetric.dsa_sign(private, original_data, 'sha1')
self.assertIsInstance(signature, byte_cls)
asymmetric.dsa_verify(public, signature, original_data, 'sha1')
def test_dsa_fingerprint(self):
private = asymmetric.load_private_key(os.path.join(fixtures_dir, 'keys/test-dsa-1024.key'))
public = asymmetric.load_public_key(os.path.join(fixtures_dir, 'keys/test-dsa-1024.crt'))
self.assertIsInstance(private.fingerprint, byte_cls)
self.assertIsInstance(public.fingerprint, byte_cls)
self.assertEqual(private.fingerprint, public.fingerprint)
def test_dsa_public_key_attr(self):
private = asymmetric.load_private_key(os.path.join(fixtures_dir, 'keys/test-dsa-1024.key'))
public = asymmetric.load_public_key(os.path.join(fixtures_dir, 'keys/test-dsa-1024.crt'))
computed_public = private.public_key
self.assertEqual(public.asn1.dump(), computed_public.asn1.dump())
def test_dsa_private_key_unwrap(self):
private = asymmetric.load_private_key(os.path.join(fixtures_dir, 'keys/test-dsa-1024.key'))
self.assertIsInstance(private.unwrap(), keys.DSAPrivateKey)
def test_dsa_public_key_unwrap(self):
public = asymmetric.load_public_key(os.path.join(fixtures_dir, 'keys/test-dsa-1024.crt'))
self.assertIsInstance(public.unwrap(), core.Integer)
def test_dsa_2048_sha1_sign(self):
def do_run():
original_data = b'This is data to sign'
private = asymmetric.load_private_key(os.path.join(fixtures_dir, 'keys/test-dsa-2048.key'))
public = asymmetric.load_public_key(os.path.join(fixtures_dir, 'keys/test-dsa-2048.crt'))
signature = asymmetric.dsa_sign(private, original_data, 'sha1')
self.assertIsInstance(signature, byte_cls)
asymmetric.dsa_verify(public, signature, original_data, 'sha1')
if sys.platform == 'win32':
with self.assertRaises(errors.AsymmetricKeyError):
do_run()
else:
do_run()
def test_dsa_2048_sha2_sign(self):
def do_run():
original_data = b'This is data to sign'
private = asymmetric.load_private_key(os.path.join(fixtures_dir, 'keys/test-dsa-2048-sha2.key'))
public = asymmetric.load_public_key(os.path.join(fixtures_dir, 'keys/test-dsa-2048-sha2.crt'))
signature = asymmetric.dsa_sign(private, original_data, 'sha256')
self.assertIsInstance(signature, byte_cls)
asymmetric.dsa_verify(public, signature, original_data, 'sha256')
if not _should_support_sha2():
with self.assertRaises(errors.AsymmetricKeyError):
do_run()
else:
do_run()
def test_dsa_3072_sign(self):
def do_run():
original_data = b'This is data to sign'
private = asymmetric.load_private_key(os.path.join(fixtures_dir, 'keys/test-dsa.key'))
public = asymmetric.load_public_key(os.path.join(fixtures_dir, 'keys/test-dsa.crt'))
signature = asymmetric.dsa_sign(private, original_data, 'sha256')
self.assertIsInstance(signature, byte_cls)
asymmetric.dsa_verify(public, signature, original_data, 'sha256')
if not _should_support_sha2():
with self.assertRaises(errors.AsymmetricKeyError):
do_run()
else:
do_run()
def test_dsa_3072_sign_sha1(self):
def do_run():
original_data = b'This is data to sign'
private = asymmetric.load_private_key(os.path.join(fixtures_dir, 'keys/test-dsa.key'))
public = asymmetric.load_public_key(os.path.join(fixtures_dir, 'keys/test-dsa.crt'))
signature = asymmetric.dsa_sign(private, original_data, 'sha1')
self.assertIsInstance(signature, byte_cls)
asymmetric.dsa_verify(public, signature, original_data, 'sha1')
if _backend == 'mac' or openssl_098 or _backend == 'winlegacy':
with self.assertRaises(errors.AsymmetricKeyError):
do_run()
elif _backend == 'win':
if _win_version_pair() < (6, 2):
exception_class = errors.AsymmetricKeyError
else:
exception_class = ValueError
with self.assertRaises(exception_class):
do_run()
else:
do_run()
def test_ecdsa_sign(self):
original_data = b'This is data to sign'
private = asymmetric.load_private_key(os.path.join(fixtures_dir, 'keys/test-ec-named.key'))
public = asymmetric.load_public_key(os.path.join(fixtures_dir, 'keys/test-ec-named.crt'))
signature = asymmetric.ecdsa_sign(private, original_data, 'sha1')
self.assertIsInstance(signature, byte_cls)
asymmetric.ecdsa_verify(public, signature, original_data, 'sha1')
def test_ec_fingerprints(self):
private = asymmetric.load_private_key(os.path.join(fixtures_dir, 'keys/test-ec-named.key'))
public = asymmetric.load_public_key(os.path.join(fixtures_dir, 'keys/test-ec-named.crt'))
self.assertIsInstance(private.fingerprint, byte_cls)
self.assertIsInstance(public.fingerprint, byte_cls)
self.assertEqual(private.fingerprint, public.fingerprint)
def test_ec_public_key_attr(self):
private = asymmetric.load_private_key(os.path.join(fixtures_dir, 'keys/test-ec-named.key'))
public = asymmetric.load_public_key(os.path.join(fixtures_dir, 'keys/test-ec-named.crt'))
computed_public = private.public_key
self.assertEqual(public.asn1.dump(), computed_public.asn1.dump())
def test_ec_private_key_unwrap(self):
private = asymmetric.load_private_key(os.path.join(fixtures_dir, 'keys/test-ec-named.key'))
self.assertIsInstance(private.unwrap(), keys.ECPrivateKey)
def test_ec_public_key_unwrap(self):
public = asymmetric.load_public_key(os.path.join(fixtures_dir, 'keys/test-ec-named.crt'))
self.assertIsInstance(public.unwrap(), keys.ECPointBitString)
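Distilled from the suite above, the basic round trip uses exactly the calls the tests exercise:

    from oscrypto import asymmetric

    public, private = asymmetric.generate_pair('rsa', bit_size=2048)
    data = b'This is data to sign'
    signature = asymmetric.rsa_pkcs1v15_sign(private, data, 'sha1')
    # raises errors.SignatureError on any mismatch, as the *_fail tests demonstrate
    asymmetric.rsa_pkcs1v15_verify(public, signature, data, 'sha1')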
| 46.49661 | 108 | 0.681989 | 3,508 | 27,433 | 5.094071 | 0.060718 | 0.033912 | 0.05596 | 0.09972 | 0.86094 | 0.837493 | 0.804309 | 0.788584 | 0.777224 | 0.773755 | 0 | 0.016164 | 0.203951 | 27,433 | 589 | 109 | 46.575552 | 0.802134 | 0.007801 | 0 | 0.615551 | 0 | 0 | 0.090214 | 0.027524 | 0 | 0 | 0 | 0 | 0.220302 | 1 | 0.133909 | false | 0.010799 | 0.017279 | 0 | 0.166307 | 0.047516 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
aa5de7b1e3190745c491a28ae5140490fda05d73 | 101 | py | Python | hardware/YL83/__init__.py | RechnioMateusz/weather_forecast | 5ae9c65336831042b74a77e05c163b7b65b90dcd | ["MIT"] | 1 | 2019-10-22T20:09:54.000Z | 2019-10-22T20:09:54.000Z | hardware/YL83/__init__.py | RechnioMateusz/weather_forecast | 5ae9c65336831042b74a77e05c163b7b65b90dcd | ["MIT"] | null | null | null | hardware/YL83/__init__.py | RechnioMateusz/weather_forecast | 5ae9c65336831042b74a77e05c163b7b65b90dcd | ["MIT"] | null | null | null |
from .yl83_handler import YL83, YL83Exception
from .mcp3008_handler import MCP3008, MCP3008Exception
| 33.666667 | 54 | 0.861386 | 12 | 101 | 7.083333 | 0.583333 | 0.305882 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.197802 | 0.09901 | 101 | 2 | 55 | 50.5 | 0.736264 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | null | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
aa841eee81a72044700badd5b82fe177320f2184 | 81 | py | Python | Calculator_Application/calculations/mul.py | jpweldon/Module_2_Practice | cb546bbfcf5ffb7c6388f854e0eb8873834cfab9 | ["MIT"] | null | null | null | Calculator_Application/calculations/mul.py | jpweldon/Module_2_Practice | cb546bbfcf5ffb7c6388f854e0eb8873834cfab9 | ["MIT"] | null | null | null | Calculator_Application/calculations/mul.py | jpweldon/Module_2_Practice | cb546bbfcf5ffb7c6388f854e0eb8873834cfab9 | ["MIT"] | null | null | null |
# Define a Multiplication Function
def mul(num1, num2):
return num1 * num2
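A quick sanity check of the function above:

    assert mul(3, 4) == 12
    assert mul(-2, 5) == -10
    assert mul(0.5, 8) == 4.0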
| 13.5 | 34 | 0.703704 | 11 | 81 | 5.181818 | 0.818182 | 0.280702 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.063492 | 0.222222 | 81 | 5 | 35 | 16.2 | 0.84127 | 0.395062 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.5 | false | 0 | 0 | 0.5 | 1 | 0 | 1 | 0 | 0 | null | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 1 | 1 | 0 | 0 | 6 |
aae30323577a591962c2998ee1b811cbf130ca82 | 6,266 | py | Python | tests/test_binary_predicate.py | nokia/minifold | 3687d32ab6119dc8293ae370c8c4ba9bbbb47deb | ["BSD-3-Clause"] | 15 | 2018-09-03T09:40:59.000Z | 2021-07-16T16:14:46.000Z | tests/test_binary_predicate.py | Infinite-Blue-1042/minifold | cd0aa9207f9e1819ed2ecbb24373cdcfe27abd16 | ["BSD-3-Clause"] | null | null | null | tests/test_binary_predicate.py | Infinite-Blue-1042/minifold | cd0aa9207f9e1819ed2ecbb24373cdcfe27abd16 | ["BSD-3-Clause"] | 8 | 2019-01-25T07:18:59.000Z | 2021-04-07T17:54:54.000Z |
#!/usr/bin/env pytest-3
# -*- coding: utf-8 -*-
#
# This file is part of the minifold project.
# https://github.com/nokia/minifold
__author__ = "Marc-Olivier Buob"
__maintainer__ = "Marc-Olivier Buob"
__email__ = "marc-olivier.buob@nokia-bell-labs.com"
__copyright__ = "Copyright (C) 2018, Nokia"
__license__ = "BSD-3"
import operator
from minifold.binary_predicate import BinaryPredicate, __in__
ENTRY = {"a" : 1, "b" : 2}
ENTRY2 = {"a" : {1, 2, 3}}
def test_le():
assert BinaryPredicate("a", "<=", 0)(ENTRY) == False
assert BinaryPredicate("a", "<=", 1)(ENTRY) == True
assert BinaryPredicate("a", "<=", 3)(ENTRY) == True
assert BinaryPredicate("a", operator.__le__, 0)(ENTRY) == False
assert BinaryPredicate("a", operator.__le__, 1)(ENTRY) == True
assert BinaryPredicate("a", operator.__le__, 3)(ENTRY) == True
def test_lt():
assert BinaryPredicate("a", "<", 0)(ENTRY) == False
assert BinaryPredicate("a", "<", 1)(ENTRY) == False
assert BinaryPredicate("a", "<", 3)(ENTRY) == True
assert BinaryPredicate("a", operator.__lt__, 0)(ENTRY) == False
assert BinaryPredicate("a", operator.__lt__, 1)(ENTRY) == False
assert BinaryPredicate("a", operator.__lt__, 3)(ENTRY) == True
def test_ge():
assert BinaryPredicate("a", ">=", 0)(ENTRY) == True
assert BinaryPredicate("a", ">=", 1)(ENTRY) == True
assert BinaryPredicate("a", ">=", 3)(ENTRY) == False
assert BinaryPredicate("a", operator.__ge__, 0)(ENTRY) == True
assert BinaryPredicate("a", operator.__ge__, 1)(ENTRY) == True
assert BinaryPredicate("a", operator.__ge__, 3)(ENTRY) == False
def test_gt():
assert BinaryPredicate("a", ">", 0)(ENTRY) == True
assert BinaryPredicate("a", ">", 1)(ENTRY) == False
assert BinaryPredicate("a", ">", 3)(ENTRY) == False
assert BinaryPredicate("a", operator.__gt__, 0)(ENTRY) == True
assert BinaryPredicate("a", operator.__gt__, 1)(ENTRY) == False
assert BinaryPredicate("a", operator.__gt__, 3)(ENTRY) == False
def test_eq():
assert BinaryPredicate("a", "==", 0)(ENTRY) == False
assert BinaryPredicate("a", "==", 1)(ENTRY) == True
assert BinaryPredicate("a", "==", 3)(ENTRY) == False
assert BinaryPredicate("a", operator.__eq__, 0)(ENTRY) == False
assert BinaryPredicate("a", operator.__eq__, 1)(ENTRY) == True
assert BinaryPredicate("a", operator.__eq__, 3)(ENTRY) == False
def test_ne():
assert BinaryPredicate("a", "!=", 0)(ENTRY) == True
assert BinaryPredicate("a", "!=", 1)(ENTRY) == False
assert BinaryPredicate("a", "!=", 3)(ENTRY) == True
assert BinaryPredicate("a", operator.__ne__, 0)(ENTRY) == True
assert BinaryPredicate("a", operator.__ne__, 1)(ENTRY) == False
assert BinaryPredicate("a", operator.__ne__, 3)(ENTRY) == True
def test_set():
assert BinaryPredicate("a", "<=", {1, 2, 3})(ENTRY2) == True
assert BinaryPredicate("a", "<=", {1, 2, 3, 4})(ENTRY2) == True
assert BinaryPredicate("a", "<=", {2, 3, 4})(ENTRY2) == False
assert BinaryPredicate("a", "<=", {2, 3})(ENTRY2) == False
assert BinaryPredicate("a", "<", {1, 2, 3})(ENTRY2) == False
assert BinaryPredicate("a", "<", {1, 2, 3, 4})(ENTRY2) == True
assert BinaryPredicate("a", "<", {2, 3, 4})(ENTRY2) == False
assert BinaryPredicate("a", "<", {2, 3})(ENTRY2) == False
assert BinaryPredicate("a", ">=", {1, 2, 3})(ENTRY2) == True
assert BinaryPredicate("a", ">=", {1, 2, 3, 4})(ENTRY2) == False
assert BinaryPredicate("a", ">=", {2, 3, 4})(ENTRY2) == False
assert BinaryPredicate("a", ">=", {2, 3})(ENTRY2) == True
assert BinaryPredicate("a", ">", {1, 2, 3})(ENTRY2) == False
assert BinaryPredicate("a", ">", {1, 2, 3, 4})(ENTRY2) == False
assert BinaryPredicate("a", ">", {2, 3, 4})(ENTRY2) == False
assert BinaryPredicate("a", ">", {2, 3})(ENTRY2) == True
assert BinaryPredicate("a", "==", {1, 2, 3})(ENTRY2) == True
assert BinaryPredicate("a", "==", {1, 2, 3, 4})(ENTRY2) == False
assert BinaryPredicate("a", "==", {2, 3, 4})(ENTRY2) == False
assert BinaryPredicate("a", "==", {2, 3})(ENTRY2) == False
assert BinaryPredicate("a", "!=", {1, 2, 3})(ENTRY2) == False
assert BinaryPredicate("a", "!=", {1, 2, 3, 4})(ENTRY2) == True
assert BinaryPredicate("a", "!=", {2, 3, 4})(ENTRY2) == True
assert BinaryPredicate("a", "!=", {2, 3})(ENTRY2) == True
def test_includes():
assert BinaryPredicate("a", "IN", {1, 2, 3})(ENTRY) == True
assert BinaryPredicate("a", "IN", {4, 5, 6})(ENTRY) == False
def test_contains():
assert BinaryPredicate("a", "CONTAINS", 1)(ENTRY2) == True
assert BinaryPredicate("a", "CONTAINS", 4)(ENTRY2) == False
def check_clause(t1, f1, t2, f2):
assert BinaryPredicate(t1, "AND", t2)(ENTRY) == True
assert BinaryPredicate(t1, "AND", f2)(ENTRY) == False
assert BinaryPredicate(f1, "AND", t2)(ENTRY) == False
assert BinaryPredicate(f1, "AND", f2)(ENTRY) == False
assert BinaryPredicate(t1, "&&", t2)(ENTRY) == True
assert BinaryPredicate(t1, "&&", f2)(ENTRY) == False
assert BinaryPredicate(f1, "&&", t2)(ENTRY) == False
assert BinaryPredicate(f1, "&&", f2)(ENTRY) == False
assert BinaryPredicate(t1, "OR", t2)(ENTRY) == True
assert BinaryPredicate(t1, "OR", f2)(ENTRY) == True
assert BinaryPredicate(f1, "OR", t2)(ENTRY) == True
assert BinaryPredicate(f1, "OR", f2)(ENTRY) == False
assert BinaryPredicate(t1, "||", t2)(ENTRY) == True
assert BinaryPredicate(t1, "||", f2)(ENTRY) == True
assert BinaryPredicate(f1, "||", t2)(ENTRY) == True
assert BinaryPredicate(f1, "||", f2)(ENTRY) == False
def test_clause():
t1 = BinaryPredicate("a", "==", 1)
f1 = BinaryPredicate("a", "!=", 1)
t2 = BinaryPredicate("b", "==", 2)
f2 = BinaryPredicate("b", "!=", 2)
check_clause(t1, f1, t2, f2)
def test_lambda():
t1 = lambda e: e["a"] == 1
f1 = lambda e: e["a"] != 1
t2 = lambda e: e["b"] == 2
f2 = lambda e: e["b"] != 2
check_clause(t1, f1, t2, f2)
def test_in():
assert not __in__(0, {1, 2, 3})
assert __in__(1, {1, 2, 3})
assert __in__(2, {1, 2, 3})
assert __in__(3, {1, 2, 3})
assert not __in__(4, {1, 2, 3})
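Extracted from the matrix above, the core usage pattern: a BinaryPredicate is a callable built from (key or predicate, operator, operand) and applied to a dict-like entry:

    from minifold.binary_predicate import BinaryPredicate

    entry = {"a": 1, "b": 2}
    p = BinaryPredicate("a", "<=", 1)   # compares entry["a"] against 1
    q = BinaryPredicate(p, "AND", BinaryPredicate("b", "==", 2))
    assert p(entry) and q(entry)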
| 42.337838 | 68 | 0.595914 | 771 | 6,266 | 4.671855 | 0.097276 | 0.466408 | 0.390894 | 0.217379 | 0.832871 | 0.771793 | 0.648806 | 0.504442 | 0.504442 | 0.504442 | 0 | 0.046342 | 0.190712 | 6,266 | 147 | 69 | 42.62585 | 0.663972 | 0.019311 | 0 | 0.017094 | 0 | 0 | 0.050489 | 0.006026 | 0 | 0 | 0 | 0 | 0.726496 | 1 | 0.111111 | false | 0 | 0.017094 | 0 | 0.128205 | 0 | 0 | 0 | 0 | null | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
2d492da2d79c6f51febd53eb0e12d2a01a531317 | 34 | py | Python | models/__init__.py | SimonBartels/Variations_of_VAE | 89eec430eb3ec4483a47f345cc83b86051a81be7 | ["MIT"] | 1 | 2021-11-07T22:52:14.000Z | 2021-11-07T22:52:14.000Z | models/__init__.py | SimonBartels/Variations_of_VAE | 89eec430eb3ec4483a47f345cc83b86051a81be7 | ["MIT"] | null | null | null | models/__init__.py | SimonBartels/Variations_of_VAE | 89eec430eb3ec4483a47f345cc83b86051a81be7 | ["MIT"] | 1 | 2021-08-05T13:32:29.000Z | 2021-08-05T13:32:29.000Z |
from .vae.VAE_bayes_jaks import *
| 17
| 33
| 0.794118
| 6
| 34
| 4.166667
| 0.833333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.117647
| 34
| 1
| 34
| 34
| 0.833333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| 6
| 2dad0889913a58bbe6d071246c6fb47781e5c8f4
| 98
| py
| Python
| test/conftest.py
| mpccolorado/python-escpos
| e21940a5b46bd61052e4f5677199a9d3c19a41e7
| ["MIT"]
| 683
| 2015-12-28T08:52:55.000Z
| 2022-03-30T18:28:33.000Z
| test/conftest.py
| mpccolorado/python-escpos
| e21940a5b46bd61052e4f5677199a9d3c19a41e7
| ["MIT"]
| 345
| 2015-12-23T20:56:12.000Z
| 2022-03-06T19:48:28.000Z
| test/conftest.py
| mpccolorado/python-escpos
| e21940a5b46bd61052e4f5677199a9d3c19a41e7
| ["MIT"]
| 243
| 2015-12-25T17:52:20.000Z
| 2022-03-30T00:10:50.000Z
|
import pytest
from escpos.printer import Dummy
@pytest.fixture
def driver():
return Dummy()
| 12.25
| 32
| 0.744898
| 13
| 98
| 5.615385
| 0.769231
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.173469
| 98
| 7
| 33
| 14
| 0.901235
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.2
| true
| 0
| 0.4
| 0.2
| 0.8
| 0.2
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 1
| 1
| 0
| 0
| 6
| 2dffa62d78ceba72d0200994402c48361d4053df
| 226
| py
| Python
| pyfuzzysystem/defuzzyfication/longest_maximum.py
| e1Ru1o/pyfuzzysystem
| 0da96fafd4bb7e5ed34730bb456ad78401e835dc
| ["MIT"]
| null
| null
| null
| pyfuzzysystem/defuzzyfication/longest_maximum.py
| e1Ru1o/pyfuzzysystem
| 0da96fafd4bb7e5ed34730bb456ad78401e835dc
| ["MIT"]
| null
| null
| null
| pyfuzzysystem/defuzzyfication/longest_maximum.py
| e1Ru1o/pyfuzzysystem
| 0da96fafd4bb7e5ed34730bb456ad78401e835dc
| ["MIT"]
| null
| null
| null
|
from .utils import defuzzification_search
def longest_maximum(fuzzy_set):
'''
Find the smallest element that has
maximum membership value
'''
return defuzzification_search(fuzzy_set, lambda x, y: x >= y)
| 28.25
| 66
| 0.716814
| 29
| 226
| 5.413793
| 0.758621
| 0.267516
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.207965
| 226
| 8
| 66
| 28.25
| 0.877095
| 0.265487
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0.333333
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 6
| 9307dcc104eb5d8f8e78189c9dc983e8af7dbb25
| 86
| py
| Python
| appendix/app/common/logging.py
| iurykrieger96/morpy-tcc
| 95cb484ede708fab798db5471f944472c2a65c66
| ["MIT"]
| null
| null
| null
| appendix/app/common/logging.py
| iurykrieger96/morpy-tcc
| 95cb484ede708fab798db5471f944472c2a65c66
| ["MIT"]
| null
| null
| null
| appendix/app/common/logging.py
| iurykrieger96/morpy-tcc
| 95cb484ede708fab798db5471f944472c2a65c66
| ["MIT"]
| null
| null
| null
|
from flask import current_app
def info(message):
current_app.logger.info(message)
| 21.5
| 36
| 0.790698
| 13
| 86
| 5.076923
| 0.692308
| 0.30303
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.127907
| 86
| 4
| 36
| 21.5
| 0.88
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0.333333
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 6
| 93253e4adeeb88dd9fd04e4e5050214b61ef82a7
| 86
| py
| Python
| tributary/lazy/base.py
| ceball/tributary
| 5e30f90d1a5cc176c0f231f525d9dc5a81353925
| ["Apache-2.0"]
| 357
| 2018-09-13T19:58:46.000Z
| 2022-03-31T17:22:20.000Z
| tributary/lazy/base.py
| ceball/tributary
| 5e30f90d1a5cc176c0f231f525d9dc5a81353925
| ["Apache-2.0"]
| 109
| 2018-09-13T18:37:00.000Z
| 2022-03-27T00:59:49.000Z
| tributary/lazy/base.py
| ceball/tributary
| 5e30f90d1a5cc176c0f231f525d9dc5a81353925
| ["Apache-2.0"]
| 36
| 2018-09-17T21:01:05.000Z
| 2022-03-26T02:41:37.000Z
|
from .graph import LazyGraph # noqa: F401
from .node import Node, node # noqa: F401
| 28.666667
| 42
| 0.72093
| 13
| 86
| 4.769231
| 0.538462
| 0.258065
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.086957
| 0.197674
| 86
| 2
| 43
| 43
| 0.811594
| 0.244186
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| 6
| 9326777f909e9dc627a169e0bb11ced3d18d5290
| 175
| py
| Python
| wsgi.py
| zebraxxl/micro-pass
| dc2242cc98742890d163b7359b2fbdf63d1dcdc4
| ["MIT"]
| null
| null
| null
| wsgi.py
| zebraxxl/micro-pass
| dc2242cc98742890d163b7359b2fbdf63d1dcdc4
| ["MIT"]
| null
| null
| null
| wsgi.py
| zebraxxl/micro-pass
| dc2242cc98742890d163b7359b2fbdf63d1dcdc4
| ["MIT"]
| null
| null
| null
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from src import app
__author__ = "zebraxxl"
def application(environ, start_response):
return app(environ, start_response)
| 17.5
| 41
| 0.708571
| 23
| 175
| 5.130435
| 0.826087
| 0.20339
| 0.338983
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.006757
| 0.154286
| 175
| 9
| 42
| 19.444444
| 0.790541
| 0.24
| 0
| 0
| 0
| 0
| 0.061069
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| false
| 0
| 0.25
| 0.25
| 0.75
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 6
| 9383b7ba883f4f351ba430c51dddba3a15ec6046
| 192
| py
| Python
| zunzun/LongRunningProcess/__init__.py
| Sturtuk/EPES
| 6926382922e6291caa1b3b66beea8177a9dde995
| ["BSD-2-Clause"]
| null
| null
| null
| zunzun/LongRunningProcess/__init__.py
| Sturtuk/EPES
| 6926382922e6291caa1b3b66beea8177a9dde995
| ["BSD-2-Clause"]
| null
| null
| null
| zunzun/LongRunningProcess/__init__.py
| Sturtuk/EPES
| 6926382922e6291caa1b3b66beea8177a9dde995
| ["BSD-2-Clause"]
| null
| null
| null
|
from . import StatusMonitoredLongRunningProcessPage
from . import CharacterizeData
from . import StatisticalDistributions
from . import FunctionFinder
from . import FunctionFinderResults
| 17.454545
| 51
| 0.84375
| 15
| 192
| 10.8
| 0.466667
| 0.308642
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.130208
| 192
| 10
| 52
| 19.2
| 0.97006
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 1
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| 6
| fa7e31596eb610f95fe6acd12860d0f5197890be
| 4,579
| py
| Python
| violas_client/libra_client/test/test_transaction.py
| violas-core/violas-client
| e8798f7d081ac218b78b81fd7eb2f8da92631a16
| ["MIT"]
| null
| null
| null
| violas_client/libra_client/test/test_transaction.py
| violas-core/violas-client
| e8798f7d081ac218b78b81fd7eb2f8da92631a16
| ["MIT"]
| null
| null
| null
| violas_client/libra_client/test/test_transaction.py
| violas-core/violas-client
| e8798f7d081ac218b78b81fd7eb2f8da92631a16
| ["MIT"]
| 1
| 2022-01-05T06:49:42.000Z
| 2022-01-05T06:49:42.000Z
|
from violas_client.lbrtypes.account_config import testnet_dd_account_address
from violas_client.libra_client import Client, Wallet
from typing import List
from violas_client.libra_client.account import Account
def create_accounts(account_number)-> List[Account]:
wallet = Wallet.new()
return [wallet.new_account() for _ in range(account_number)]
def create_accounts_with_coins(account_number)-> List[Account]:
wallet = Wallet.new()
client = create_client()
accounts = []
for _ in range(account_number):
account = wallet.new_account()
client.mint_coin(account.address, 100, auth_key_prefix=account.auth_key_prefix, is_blocking=True)
accounts.append(account)
return accounts
def create_client() -> Client:
return Client()
def test_get_sender():
client = create_client()
[a1, a2] = create_accounts(2)
seq = client.mint_coin(a1.address, 100, auth_key_prefix=a1.auth_key_prefix, is_blocking=True)
tx = client.get_account_transaction(testnet_dd_account_address(), seq)
assert tx.get_sender() == testnet_dd_account_address().hex().lower()
seq = client.mint_coin(a2.address, 100, auth_key_prefix=a2.auth_key_prefix, is_blocking=True)
seq = client.transfer_coin(a1, a2.address, 10, is_blocking=True)
tx = client.get_account_transaction(a1.address, seq)
assert tx.get_sender() == a1.address_hex.lower()
tx = client.get_transaction(0)
assert None == tx.get_sender()
tx = client.get_transaction(1)
assert None == tx.get_sender()
def test_get_receiver():
client = create_client()
[a1, a2] = create_accounts(2)
seq = client.mint_coin(a1.address, 100, auth_key_prefix=a1.auth_key_prefix, is_blocking=True)
tx = client.get_account_transaction(testnet_dd_account_address(), seq)
assert tx.get_receiver() == a1.address_hex.lower()
seq = client.mint_coin(a2.address, 100, auth_key_prefix=a2.auth_key_prefix, is_blocking=True)
seq = client.transfer_coin(a1, a2.address, 10, is_blocking=True)
tx = client.get_account_transaction(a1.address, seq)
assert tx.get_receiver() == a2.address_hex.lower()
tx = client.get_transaction(0)
assert None == tx.get_receiver()
tx = client.get_transaction(1)
assert None == tx.get_receiver()
def test_get_amount():
client = create_client()
[a1, a2] = create_accounts(2)
seq = client.mint_coin(a1.address, 99, auth_key_prefix=a1.auth_key_prefix, is_blocking=True)
tx = client.get_account_transaction(testnet_dd_account_address(), seq)
assert tx.get_amount() == 99
seq = client.mint_coin(a2.address, 100, auth_key_prefix=a2.auth_key_prefix, is_blocking=True)
seq = client.transfer_coin(a1, a2.address, 88, is_blocking=True)
tx = client.get_account_transaction(a1.address, seq)
assert tx.get_amount() == 88
tx = client.get_transaction(0)
assert None == tx.get_amount()
tx = client.get_transaction(1)
assert None == tx.get_amount()
def test_get_currency_code():
client = create_client()
[a1, a2] = create_accounts(2)
seq = client.mint_coin(a1.address, 99, auth_key_prefix=a1.auth_key_prefix, is_blocking=True)
tx = client.get_account_transaction(testnet_dd_account_address(), seq)
assert tx.get_currency_code() == "XUS"
seq = client.mint_coin(a2.address, 100, auth_key_prefix=a2.auth_key_prefix, is_blocking=True)
seq = client.transfer_coin(a1, a2.address, 88, is_blocking=True)
tx = client.get_account_transaction(a1.address, seq)
assert tx.get_currency_code() == "XUS"
tx = client.get_transaction(0)
assert None == tx.get_currency_code()
tx = client.get_transaction(1)
assert None == tx.get_currency_code()
def test_get_data():
client = create_client()
[a1, a2] = create_accounts(2)
seq = client.mint_coin(a1.address, 100, auth_key_prefix=a1.auth_key_prefix, is_blocking=True)
tx = client.get_account_transaction(testnet_dd_account_address(), seq)
assert tx.get_data() == ""
data = b"data"
seq = client.mint_coin(a2.address, 100, auth_key_prefix=a2.auth_key_prefix, is_blocking=True)
seq = client.transfer_coin(a1, a2.address, 10, is_blocking=True, data=data)
tx = client.get_account_transaction(a1.address, seq)
assert tx.get_data() == data.hex()
seq = client.transfer_coin(a1, a2.address, 10, is_blocking=True)
tx = client.get_account_transaction(a1.address, seq)
assert tx.get_data() == ""
tx = client.get_transaction(0)
assert None == tx.get_data()
tx = client.get_transaction(1)
assert None == tx.get_data()
| 39.474138
| 105
| 0.723957
| 674
| 4,579
| 4.621662
| 0.090504
| 0.049438
| 0.091814
| 0.05297
| 0.851685
| 0.798074
| 0.76886
| 0.738684
| 0.729053
| 0.631461
| 0
| 0.029282
| 0.15724
| 4,579
| 116
| 106
| 39.474138
| 0.777922
| 0
| 0
| 0.684783
| 0
| 0
| 0.002183
| 0
| 0
| 0
| 0
| 0
| 0.228261
| 1
| 0.086957
| false
| 0
| 0.043478
| 0.01087
| 0.163043
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 6
| fa82c64e6cec3bbc492f1053eaf455e2a28dfe06
| 313
| py
| Python
| PythonExercicios/ex013.py
| lordvinick/Python
| c03fd08d4c204104bf0196b0bd129427fd2067ae
| ["MIT"]
| null
| null
| null
| PythonExercicios/ex013.py
| lordvinick/Python
| c03fd08d4c204104bf0196b0bd129427fd2067ae
| ["MIT"]
| null
| null
| null
| PythonExercicios/ex013.py
| lordvinick/Python
| c03fd08d4c204104bf0196b0bd129427fd2067ae
| ["MIT"]
| null
| null
| null
|
print('\033[31m=\033[m'*12, '\033[33mReajuste Salarial\033[m', '\033[31m=\033[m'*12)
sal = float(input('\033[35mQual é o salario do Funcionário? R$' ))
p = sal * 15/100
print('\033[4;32mUm funcionário que ganhava R$\033[34m{},\033[4;32m com 15% de aumento, passa a receber R$\033[34m{:.2f}.'.format(sal,(sal+p)))
| 62.6
| 143
| 0.664537
| 59
| 313
| 3.525424
| 0.576271
| 0.057692
| 0.086538
| 0.096154
| 0.115385
| 0
| 0
| 0
| 0
| 0
| 0
| 0.224199
| 0.102236
| 313
| 4
| 144
| 78.25
| 0.516014
| 0
| 0
| 0
| 0
| 0.25
| 0.696486
| 0.073482
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0.25
| 0
| 0
| 0
| 0.5
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 6
| fa894dde0e21bf6bf0bea7e6d0345e962be69919
| 2,178
| py
| Python
| pirates/leveleditor/worldData/pvp_shipBattleWorld1.py
| itsyaboyrocket/pirates
| 6ca1e7d571c670b0d976f65e608235707b5737e3
| ["BSD-3-Clause"]
| 3
| 2021-02-25T06:38:13.000Z
| 2022-03-22T07:00:15.000Z
| pirates/leveleditor/worldData/pvp_shipBattleWorld1.py
| itsyaboyrocket/pirates
| 6ca1e7d571c670b0d976f65e608235707b5737e3
| ["BSD-3-Clause"]
| null
| null
| null
| pirates/leveleditor/worldData/pvp_shipBattleWorld1.py
| itsyaboyrocket/pirates
| 6ca1e7d571c670b0d976f65e608235707b5737e3
| ["BSD-3-Clause"]
| 1
| 2021-02-25T06:38:17.000Z
| 2021-02-25T06:38:17.000Z
|
# uncompyle6 version 3.2.0
# Python bytecode 2.4 (62061)
# Decompiled from: Python 2.7.14 (v2.7.14:84471935ed, Sep 16 2017, 20:19:30) [MSC v.1500 32 bit (Intel)]
# Embedded file name: pirates.leveleditor.worldData.pvp_shipBattleWorld1
from pandac.PandaModules import Point3, VBase3
objectStruct = {'Objects': {'1171688064.0jubutler': {'Type': 'Region', 'Name': 'default', 'Objects': {'1171689088.0jubutler': {'Type': 'Player Spawn Node', 'Hpr': Point3(0.0, 0.0, 0.0), 'Index': -1, 'Pos': Point3(-500.0, -500.0, 0.0), 'Priority': '1', 'Scale': VBase3(1.0, 1.0, 1.0), 'SpawnDelay': '20', 'Spawnables': 'Team 2', 'Visual': {'Color': (0.8, 0.2, 0.65, 1), 'Model': 'models/misc/smiley'}, 'startingDepth': '12'}, '1171689216.0jubutler': {'Type': 'Player Spawn Node', 'Hpr': Point3(0.0, 0.0, 0.0), 'Index': -1, 'Pos': Point3(-500.0, 500.0, 0.0), 'Priority': '1', 'Scale': VBase3(1.0, 1.0, 1.0), 'SpawnDelay': '20', 'Spawnables': 'Team 2', 'Visual': {'Color': (0.8, 0.2, 0.65, 1), 'Model': 'models/misc/smiley'}, 'startingDepth': '12'}, '1171689216.0jubutler0': {'Type': 'Player Spawn Node', 'Hpr': Point3(0.0, 0.0, 0.0), 'Index': -1, 'Pos': Point3(500.0, 500.0, 0.0), 'Priority': '1', 'Scale': VBase3(1.0, 1.0, 1.0), 'SpawnDelay': '20', 'Spawnables': 'Team 1', 'Visual': {'Color': (0.8, 0.2, 0.65, 1), 'Model': 'models/misc/smiley'}, 'startingDepth': '12'}, '1171689216.0jubutler1': {'Type': 'Player Spawn Node', 'Hpr': Point3(0.0, 0.0, 0.0), 'Index': -1, 'Pos': Point3(500.0, -500.0, 0.0), 'Priority': '1', 'Scale': VBase3(1.0, 1.0, 1.0), 'SpawnDelay': '20', 'Spawnables': 'Team 1', 'Visual': {'Color': (0.8, 0.2, 0.65, 1), 'Model': 'models/misc/smiley'}, 'startingDepth': '12'}}, 'Visual': {}}}, 'Layers': {}, 'ObjectIds': {'1171688064.0jubutler': '["Objects"]["1171688064.0jubutler"]', '1171689088.0jubutler': '["Objects"]["1171688064.0jubutler"]["Objects"]["1171689088.0jubutler"]', '1171689216.0jubutler': '["Objects"]["1171688064.0jubutler"]["Objects"]["1171689216.0jubutler"]', '1171689216.0jubutler0': '["Objects"]["1171688064.0jubutler"]["Objects"]["1171689216.0jubutler0"]', '1171689216.0jubutler1': '["Objects"]["1171688064.0jubutler"]["Objects"]["1171689216.0jubutler1"]'}}
| 363
| 1,896
| 0.634527
| 311
| 2,178
| 4.440514
| 0.244373
| 0.04055
| 0.043447
| 0.034757
| 0.648805
| 0.518465
| 0.518465
| 0.518465
| 0.518465
| 0.518465
| 0
| 0.213961
| 0.092287
| 2,178
| 6
| 1,896
| 363
| 0.484573
| 0.103765
| 0
| 0
| 0
| 0
| 0.548768
| 0.205852
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.5
| 0
| 0.5
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 6
| faaa3d5210f0b01accc72893698ea0d5f877a5a6
| 204
| py
| Python
| ssl_fastai2/imports.py
| Samjoel3101/Self-Supervised-Learning-fastai2
| 08c6262ecd9497658c1143b67bc9ce432e7a0c20
| ["Apache-2.0"]
| null
| null
| null
| ssl_fastai2/imports.py
| Samjoel3101/Self-Supervised-Learning-fastai2
| 08c6262ecd9497658c1143b67bc9ce432e7a0c20
| ["Apache-2.0"]
| 1
| 2021-09-28T05:35:25.000Z
| 2021-09-28T05:35:25.000Z
| ssl_fastai2/imports.py
| Samjoel3101/Self-Supervised-Learning-fastai2
| 08c6262ecd9497658c1143b67bc9ce432e7a0c20
| ["Apache-2.0"]
| null
| null
| null
|
import pdb
from fastai2 import *
from fastai2.vision.all import *
from fastai2.basics import *
from fastai2.vision.models.unet import _get_sz_change_idxs
from fastai2.callback.hook import hook_outputs
| 29.142857
| 58
| 0.823529
| 31
| 204
| 5.258065
| 0.516129
| 0.337423
| 0.312883
| 0.282209
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.027933
| 0.122549
| 204
| 6
| 59
| 34
| 0.882682
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 6
| fadaedf63cf6c7121d0fae02e860f002f4c01715
| 6,038
| py
| Python
| tests/unit/api/endpoints/test_flow.py
| jina-ai/jinad
| 4fb874b145357668ecd84cb015c9db1939ed013c
| ["Apache-2.0"]
| 3
| 2020-10-29T10:11:42.000Z
| 2022-03-15T02:32:43.000Z
| tests/unit/api/endpoints/test_flow.py
| jina-ai/jinad
| 4fb874b145357668ecd84cb015c9db1939ed013c
| ["Apache-2.0"]
| 41
| 2020-10-23T13:06:39.000Z
| 2021-01-06T19:55:06.000Z
| tests/unit/api/endpoints/test_flow.py
| jina-ai/jinad
| 4fb874b145357668ecd84cb015c9db1939ed013c
| ["Apache-2.0"]
| null
| null
| null
|
import uuid
import pytest
from fastapi import UploadFile
from jinad.api.endpoints import flow
_temp_id = uuid.uuid1()
def mock_create_success(**kwargs):
return _temp_id, '0.0.0.0', 12345
def mock_flow_creation_exception(**kwargs):
raise flow.FlowCreationException
def mock_flow_parse_exception(**kwargs):
raise flow.FlowYamlParseException
def mock_flow_start_exception(**kwargs):
raise flow.FlowStartException
def mock_fetch_success(**kwargs):
return '0.0.0.0', 12345, '!Flow\npods:\n pod1:\n uses:_pass'
def mock_fetch_exception(**kwargs):
raise KeyError
@pytest.mark.asyncio
async def test_create_from_pods_success(monkeypatch):
monkeypatch.setattr(flow.flow_store, '_create', mock_create_success)
response = await flow._create_from_pods()
assert response['status_code'] == 200
assert response['flow_id'] == _temp_id
assert response['host'] == '0.0.0.0'
assert response['port'] == 12345
assert response['status'] == 'started'
@pytest.mark.asyncio
async def test_create_from_pods_flow_create_exception(monkeypatch):
monkeypatch.setattr(flow.flow_store, '_create', mock_flow_creation_exception)
with pytest.raises(flow.HTTPException) as response:
await flow._create_from_pods()
assert response.value.status_code == 404
assert response.value.detail == 'Bad pods args'
@pytest.mark.asyncio
async def test_create_from_pods_flow_start_exception(monkeypatch):
monkeypatch.setattr(flow.flow_store, '_create', mock_flow_start_exception)
with pytest.raises(flow.HTTPException) as response:
await flow._create_from_pods()
assert response.value.status_code == 404
assert response.value.detail == 'Flow couldn\'t get started'
@pytest.mark.asyncio
async def test_create_from_yaml_success(monkeypatch):
monkeypatch.setattr(flow.flow_store, '_create', mock_create_success)
response = await flow._create_from_yaml(yamlspec=UploadFile(filename='abc.yaml'),
uses_files=[UploadFile(filename='abcd.yaml')],
pymodules_files=[UploadFile(filename='abc.py')])
assert response['status_code'] == 200
assert response['flow_id'] == _temp_id
assert response['host'] == '0.0.0.0'
assert response['port'] == 12345
assert response['status'] == 'started'
@pytest.mark.asyncio
async def test_create_from_yaml_parse_exception(monkeypatch):
monkeypatch.setattr(flow.flow_store, '_create', mock_flow_parse_exception)
with pytest.raises(flow.HTTPException) as response:
await flow._create_from_yaml(yamlspec=UploadFile(filename='abc.yaml'),
uses_files=[UploadFile(filename='abcd.yaml')],
pymodules_files=[UploadFile(filename='abc.py')])
assert response.value.status_code == 404
assert response.value.detail == 'Invalid yaml file.'
@pytest.mark.asyncio
async def test_create_from_yaml_flow_start_exception(monkeypatch):
monkeypatch.setattr(flow.flow_store, '_create', mock_flow_start_exception)
with pytest.raises(flow.HTTPException) as response:
await flow._create_from_yaml(yamlspec=UploadFile(filename='abc.yaml'),
uses_files=[UploadFile(filename='abcd.yaml')],
pymodules_files=[UploadFile(filename='abc.py')])
assert response.value.status_code == 404
assert 'Flow couldn\'t get started' in response.value.detail
@pytest.mark.asyncio
async def test_fetch_flow_success(monkeypatch):
monkeypatch.setattr(flow.flow_store, '_get', mock_fetch_success)
response = await flow._fetch(_temp_id)
assert response['status_code'] == 200
assert response['host'] == '0.0.0.0'
assert response['port'] == 12345
assert response['yaml'] == '!Flow\npods:\n pod1:\n uses:_pass'
@pytest.mark.asyncio
async def test_fetch_flow_success_yaml_only(monkeypatch):
monkeypatch.setattr(flow.flow_store, '_get', mock_fetch_success)
response = await flow._fetch(_temp_id, yaml_only=True)
assert response.status_code == 200
assert response.body == b'!Flow\npods:\n pod1:\n uses:_pass'
assert response.media_type == 'application/yaml'
@pytest.mark.asyncio
async def test_fetch_flow_keyerror(monkeypatch):
monkeypatch.setattr(flow.flow_store, '_get', mock_fetch_exception)
with pytest.raises(flow.HTTPException) as response:
await flow._fetch(_temp_id)
assert response.value.status_code == 404
assert response.value.detail == f'Flow ID {_temp_id} not found! Please create a new Flow'
def mock_ping_exception(**kwargs):
raise flow.GRPCServerError
@pytest.mark.asyncio
@pytest.mark.skip('unblocking jinad tests. will fix in next PR')
async def test_ping_success(monkeypatch, mocker):
response = await flow._ping(host='0.0.0.0', port=12345)
assert response['status_code'] == 200
assert response['detail'] == 'connected'
@pytest.mark.asyncio
@pytest.mark.skip('unblocking jinad tests. will fix in next PR')
async def test_ping_exception(monkeypatch):
monkeypatch.setattr(flow, 'py_client', mock_ping_exception)
with pytest.raises(flow.HTTPException) as response:
await flow._ping(host='0.0.0.0', port=12345)
assert response.value.status_code == 404
assert response.value.detail == 'Cannot connect to GRPC Server on 0.0.0.0:12345'
@pytest.mark.asyncio
async def test_delete_success(monkeypatch):
monkeypatch.setattr(flow.flow_store, '_delete', lambda **kwargs: None)
response = await flow._delete(_temp_id)
assert response['status_code'] == 200
@pytest.mark.asyncio
async def test_delete_exception(monkeypatch):
monkeypatch.setattr(flow.flow_store, '_delete', mock_fetch_exception)
with pytest.raises(flow.HTTPException) as response:
await flow._delete(_temp_id)
assert response.value.status_code == 404
assert response.value.detail == f'Flow ID {_temp_id} not found! Please create a new Flow'
| 37.271605
| 93
| 0.71845
| 786
| 6,038
| 5.278626
| 0.139949
| 0.111352
| 0.011569
| 0.095445
| 0.835623
| 0.810557
| 0.810557
| 0.727404
| 0.700892
| 0.647626
| 0
| 0.022936
| 0.169593
| 6,038
| 161
| 94
| 37.503106
| 0.804547
| 0
| 0
| 0.521008
| 0
| 0
| 0.126532
| 0
| 0
| 0
| 0
| 0
| 0.285714
| 1
| 0.058824
| false
| 0.02521
| 0.033613
| 0.016807
| 0.109244
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 6
| 87ad983d10c89694c263f429c1720653f32112f4
| 233
| py
| Python
| src/customers/admin.py
| hygull/try-django-tenant-schemas
| 56b3e1dfa940542c45b5f72d5b2ff844389e7d00
| ["MIT"]
| 1
| 2020-03-05T14:20:36.000Z
| 2020-03-05T14:20:36.000Z
| src/customers/admin.py
| hygull/try-django-tenant-schemas
| 56b3e1dfa940542c45b5f72d5b2ff844389e7d00
| ["MIT"]
| null
| null
| null
| src/customers/admin.py
| hygull/try-django-tenant-schemas
| 56b3e1dfa940542c45b5f72d5b2ff844389e7d00
| ["MIT"]
| 1
| 2021-01-29T14:33:28.000Z
| 2021-01-29T14:33:28.000Z
|
from django.contrib import admin
from .models import Client
class ClientAdmin(admin.ModelAdmin):
readonly_fields = ("schema_name", "domain_url",)
# admin.site.register(Client, ClientAdmin)
admin.site.register(Client, ClientAdmin)
| 25.888889
| 49
| 0.793991
| 29
| 233
| 6.275862
| 0.62069
| 0.175824
| 0.186813
| 0.252747
| 0.373626
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.094421
| 233
| 8
| 50
| 29.125
| 0.862559
| 0.171674
| 0
| 0
| 0
| 0
| 0.109948
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.4
| 0
| 0.8
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 6
| 87e3e8b8563136ec4883f8d2e03e21f05a0530c4
| 46
| py
| Python
| project/commons/services/__init__.py
| hiraqdev/base-django
| 4df57f356905274b26af57af8328f015d6c680a4
| ["MIT"]
| 1
| 2018-03-19T05:21:53.000Z
| 2018-03-19T05:21:53.000Z
| project/commons/services/__init__.py
| hiraq/base-django
| 4df57f356905274b26af57af8328f015d6c680a4
| ["MIT"]
| 6
| 2020-06-05T20:17:33.000Z
| 2022-03-11T23:45:44.000Z
| project/commons/services/__init__.py
| hiraq/base-django
| 4df57f356905274b26af57af8328f015d6c680a4
| ["MIT"]
| null
| null
| null
|
from commons.services.base import BaseService
| 23
| 45
| 0.869565
| 6
| 46
| 6.666667
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.086957
| 46
| 1
| 46
| 46
| 0.952381
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| 6
| 35c5c7f6237cd86c0fad896bb448d31ed67440e1
| 193
| py
| Python
| spt_compute/__init__.py
| mjshaw1/spt_compute_ensco_wo_temporal_forecast_lead_flexibility_and_dams
| 7ea364d0588a91f5b49457b0face9e8c3c265c23
| ["BSD-3-Clause"]
| null
| null
| null
| spt_compute/__init__.py
| mjshaw1/spt_compute_ensco_wo_temporal_forecast_lead_flexibility_and_dams
| 7ea364d0588a91f5b49457b0face9e8c3c265c23
| ["BSD-3-Clause"]
| null
| null
| null
| spt_compute/__init__.py
| mjshaw1/spt_compute_ensco_wo_temporal_forecast_lead_flexibility_and_dams
| 7ea364d0588a91f5b49457b0face9e8c3c265c23
| ["BSD-3-Clause"]
| null
| null
| null
|
from .ecmwf_forecast_process import run_ecmwf_forecast_process
from .hpc.spt_hpc_watershed_groups_process import spt_hpc_watershed_groups_process
from .process_lock import reset_lock_info_file
| 48.25
| 82
| 0.917098
| 30
| 193
| 5.333333
| 0.466667
| 0.1625
| 0.25
| 0.2625
| 0.35
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.062176
| 193
| 3
| 83
| 64.333333
| 0.883978
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| 6
| ea5b992df152aa0b2cbad9ff8fa1e9f2d51253a3
| 2,742
| py
| Python
| Foundations_of_Programming/5_can_balance/canBalance.py
| alvinctk/google-tech-dev-guide
| 9d7759bea1f44673c2de4f25a94b27368928a59f
| ["Apache-2.0"]
| 26
| 2019-06-07T05:29:47.000Z
| 2022-03-19T15:32:27.000Z
| Foundations_of_Programming/5_can_balance/canBalance.py
| alvinctk/google-tech-dev-guide
| 9d7759bea1f44673c2de4f25a94b27368928a59f
| ["Apache-2.0"]
| null
| null
| null
| Foundations_of_Programming/5_can_balance/canBalance.py
| alvinctk/google-tech-dev-guide
| 9d7759bea1f44673c2de4f25a94b27368928a59f
| ["Apache-2.0"]
| 6
| 2019-10-10T06:39:28.000Z
| 2020-05-12T19:50:55.000Z
|
"""
Problem:
Given a non-empty array, return true if there is a place to split the array so
that the sum of the numbers on one side is equal to the
sum of the numbers on the other side.
canBalance([1, 1, 1, 2, 1]) → true
canBalance([2, 1, 1, 2, 1]) → false
canBalance([10, 10]) → true
"""
def canBalance(arr):
"""
Determine if a list of numbers is balance.
Parameter:
arr := list of numbers
Return:
True if a split position can be found in the arr such that
both halves sum of numbers are equal.
False otherwise.
Assuming numbers can be integers or float
"""
show_result = lambda b: print("canBalance({}) = {}".format(arr, b))
# Empty list or None cannot be split
if arr is None or len(arr) == 0:
show_result(False)
return False
total = sum(arr)
half = 0
# Compute if there is a balance half of sum equal to other half.
for x in arr:
if half == total/2:
break
half += x
else:
# Loop complete successfully without break
# Therefore, there isn't any split in the array such that the sum of
# the numbers on one side is equal to the sum of numbers on the other
# side.
show_result(False)
return False
show_result(True)
return True
def canBalance2(arr):
"""
Determine if a list of numbers is balance.
Parameter:
arr := list of numbers
Return:
True if a split position can be found in the arr such that
both halves sum of numbers are equal.
False otherwise.
Assuming numbers can be only integers
"""
show_result = lambda b: print("canBalance2({}) = {}".format(arr, b))
# Empty list or None cannot be split
if arr is None or len(arr) == 0:
show_result(False)
return False
total = sum(arr)
# Since numbers are only integers, there will be no balance for
# odd numbers.
if total % 2 != 0:
show_result(False)
return False
half = 0
# Compute if there is a balance half of sum equal to other half.
for x in arr:
if half == total/2:
break
half += x
else:
# Loop complete successfully without break
# Therefore, there isn't any split in the array such that the sum of
# the numbers on one side is equal to the sum of numbers on the other
# side.
show_result(False)
return False
show_result(True)
return True
if __name__ == "__main__":
canBalance([1, 1, 1, 2, 1])
canBalance([2, 1, 1, 2, 1])
canBalance([10, 10])
print()
canBalance2([1, 1, 1, 2, 1])
canBalance2([2, 1, 1, 2, 1])
canBalance2([10, 10])
| 24.927273
| 78
| 0.601021
| 415
| 2,742
| 3.937349
| 0.20241
| 0.011016
| 0.029376
| 0.014688
| 0.832925
| 0.771726
| 0.711138
| 0.711138
| 0.711138
| 0.711138
| 0
| 0.029365
| 0.316922
| 2,742
| 109
| 79
| 25.155963
| 0.841431
| 0.522976
| 0
| 0.697674
| 0
| 0
| 0.039134
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.046512
| false
| 0
| 0
| 0
| 0.209302
| 0.069767
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 6
| ea919aae0fb8ff052b06b87db970bb9271b87da5
| 22,799
| py
| Python
| pyGPGO/covfunc.py
| dataronio/pyGPGO
| c628eec39d57d25929e6961b986378a3a35ffbd7
| ["MIT"]
| 172
| 2017-02-13T17:17:52.000Z
| 2019-12-11T03:13:28.000Z
| pyGPGO/covfunc.py
| ynkay/pyGPGO
| 97da7a5a27f60dfa21dd3349b02cb8e5ab042efa
| ["MIT"]
| 23
| 2017-02-13T17:04:02.000Z
| 2019-10-25T18:38:47.000Z
| pyGPGO/covfunc.py
| ynkay/pyGPGO
| 97da7a5a27f60dfa21dd3349b02cb8e5ab042efa
| ["MIT"]
| 43
| 2017-04-26T15:46:33.000Z
| 2019-12-05T13:02:57.000Z
|
import numpy as np
from scipy.special import gamma, kv
from scipy.spatial.distance import cdist
default_bounds = {
'l': [1e-4, 1],
'sigmaf': [1e-4, 2],
'sigman': [1e-6, 2],
'v': [1e-3, 10],
'gamma': [1e-3, 1.99],
'alpha': [1e-3, 1e4],
'period': [1e-3, 10]
}
def l2norm_(X, Xstar):
"""
Wrapper function to compute the L2 norm
Parameters
----------
X: np.ndarray, shape=((n, nfeatures))
Instances.
Xstar: np.ndarray, shape=((m, nfeatures))
Instances
Returns
-------
np.ndarray
Pairwise euclidian distance between row pairs of `X` and `Xstar`.
"""
return cdist(X, Xstar)
def kronDelta(X, Xstar):
"""
Computes Kronecker delta for rows in X and Xstar.
Parameters
----------
X: np.ndarray, shape=((n, nfeatures))
Instances.
Xstar: np.ndarray, shape((m, nfeatures))
Instances.
Returns
-------
np.ndarray
Kronecker delta between row pairs of `X` and `Xstar`.
"""
return cdist(X, Xstar) < np.finfo(np.float32).eps
class squaredExponential:
def __init__(self, l=1, sigmaf=1.0, sigman=1e-6, bounds=None, parameters=['l', 'sigmaf',
'sigman']):
"""
Squared exponential kernel class.
Parameters
----------
l: float
Characteristic length-scale. Units in input space in which posterior GP values do not
change significantly.
sigmaf: float
Signal variance. Controls the overall scale of the covariance function.
sigman: float
Noise variance. Additive noise in output space.
bounds: list
List of tuples specifying hyperparameter range in optimization procedure.
parameters: list
List of strings specifying which hyperparameters should be optimized.
"""
self.l = l
self.sigmaf = sigmaf
self.sigman = sigman
self.parameters = parameters
if bounds is not None:
self.bounds = bounds
else:
self.bounds = []
for param in self.parameters:
self.bounds.append(default_bounds[param])
def K(self, X, Xstar):
"""
Computes covariance function values over `X` and `Xstar`.
Parameters
----------
X: np.ndarray, shape=((n, nfeatures))
Instances
Xstar: np.ndarray, shape=((n, nfeatures))
Instances
Returns
-------
np.ndarray
Computed covariance matrix.
"""
r = l2norm_(X, Xstar)
return self.sigmaf * np.exp(-.5 * r ** 2 / self.l ** 2) + self.sigman * kronDelta(X, Xstar)
def gradK(self, X, Xstar, param='l'):
"""
Computes gradient matrix for instances `X`, `Xstar` and hyperparameter `param`.
Parameters
----------
X: np.ndarray, shape=((n, nfeatures))
Instances
Xstar: np.ndarray, shape=((n, nfeatures))
Instances
param: str
Parameter to compute gradient matrix for.
Returns
-------
np.ndarray
Gradient matrix for parameter `param`.
"""
if param == 'l':
r = l2norm_(X, Xstar)
num = r ** 2 * self.sigmaf * np.exp(-r ** 2 / (2 * self.l ** 2))
den = self.l ** 3
l_grad = num / den
return (l_grad)
elif param == 'sigmaf':
r = l2norm_(X, Xstar)
sigmaf_grad = (np.exp(-.5 * r ** 2 / self.l ** 2))
return (sigmaf_grad)
elif param == 'sigman':
sigman_grad = kronDelta(X, Xstar)
return (sigman_grad)
else:
raise ValueError('Param not found')
class matern:
def __init__(self, v=1, l=1, sigmaf=1, sigman=1e-6, bounds=None, parameters=['v',
'l',
'sigmaf',
'sigman']):
"""
Matern kernel class.
Parameters
----------
v: float
Scale-mixture hyperparameter of the Matern covariance function.
l: float
Characteristic length-scale. Units in input space in which posterior GP values do not
change significantly.
sigmaf: float
Signal variance. Controls the overall scale of the covariance function.
sigman: float
Noise variance. Additive noise in output space.
bounds: list
List of tuples specifying hyperparameter range in optimization procedure.
parameters: list
List of strings specifying which hyperparameters should be optimized.
"""
self.v, self.l = v, l
self.sigmaf = sigmaf
self.sigman = sigman
self.parameters = parameters
if bounds is not None:
self.bounds = bounds
else:
self.bounds = []
for param in self.parameters:
self.bounds.append(default_bounds[param])
def K(self, X, Xstar):
"""
Computes covariance function values over `X` and `Xstar`.
Parameters
----------
X: np.ndarray, shape=((n, nfeatures))
Instances
Xstar: np.ndarray, shape=((n, nfeatures))
Instances
Returns
-------
np.ndarray
Computed covariance matrix.
"""
r = l2norm_(X, Xstar)
bessel = kv(self.v, np.sqrt(2 * self.v) * r / self.l)
f = 2 ** (1 - self.v) / gamma(self.v) * (np.sqrt(2 * self.v) * r / self.l) ** self.v
res = f * bessel
res[np.isnan(res)] = 1
res = self.sigmaf * res + self.sigman * kronDelta(X, Xstar)
return (res)
class matern32:
def __init__(self, l=1, sigmaf=1, sigman=1e-6, bounds=None, parameters=['l', 'sigmaf', 'sigman']):
"""
Matern v=3/2 kernel class.
Parameters
----------
l: float
Characteristic length-scale. Units in input space in which posterior GP values do not
change significantly.
sigmaf: float
Signal variance. Controls the overall scale of the covariance function.
sigman: float
Noise variance. Additive noise in output space.
bounds: list
List of tuples specifying hyperparameter range in optimization procedure.
parameters: list
List of strings specifying which hyperparameters should be optimized.
"""
self.l = l
self.sigmaf = sigmaf
self.sigman = sigman
self.parameters = parameters
if bounds is not None:
self.bounds = bounds
else:
self.bounds = []
for param in self.parameters:
self.bounds.append(default_bounds[param])
def K(self, X, Xstar):
"""
Computes covariance function values over `X` and `Xstar`.
Parameters
----------
X: np.ndarray, shape=((n, nfeatures))
Instances
Xstar: np.ndarray, shape=((n, nfeatures))
Instances
Returns
-------
np.ndarray
Computed covariance matrix.
"""
r = l2norm_(X, Xstar)
one = (1 + np.sqrt(3 * (r / self.l) ** 2))
two = np.exp(- np.sqrt(3 * (r / self.l) ** 2))
return self.sigmaf * one * two + self.sigman * kronDelta(X, Xstar)
def gradK(self, X, Xstar, param):
"""
Computes gradient matrix for instances `X`, `Xstar` and hyperparameter `param`.
Parameters
----------
X: np.ndarray, shape=((n, nfeatures))
Instances
Xstar: np.ndarray, shape=((n, nfeatures))
Instances
param: str
Parameter to compute gradient matrix for.
Returns
-------
np.ndarray
Gradient matrix for parameter `param`.
"""
if param == 'l':
r = l2norm_(X, Xstar)
num = 3 * (r ** 2) * self.sigmaf * np.exp(-np.sqrt(3) * r / self.l)
return num / (self.l ** 3)
elif param == 'sigmaf':
r = l2norm_(X, Xstar)
one = (1 + np.sqrt(3 * (r / self.l) ** 2))
two = np.exp(- np.sqrt(3 * (r / self.l) ** 2))
return one * two
elif param == 'sigman':
return kronDelta(X, Xstar)
else:
raise ValueError('Param not found')
class matern52:
def __init__(self, l=1, sigmaf=1, sigman=1e-6, bounds=None, parameters=['l', 'sigmaf', 'sigman']):
"""
Matern v=5/2 kernel class.
Parameters
----------
l: float
Characteristic length-scale. Units in input space in which posterior GP values do not
change significantly.
sigmaf: float
Signal variance. Controls the overall scale of the covariance function.
sigman: float
Noise variance. Additive noise in output space.
bounds: list
List of tuples specifying hyperparameter range in optimization procedure.
parameters: list
List of strings specifying which hyperparameters should be optimized.
"""
self.l = l
self.sigmaf = sigmaf
self.sigman = sigman
self.parameters = parameters
if bounds is not None:
self.bounds = bounds
else:
self.bounds = []
for param in self.parameters:
self.bounds.append(default_bounds[param])
def K(self, X, Xstar):
"""
Computes covariance function values over `X` and `Xstar`.
Parameters
----------
X: np.ndarray, shape=((n, nfeatures))
Instances
Xstar: np.ndarray, shape=((n, nfeatures))
Instances
Returns
-------
np.ndarray
Computed covariance matrix.
"""
r = l2norm_(X, Xstar)/self.l
one = (1 + np.sqrt(5 * r ** 2) + 5 * r ** 2 / 3)
two = np.exp(-np.sqrt(5 * r ** 2))
return self.sigmaf * one * two + self.sigman * kronDelta(X, Xstar)
def gradK(self, X, Xstar, param):
"""
Computes gradient matrix for instances `X`, `Xstar` and hyperparameter `param`.
Parameters
----------
X: np.ndarray, shape=((n, nfeatures))
Instances
Xstar: np.ndarray, shape=((n, nfeatures))
Instances
param: str
Parameter to compute gradient matrix for.
Returns
-------
np.ndarray
Gradient matrix for parameter `param`.
"""
r = l2norm_(X, Xstar)
if param == 'l':
num_one = 5 * r ** 2 * np.exp(-np.sqrt(5) * r / self.l)
num_two = np.sqrt(5) * r / self.l + 1
res = num_one * num_two / (3 * self.l ** 3)
return res
elif param == 'sigmaf':
one = (1 + np.sqrt(5 * (r / self.l) ** 2) + 5 * (r / self.l) ** 2 / 3)
two = np.exp(-np.sqrt(5 * r ** 2))
return one * two
elif param == 'sigman':
return kronDelta(X, Xstar)
class gammaExponential:
def __init__(self, gamma=1, l=1, sigmaf=1, sigman=1e-6, bounds=None, parameters=['gamma',
'l',
'sigmaf',
'sigman']):
"""
Gamma-exponential kernel class.
Parameters
----------
gamma: float
Hyperparameter of the Gamma-exponential covariance function.
l: float
Characteristic length-scale. Units in input space in which posterior GP values do not
change significantly.
sigmaf: float
Signal variance. Controls the overall scale of the covariance function.
sigman: float
Noise variance. Additive noise in output space.
bounds: list
List of tuples specifying hyperparameter range in optimization procedure.
parameters: list
List of strings specifying which hyperparameters should be optimized.
"""
self.gamma = gamma
self.l = l
self.sigmaf = sigmaf
self.sigman = sigman
self.parameters = parameters
if bounds is not None:
self.bounds = bounds
else:
self.bounds = []
for param in self.parameters:
self.bounds.append(default_bounds[param])
def K(self, X, Xstar):
"""
Computes covariance function values over `X` and `Xstar`.
Parameters
----------
X: np.ndarray, shape=((n, nfeatures))
Instances
Xstar: np.ndarray, shape=((n, nfeatures))
Instances
Returns
-------
np.ndarray
Computed covariance matrix.
"""
r = l2norm_(X, Xstar)
return self.sigmaf * (np.exp(-(r / self.l) ** self.gamma)) + \
self.sigman * kronDelta(X, Xstar)
def gradK(self, X, Xstar, param):
"""
Computes gradient matrix for instances `X`, `Xstar` and hyperparameter `param`.
Parameters
----------
X: np.ndarray, shape=((n, nfeatures))
Instances
Xstar: np.ndarray, shape=((n, nfeatures))
Instances
param: str
Parameter to compute gradient matrix for.
Returns
-------
np.ndarray
Gradient matrix for parameter `param`.
"""
if param == 'gamma':
eps = 10e-6
r = l2norm_(X, Xstar) + eps
first = -np.exp(- (r / self.l) ** self.gamma)
sec = (r / self.l) ** self.gamma * np.log(r / self.l)
gamma_grad = first * sec
return (gamma_grad)
elif param == 'l':
r = l2norm_(X, Xstar)
num = self.gamma * np.exp(-(r / self.l) ** self.gamma) * (r / self.l) ** self.gamma
l_grad = num / self.l
return (l_grad)
elif param == 'sigmaf':
r = l2norm_(X, Xstar)
sigmaf_grad = (np.exp(-(r / self.l) ** self.gamma))
return (sigmaf_grad)
elif param == 'sigman':
sigman_grad = kronDelta(X, Xstar)
return (sigman_grad)
else:
raise ValueError('Param not found')
class rationalQuadratic:
def __init__(self, alpha=1, l=1, sigmaf=1, sigman=1e-6, bounds=None, parameters=['alpha',
'l',
'sigmaf',
'sigman']):
"""
Rational-quadratic kernel class.
Parameters
----------
alpha: float
Hyperparameter of the rational-quadratic covariance function.
l: float
Characteristic length-scale. Units in input space in which posterior GP values do not
change significantly.
sigmaf: float
Signal variance. Controls the overall scale of the covariance function.
sigman: float
Noise variance. Additive noise in output space.
bounds: list
List of tuples specifying hyperparameter range in optimization procedure.
parameters: list
List of strings specifying which hyperparameters should be optimized.
"""
self.alpha = alpha
self.l = l
self.sigmaf = sigmaf
self.sigman = sigman
self.parameters = parameters
if bounds is not None:
self.bounds = bounds
else:
self.bounds = []
for param in self.parameters:
self.bounds.append(default_bounds[param])
def K(self, X, Xstar):
"""
Computes covariance function values over `X` and `Xstar`.
Parameters
----------
X: np.ndarray, shape=((n, nfeatures))
Instances
Xstar: np.ndarray, shape=((n, nfeatures))
Instances
Returns
-------
np.ndarray
Computed covariance matrix.
"""
r = l2norm_(X, Xstar)
return self.sigmaf * ((1 + r ** 2 / (2 * self.alpha * self.l ** 2)) ** (-self.alpha)) \
+ self.sigman * kronDelta(X, Xstar)
def gradK(self, X, Xstar, param):
"""
Computes gradient matrix for instances `X`, `Xstar` and hyperparameter `param`.
Parameters
----------
X: np.ndarray, shape=((n, nfeatures))
Instances
Xstar: np.ndarray, shape=((n, nfeatures))
Instances
param: str
Parameter to compute gradient matrix for.
Returns
-------
np.ndarray
Gradient matrix for parameter `param`.
"""
if param == 'alpha':
r = l2norm_(X, Xstar)
one = (r ** 2 / (2 * self.alpha * self.l ** 2) + 1) ** (-self.alpha)
two = r ** 2 / ((2 * self.alpha * self.l ** 2) * (r ** 2 / (2 * self.alpha * self.l ** 2) + 1))
three = np.log(r ** 2 / (2 * self.alpha * self.l ** 2) + 1)
alpha_grad = one * (two - three)
return (alpha_grad)
elif param == 'l':
r = l2norm_(X, Xstar)
num = r ** 2 * (r ** 2 / (2 * self.alpha * self.l ** 2) + 1) ** (-self.alpha - 1)
l_grad = num / self.l ** 3
return (l_grad)
elif param == 'sigmaf':
r = l2norm_(X, Xstar)
sigmaf_grad = (1 + r ** 2 / (2 * self.alpha * self.l ** 2)) ** (-self.alpha)
return (sigmaf_grad)
elif param == 'sigman':
sigman_grad = kronDelta(X, Xstar)
return (sigman_grad)
else:
raise ValueError('Param not found')
class expSine:
"""
Exponential sine kernel class.
Parameters
----------
l: float
Characteristic length-scale. Units in input space in which posterior GP values do not
change significantly. l: float
period: float
Period hyperparameter.
bounds: list
List of tuples specifying hyperparameter range in optimization procedure.
parameters: list
List of strings specifying which hyperparameters should be optimized.
"""
def __init__(self, l=1.0, period=1.0, bounds=None, parameters=['l', 'period']):
self.period = period
self.l = l
self.parameters = parameters
if bounds is not None:
self.bounds = bounds
else:
self.bounds = []
for param in self.parameters:
self.bounds.append(default_bounds[param])
def K(self, X, Xstar):
"""
Computes covariance function values over `X` and `Xstar`.
Parameters
----------
X: np.ndarray, shape=((n, nfeatures))
Instances
Xstar: np.ndarray, shape=((n, nfeatures))
Instances
Returns
-------
np.ndarray
Computed covariance matrix.
"""
r = l2norm_(X, Xstar)
num = - 2 * np.sin(np.pi * r / self.period)
return np.exp(num / self.l) ** 2 + 1e-4
def gradK(self, X, Xstar, param):
if param == 'l':
r = l2norm_(X, Xstar)
one = 4 * np.sin(np.pi * r / self.period)
two = np.exp(-4 * np.sin(np.pi * r / self.period) / self.l)
return one * two / (self.l ** 2)
elif param == 'period':
r = l2norm_(X, Xstar)
one = 4 * np.pi * r * np.cos(np.pi * r / self.period)
two = np.exp(-4 * np.sin(np.pi * r / self.period) / self.l)
return one * two / (self.l * self.period ** 2)
class dotProd:
"""
Dot-product kernel class.
Parameters
----------
sigmaf: float
Signal variance. Controls the overall scale of the covariance function.
sigman: float
Noise variance. Additive noise in output space.
bounds: list
List of tuples specifying hyperparameter range in optimization procedure.
parameters: list
List of strings specifying which hyperparameters should be optimized.
"""
def __init__(self, sigmaf=1.0, sigman=1e-6, bounds=None, parameters=['sigmaf', 'sigman']):
self.sigmaf = sigmaf
self.sigman = sigman
self.parameters = parameters
if bounds is not None:
self.bounds = bounds
else:
self.bounds = []
for param in self.parameters:
self.bounds.append(default_bounds[param])
def K(self, X, Xstar):
"""
Computes covariance function values over `X` and `Xstar`.
Parameters
----------
X: np.ndarray, shape=((n, nfeatures))
Instances
Xstar: np.ndarray, shape=((n, nfeatures))
Instances
Returns
-------
np.ndarray
Computed covariance matrix.
"""
return self.sigmaf * np.dot(X, Xstar.T) + self.sigman * kronDelta(X, Xstar)
def gradK(self, X, Xstar, param):
"""
Computes gradient matrix for instances `X`, `Xstar` and hyperparameter `param`.
Parameters
----------
X: np.ndarray, shape=((n, nfeatures))
Instances
Xstar: np.ndarray, shape=((n, nfeatures))
Instances
param: str
Parameter to compute gradient matrix for.
Returns
-------
np.ndarray
Gradient matrix for parameter `param`.
"""
if param == 'sigmaf':
return np.dot(X, Xstar.T)
elif param == 'sigman':
return self.sigmaf * np.dot(X, Xstar.T)
# DEPRECATED
# class arcSin:
# def __init__(self, n, sigma=None):
# if sigma == None:
# self.sigma = np.eye(n)
# else:
# self.sigma = sigma
#
# def k(self, x, xstar):
# num = 2 * np.dot(np.dot(x[np.newaxis, :], self.sigma), xstar)
# a = 1 + 2 * np.dot(np.dot(x[np.newaxis, :], self.sigma), x)
# b = 1 + 2 * np.dot(np.dot(xstar[np.newaxis, :], self.sigma), xstar)
# res = num / np.sqrt(a * b)
# return (res)
| 32.293201
| 107
| 0.506864
| 2,479
| 22,799
| 4.624445
| 0.068173
| 0.031926
| 0.039079
| 0.039253
| 0.869592
| 0.854763
| 0.84447
| 0.82903
| 0.816731
| 0.801989
| 0
| 0.013727
| 0.376903
| 22,799
| 705
| 108
| 32.339007
| 0.793256
| 0.401597
| 0
| 0.657692
| 0
| 0
| 0.026499
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.096154
| false
| 0
| 0.011538
| 0
| 0.257692
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 6
| 578c19413df30be876e843a339372a2a55bf6a15
| 95
| py
| Python
| models/__init__.py
| young917/HGNN
| 41017f4315f459e1250830ca6c498b920d57e80a
| ["MIT"]
| 269
| 2019-05-27T09:10:23.000Z
| 2022-03-29T20:12:42.000Z
| models/__init__.py
| young917/HGNN
| 41017f4315f459e1250830ca6c498b920d57e80a
| ["MIT"]
| 12
| 2019-05-23T12:10:09.000Z
| 2021-12-09T02:05:47.000Z
| models/__init__.py
| young917/HGNN
| 41017f4315f459e1250830ca6c498b920d57e80a
| ["MIT"]
| 76
| 2019-05-24T12:40:21.000Z
| 2022-03-29T15:01:17.000Z
|
from .layers import HGNN_conv, HGNN_fc, HGNN_embedding, HGNN_classifier
from .HGNN import HGNN
| 31.666667
| 71
| 0.831579
| 15
| 95
| 5
| 0.533333
| 0.266667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.115789
| 95
| 2
| 72
| 47.5
| 0.892857
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| 6
| 57fcd22d709f1799b3fe0b4da80953edad697b34
| 37
| py
| Python
| src/egggist/__init__.py
| Preocts/egggist
| 2e80a65c8b9d91a96f101418d3f5f0bf47782508
| ["MIT"]
| null
| null
| null
| src/egggist/__init__.py
| Preocts/egggist
| 2e80a65c8b9d91a96f101418d3f5f0bf47782508
| ["MIT"]
| null
| null
| null
| src/egggist/__init__.py
| Preocts/egggist
| 2e80a65c8b9d91a96f101418d3f5f0bf47782508
| ["MIT"]
| null
| null
| null
|
from .egggist import EggGist # noqa
| 18.5
| 36
| 0.756757
| 5
| 37
| 5.6
| 0.8
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.189189
| 37
| 1
| 37
| 37
| 0.933333
| 0.108108
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| 6
| 17abd3030da3e4bd0f94ec4b781ddf94242a7d91
| 1,069
| py
| Python
| art/classifiers/scikitlearn/__init__.py
| meghana-sesetti/adversarial-robustness-toolbox
| 6a5ce9e4142734ad9004e5c093ef8fa754ea6b39
| ["MIT"]
| 1
| 2021-09-09T13:19:34.000Z
| 2021-09-09T13:19:34.000Z
| art/classifiers/scikitlearn/__init__.py
| Tikquuss/adversarial-robustness-toolbox
| 62ffe7c951d8a60d49a9ea6ac7b04aa4432a3fb7
| ["MIT"]
| 105
| 2020-08-24T06:15:43.000Z
| 2022-03-24T08:03:16.000Z
| art/classifiers/scikitlearn/__init__.py
| Tikquuss/adversarial-robustness-toolbox
| 62ffe7c951d8a60d49a9ea6ac7b04aa4432a3fb7
| ["MIT"]
| 1
| 2021-09-09T13:19:35.000Z
| 2021-09-09T13:19:35.000Z
|
from art.estimators.classification.scikitlearn import SklearnClassifier
from art.estimators.classification.scikitlearn import ScikitlearnClassifier
from art.estimators.classification.scikitlearn import ScikitlearnDecisionTreeClassifier
from art.estimators.classification.scikitlearn import ScikitlearnDecisionTreeRegressor
from art.estimators.classification.scikitlearn import ScikitlearnExtraTreeClassifier
from art.estimators.classification.scikitlearn import ScikitlearnAdaBoostClassifier
from art.estimators.classification.scikitlearn import ScikitlearnBaggingClassifier
from art.estimators.classification.scikitlearn import ScikitlearnExtraTreesClassifier
from art.estimators.classification.scikitlearn import ScikitlearnGradientBoostingClassifier
from art.estimators.classification.scikitlearn import ScikitlearnRandomForestClassifier
from art.estimators.classification.scikitlearn import ScikitlearnLogisticRegression
from art.estimators.classification.scikitlearn import ScikitlearnSVC
from art.estimators.classification.scikitlearn import ScikitlearnLinearSVC
| 76.357143
| 91
| 0.914874
| 91
| 1,069
| 10.747253
| 0.208791
| 0.093047
| 0.225971
| 0.412065
| 0.638037
| 0.638037
| 0
| 0
| 0
| 0
| 0
| 0
| 0.048644
| 1,069
| 13
| 92
| 82.230769
| 0.961652
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 1
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| 6
| 17cf0a283b0b98e4edf0e80f139698ef06b73e69
| 30,531
| py
| Python
| model.py
| KarolisMart/scalable-gnns
| 6651fc067cf04be494bb1a625d718951236cda78
| ["MIT"]
| 2
| 2021-08-24T06:52:20.000Z
| 2021-09-13T17:34:47.000Z
| model.py
| tianyuzelin/scalable-gnns
| 6651fc067cf04be494bb1a625d718951236cda78
| ["MIT"]
| null
| null
| null
| model.py
| tianyuzelin/scalable-gnns
| 6651fc067cf04be494bb1a625d718951236cda78
| ["MIT"]
| 1
| 2021-08-24T06:23:36.000Z
| 2021-08-24T06:23:36.000Z
|
import torch
import torch.nn as nn
from collections import deque
# Ensure x and y stay inside the box and follow PBC
def apply_PBC_to_coordinates(coordinates, box_size=6):
# Only apply to coordinate columns
coordinates[:,:,-4:-2][coordinates[:,:,-4:-2] >= box_size/2] -= box_size
coordinates[:,:,-4:-2][coordinates[:,:,-4:-2] < -box_size/2] += box_size
return coordinates
def apply_PBC_to_distances(distances, box_size=6):
# Only apply to postion columns
distances[:,:,-4:-2][distances[:,:,-4:-2] > box_size/2] -= box_size
distances[:,:,-4:-2][distances[:,:,-4:-2] <= -box_size/2] += box_size
return distances
# Custom MSE loss that takes periodic boundry conditions into account
def PBC_MSE_loss(output, target, box_size=6):
# Get difference
error = output - target
# Deal with periodic boundry conditions
error = apply_PBC_to_distances(error, box_size=box_size)
# Get MSE
loss = torch.mean((error)**2)
return loss
class EdgeModel(torch.nn.Module):
def __init__(self, input_dim=64, output_dim=64, softplus=False, box_size=6):
super(EdgeModel, self).__init__()
self.box_size = box_size
if softplus:
self.edge_mlp = nn.Sequential(nn.Linear(input_dim, output_dim), nn.Softplus(), nn.Linear(output_dim, output_dim), nn.Softplus())
else:
self.edge_mlp = nn.Sequential(nn.Linear(input_dim, output_dim), nn.ReLU(), nn.Linear(output_dim, output_dim), nn.ReLU())
def forward(self, V_no_pos, V_pos, R_s, R_r, u=None, different_reciever=None, different_reciever_pos=None):
# Get edge features (sender mass (+charge) and speed, reciever mass and speed, pbc_distance(sender_poss - reciever_pos))
if different_reciever is None or different_reciever_pos is None:
# Edges between levels
E = torch.cat([V_no_pos.gather(1, R_s.expand(R_s.size(0), R_s.size(1), V_no_pos.size(2))), V_no_pos.gather(1, R_r.expand(R_r.size(0), R_r.size(1), V_no_pos.size(2))), (V_pos.gather(1, R_s.expand(R_s.size(0), R_s.size(1), V_pos.size(2))) - V_pos.gather(1, R_r.expand(R_r.size(0), R_r.size(1), V_pos.size(2))))], dim=-1)
else:
# If reciever features are supplied in a different matrix (recievers are different type nodes from senders)
E = torch.cat([V_no_pos.gather(1, R_s.expand(R_s.size(0), R_s.size(1), V_no_pos.size(2))), different_reciever.gather(1, R_r.expand(R_r.size(0), R_r.size(1), different_reciever.size(2))), (V_pos.gather(1, R_s.expand(R_s.size(0), R_s.size(1), V_pos.size(2))) - different_reciever_pos.gather(1, R_r.expand(R_r.size(0), R_r.size(1), different_reciever_pos.size(2))))], dim=-1)
# Deal with periodic boundry conditions
E[:,:,-2:][E[:,:,-2:] > self.box_size/2] -= self.box_size
E[:,:,-2:][E[:,:,-2:] <= -self.box_size/2] += self.box_size
if u is not None:
E = torch.cat([E, u.unsqueeze(2).expand(E.size(0), E.size(1), 1)], dim=-1)
return self.edge_mlp(E)
class NodeModel(torch.nn.Module):
def __init__(self, input_dim=64, output_dim=64, softplus=False):
super(NodeModel, self).__init__()
if softplus:
self.node_mlp = nn.Sequential(nn.Linear(input_dim, output_dim), nn.Softplus(), nn.Linear(output_dim, output_dim), nn.Softplus(), nn.Linear(output_dim, output_dim), nn.Softplus())
else:
self.node_mlp = nn.Sequential(nn.Linear(input_dim, output_dim), nn.ReLU(), nn.Linear(output_dim, output_dim), nn.ReLU(), nn.Linear(output_dim, output_dim), nn.ReLU())
def forward(self, V, E_n, u=None, R_r=None):
if R_r is None:
# Aggregate edges for each reciever node using the knwoledge that subsequent n_edges_per_node blocks of rows belong to the same reciever per R_r construction
out = torch.sum(E_n.view(V.size(0), V.size(1), E_n.size(1) // V.size(1), E_n.size(-1)), dim=2)
else:
# If recievers can have a different number of edges
out = torch.zeros((E_n.size(0), V.size(1), E_n.size(2)), device=E_n.device).scatter_add_(1, R_r.expand(R_r.size(0), R_r.size(1), E_n.size(2)), E_n)
out = torch.cat([V, out], dim=-1)
if u is not None:
# Expand global param u from one per sample in a batch to one per particle
out = torch.cat([out, u.unsqueeze(2).expand(out.size(0), out.size(1), 1)], dim=-1)
return self.node_mlp(out)
class GlobalModel(torch.nn.Module):
def __init__(self, input_dim=64, output_dim=64):
super(GlobalModel, self).__init__()
self.global_mlp = nn.Sequential(nn.Linear(input_dim, output_dim), nn.Softplus(), nn.Linear(output_dim, output_dim), nn.Softplus())
def forward(self, *args, u=None):
out = torch.cat([torch.sum(arg, dim=1) for arg in args], dim=-1)
if u is not None:
# Append the global parameter u (one value per sample); after the sum over
# the node/edge axis, out is 2-D, so no per-particle expansion is needed
out = torch.cat([out, u], dim=-1)
return self.global_mlp(out)
class BaseIntegratorModel(torch.nn.Module):
def forward_step(self, mass_charge, V_0, *args):
raise NotImplementedError
def euler(self, dt, mass_charge, V_0, *args):
# Euler method
dt = dt.unsqueeze(2).expand(V_0.size(0), V_0.size(1), 1)
k1 = self.forward_step(mass_charge, V_0, *args)
dy = dt * k1
return apply_PBC_to_coordinates(V_0 + dy, box_size=self.box_size)
def rk4(self, dt, mass_charge, V_0, *args):
# NOTE There is an alternative formulation with a smaller error
# Expand dt from one per sample in a batch to one per particle
dt = dt.unsqueeze(2).expand(V_0.size(0), V_0.size(1), 1)
dt2 = dt / 2.0
k1 = self.forward_step(mass_charge, V_0, *args)
k2 = self.forward_step(mass_charge, apply_PBC_to_coordinates(V_0 + k1 * dt2, box_size=self.box_size), *args)
k3 = self.forward_step(mass_charge, apply_PBC_to_coordinates(V_0 + k2 * dt2, box_size=self.box_size), *args)
k4 = self.forward_step(mass_charge, apply_PBC_to_coordinates(V_0 + k3 * dt, box_size=self.box_size), *args)
dy = dt / 6.0 * (k1 + 2 * k2 + 2 * k3 + k4)
return apply_PBC_to_coordinates(V_0 + dy, box_size=self.box_size)
def forward(self, state, R_s, R_r, dt):
raise NotImplementedError
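# Hedged usage sketch for the integrator hooks above (a toy subclass invented
# for illustration, not a model from this codebase): forward_step returns
# constant canonical derivatives, so a single euler() step moves each particle
# by dt and the PBC helper wraps coordinates that leave the box.
class _ToyIntegrator(BaseIntegratorModel):
    def __init__(self, box_size=6):
        super(_ToyIntegrator, self).__init__()
        self.box_size = box_size
    def forward_step(self, mass_charge, V_0):
        # dq/dt = 1 on both position axes, dp/dt = 0
        return torch.cat([torch.ones_like(V_0[:, :, :2]), torch.zeros_like(V_0[:, :, 2:])], dim=2)
def _demo_toy_integrator():
    V = torch.tensor([[[2.5, 0.0, 0.0, 0.0]]])  # one particle near the +x face
    dt = torch.ones(1, 1)
    return _ToyIntegrator().euler(dt, None, V)  # x steps to 3.5, then wraps to -2.5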
class DeltaGN(torch.nn.Module):
def __init__(self, box_size=6, edge_output_dim=-1, node_output_dim=-1, simulation_type='gravity'):
super(DeltaGN, self).__init__()
if edge_output_dim < 1:
edge_output_dim = 150
if node_output_dim < 1:
node_output_dim = 100
self.simulation_type = simulation_type
if self.simulation_type == 'coulomb':
node_input_dim = 4
# Used to drop position from particles/nodes
self.non_pos_indices = [0, 1, 4, 5] # (mass, charge, vx, vy)
else:
node_input_dim = 3
# Used to drop position from particles/nodes
self.non_pos_indices = [0, 3, 4] # (mass, vx, vy)
self.edge_model = EdgeModel(input_dim=2*node_input_dim+2+1, output_dim=edge_output_dim, box_size=box_size) # input dim: sender and receiver nodes + distance vector + dt
self.node_model = NodeModel(input_dim=node_input_dim+edge_output_dim+1, output_dim=node_output_dim) # input dim: node features + embedded edge features + dt
# Linear layer to transform node embeddings to canonical coordinate change (four features: (x,y,v_x,v_y))
self.linear = nn.Linear(node_output_dim, 4)
# Set box size
self.box_size = box_size
def forward(self, V, R_s, R_r, dt):
R_s = R_s.unsqueeze(2)
R_r = R_r.unsqueeze(2)
# Edge block
E_n = self.edge_model(V[:, :, self.non_pos_indices], V[:, :, -4:-2], R_s, R_r, dt)
# Node block
V_n = self.node_model(V[:, :, self.non_pos_indices], E_n, dt)
new_coordinates = V[:, :, -4:] + self.linear(V_n)
# Deal with periodic boundary conditions
return apply_PBC_to_coordinates(new_coordinates, box_size=self.box_size)
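# Hedged smoke test for DeltaGN (shapes inferred from forward() above, not a
# documented API): three gravity particles, fully connected graph with edge
# rows grouped by receiver so NodeModel's reshape-based aggregation applies.
def _demo_delta_gn():
    B, N = 1, 3
    V = torch.rand(B, N, 5)                   # [m, x, y, vx, vy]
    R_s = torch.tensor([[1, 2, 0, 2, 0, 1]])  # senders
    R_r = torch.tensor([[0, 0, 1, 1, 2, 2]])  # receivers, blocked per node
    dt = torch.full((B, 1), 0.1)
    model = DeltaGN(box_size=6)
    return model(V, R_s, R_r, dt)             # (B, N, 4): next [x, y, vx, vy]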
class HOGN(BaseIntegratorModel):
def __init__(self, box_size=6, edge_output_dim=150, node_output_dim=100, global_output_dim=100, integrator='rk4', simulation_type='gravity'):
super(HOGN, self).__init__()
if edge_output_dim < 1:
edge_output_dim = 150
if node_output_dim < 1:
node_output_dim = 100
if global_output_dim < 1:
global_output_dim = 100
self.simulation_type = simulation_type
# Set number of node features, excluding the position (x,y)
if self.simulation_type == 'coulomb':
node_input_dim = 4 # (mass, charge, px, py)
else:
node_input_dim = 3 # (mass, px, py)
self.edge_model = EdgeModel(input_dim=2*node_input_dim+2, output_dim=edge_output_dim, softplus=True, box_size=box_size) # input dim: sender and receiver node features + distance vector
self.node_model = NodeModel(input_dim=node_input_dim+edge_output_dim, output_dim=node_output_dim, softplus=True) # input dim: input node features + embedded edge features
self.global_model = GlobalModel(input_dim=node_output_dim+edge_output_dim, output_dim=global_output_dim) # input dim: embedded node features and embedded edge features
# Linear layer to transform global embeddings to a Hamiltonian
self.linear = nn.Linear(global_output_dim, 1)
# Set box size
self.box_size = box_size
# Set integrator to use
self.integrator = integrator
# Here vertices V are in canonical coordinates [x,y,px,py]
def forward_step(self, mass_charge, V, R_s, R_r):
# Drop position from particles/nodes and add mass and charge (if present)
V_no_pos = torch.cat([mass_charge, V[:,:,2:]], dim=2)
R_s = R_s.unsqueeze(2)
R_r = R_r.unsqueeze(2)
# Edge block
E_n = self.edge_model(V_no_pos, V[:,:,:2], R_s, R_r)
# Node block
V_n = self.node_model(V_no_pos, E_n)
# Global block
U_n = self.global_model(V_n, E_n)
# Hamiltonian
H = self.linear(U_n)
# Hamiltonian derivatives w.r.t. the inputs: dH/dq and dH/dp
partial_derivatives = torch.autograd.grad(H.sum(), V, create_graph=True)[0]
# Return dq and dp
return torch.cat([partial_derivatives[:,:,2:], partial_derivatives[:,:,:2] * (-1.0)], dim=2) # dq=dH/dp, dp=-dH/dq
def forward(self, state, R_s, R_r, dt):
# Transform inputs [m, x, y, vx, vy] to canonical coordinates [x,y,px,py]
mass_charge = state[:,:,:-4] # if no charge = [m]; with charge = [m, c]
momentum = state[:,:,-2:] * mass_charge[:,:,0].unsqueeze(2)
V = torch.cat([state[:,:,-4:-2], momentum], dim=2)
# Require grad to be able to compute partial derivatives
if not V.requires_grad:
V.requires_grad = True
# Compute updated canonical coordinates
if self.integrator == 'rk4':
new_canonical_coordinates = self.rk4(dt, mass_charge, V, R_s, R_r)
elif self.integrator == 'euler':
new_canonical_coordinates = self.euler(dt, mass_charge, V, R_s, R_r)
else:
raise ValueError("unknown integrator: {}".format(self.integrator))
# Convert back to original state format [x, y, vx, vy]
velocity = torch.div(new_canonical_coordinates[:,:,2:], mass_charge[:,:,0].unsqueeze(2))
new_state = torch.cat([new_canonical_coordinates[:,:,:2], velocity], dim=2)
return new_state
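# Hedged smoke test for HOGN on the same hypothetical graph as the DeltaGN
# sketch: state rows are [m, x, y, vx, vy]; forward() differentiates a learned
# Hamiltonian, so it enables requires_grad on the canonical coordinates itself.
def _demo_hogn():
    B, N = 1, 3
    state = torch.rand(B, N, 5)
    state[:, :, 0] += 0.5                     # keep masses away from zero
    R_s = torch.tensor([[1, 2, 0, 2, 0, 1]])
    R_r = torch.tensor([[0, 0, 1, 1, 2, 2]])
    dt = torch.full((B, 1), 0.1)
    model = HOGN(box_size=6, integrator='euler')
    return model(state, R_s, R_r, dt)         # (B, N, 4): next [x, y, vx, vy]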
class HierarchicalDeltaGN(torch.nn.Module):
def __init__(self, box_size=6, edge_output_dim=-1, node_output_dim=-1, simulation_type='gravity'):
super(HierarchicalDeltaGN, self).__init__()
if edge_output_dim < 1:
edge_output_dim = 150
if node_output_dim < 1:
node_output_dim = 100
self.simulation_type = simulation_type
# Set number of node features, excluding the position (x,y)
if self.simulation_type == 'coulomb':
node_input_dim = 4
self.non_pos_indices = [0, 1, 4, 5] # (mass, charge, vx, vy)
else:
node_input_dim = 3
self.non_pos_indices = [0, 3, 4] # (mass, vx, vy)
self.edge_to_super_model = EdgeModel(input_dim=2*node_input_dim+2+1, output_dim=node_output_dim, box_size=box_size) # input dim: sender (particle) and receiver (super/cell) nodes + distance vector + dt
self.edge_to_upper_model = EdgeModel(input_dim=node_output_dim+2*node_input_dim+2+1, output_dim=node_output_dim, box_size=box_size) # input dim: sender and receiver (super) nodes (base node input features + vertex-level embedding) + distance vector + dt
self.super_edge_model = EdgeModel(input_dim=2*(node_input_dim+node_output_dim)+2+1, output_dim=edge_output_dim, box_size=box_size) # input dim: sender and receiver super nodes (base features + lower-level embedding) + distance vector + dt
self.super_node_model = NodeModel(input_dim=node_input_dim+node_output_dim+edge_output_dim+1, output_dim=node_output_dim) # input dim: input node features + updated features + embedded super edge features + dt
self.edge_from_super_model = EdgeModel(input_dim=2*node_input_dim+node_output_dim+2+1, output_dim=edge_output_dim, box_size=box_size) # input dim: sender (super) and receiver (particle) nodes + distance vector + dt
self.edge_from_upper_model = EdgeModel(input_dim=2*(node_input_dim+node_output_dim)+2+1, output_dim=edge_output_dim, box_size=box_size) # input dim: sender (super) and receiver (cell) nodes + distance vector + dt
self.edge_model = EdgeModel(input_dim=2*node_input_dim+2+1, output_dim=edge_output_dim, box_size=box_size) # input dim: sender and receiver nodes + distance vector + dt
self.node_model = NodeModel(input_dim=node_input_dim+edge_output_dim+1, output_dim=node_output_dim) # input dim: node features + embedded edge features + dt
# Linear layer to transform node embeddings to canonical coordinate change (four features: (x,y,v_x,v_y))
self.linear = nn.Linear(node_output_dim, 4)
# Set box size
self.box_size = box_size
def forward(self, V, R_s, R_r, assignments, V_supers, super_graphs, dt):
R_s = R_s.unsqueeze(2)
R_r = R_r.unsqueeze(2)
R_vertex_to_super_s = assignments[0][:,:,1].unsqueeze(2)
R_vertex_to_super_r = assignments[0][:,:,0].unsqueeze(2)
### Embedding of particles into a super graph
# Edge block
V_lower_pos = V_supers[0][:, :, -4:-2]
E_to_super = self.edge_to_super_model(V[:, :, self.non_pos_indices], V[:, :, -4:-2], R_vertex_to_super_s, R_vertex_to_super_r, dt, V_supers[0][:, :, self.non_pos_indices], V_supers[0][:, :, -4:-2])
# Sum up incoming influences for each node
V_lower = torch.zeros((E_to_super.size(0), V_supers[0].size(1), E_to_super.size(2)), device=E_to_super.device).scatter_add_(1, R_vertex_to_super_r.expand(R_vertex_to_super_r.size(0), R_vertex_to_super_r.size(1), E_to_super.size(2)), E_to_super)
V_lower = torch.cat([V_supers[0][:, :, self.non_pos_indices], V_lower], dim=-1)
embeddings = deque([[V_lower, V_lower_pos]])
##### Upward pass
for assignment, V_super in zip(assignments[1:], V_supers[1:]):
R_vertex_to_super_s = assignment[:,:,1].unsqueeze(2)
R_vertex_to_super_r = assignment[:,:,0].unsqueeze(2)
# Edge block
E_to_super = self.edge_to_upper_model(V_lower, V_lower_pos, R_vertex_to_super_s, R_vertex_to_super_r, dt, V_super[:, :, self.non_pos_indices], V_super[:, :, -4:-2])
# Sum up incoming influences for each node
V_lower = torch.zeros((E_to_super.size(0), V_super.size(1), E_to_super.size(2)), device=E_to_super.device).scatter_add_(1, R_vertex_to_super_r.expand(R_vertex_to_super_r.size(0), R_vertex_to_super_r.size(1), E_to_super.size(2)), E_to_super)
V_lower_pos = V_super[:, :, -4:-2]
V_lower = torch.cat([V_super[:, :, self.non_pos_indices], V_lower], dim=-1)
embeddings.appendleft([V_lower, V_lower_pos])
del R_vertex_to_super_s, R_vertex_to_super_r, V_lower_pos, E_to_super, V_lower
V_current, V_current_pos = embeddings.popleft()
R_s_super = super_graphs[-1][:,:,0].unsqueeze(2)
R_r_super = super_graphs[-1][:,:,1].unsqueeze(2)
R_super_to_vertex_s = assignments[-1][:,:,0].unsqueeze(2)
R_super_to_vertex_r = assignments[-1][:,:,1].unsqueeze(2)
E_current_n = self.super_edge_model(V_current, V_current_pos, R_s_super, R_r_super, dt)
# Super node block
V_upper = self.super_node_model(V_current, E_current_n, dt, R_r=R_r_super)
V_upper = torch.cat([V_current[:, :, :-V_upper.size(2)], V_upper], dim=-1)
V_upper_pos = V_current_pos
##### Downward pass
for embedding, super_graph, assignment in zip(embeddings, reversed(super_graphs[:-1]), reversed(assignments[1:])):
V_current, V_current_pos = embedding
R_s_super = super_graph[:,:,0].unsqueeze(2)
R_r_super = super_graph[:,:,1].unsqueeze(2)
R_super_to_vertex_s = assignment[:,:,0].unsqueeze(2)
R_super_to_vertex_r = assignment[:,:,1].unsqueeze(2)
upper_influence = self.edge_from_upper_model(V_upper, V_upper_pos, R_super_to_vertex_s, R_super_to_vertex_r, dt, V_current, V_current_pos)
E_current_n = self.super_edge_model(V_current, V_current_pos, R_s_super, R_r_super, dt)
E_current_n = torch.cat([E_current_n, upper_influence], dim=1)
R_r_super = torch.cat([R_r_super, R_super_to_vertex_r], dim=1)
V_upper = self.super_node_model(V_current, E_current_n, dt, R_r=R_r_super)
V_upper = torch.cat([V_current[:, :, :-V_upper.size(2)], V_upper], dim=-1)
V_upper_pos = V_current_pos
del E_current_n, R_s_super, R_r_super, embeddings, super_graphs
R_super_to_vertex_s = assignments[0][:,:,0].unsqueeze(2)
R_super_to_vertex_r = assignments[0][:,:,1].unsqueeze(2)
### Cell -> Particle edges
E_n_s = self.edge_from_super_model(V_upper, V_upper_pos, R_super_to_vertex_s, R_super_to_vertex_r, dt, V[:, :, self.non_pos_indices], V[:, :, -4:-2])
del assignments, V_supers, V_upper, V_upper_pos, R_super_to_vertex_s
### Calculating change of lower node particles
# Edge block
E_n = self.edge_model(V[:, :, self.non_pos_indices], V[:, :, -4:-2], R_s, R_r, dt)
E_n = torch.cat([E_n, E_n_s], dim=1)
R_r = torch.cat([R_r, R_super_to_vertex_r], dim=1)
# Node block
V_n = self.node_model(V[:, :, self.non_pos_indices], E_n, dt, R_r=R_r)
new_coordinates = V[:, :, -4:] + self.linear(V_n)
# Deal with periodic boundary conditions
return apply_PBC_to_coordinates(new_coordinates, box_size=self.box_size)
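# Shape notes for the hierarchical inputs above (inferred from the indexing,
# not a documented contract): `assignments` is a list of (B, A_l, 2) long
# tensors whose columns are [upper_index, lower_index] per level, `V_supers`
# is a list of super-node feature tensors laid out like V, and `super_graphs`
# is a list of (B, E_l, 2) [sender, receiver] edge lists between super nodes.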
class HierarchicalHOGN(BaseIntegratorModel):
def __init__(self, box_size=6, edge_output_dim=-1, node_output_dim=-1, integrator='rk4', simulation_type='gravity'):
super(HierarchicalHOGN, self).__init__()
if edge_output_dim < 1:
edge_output_dim = 150
if node_output_dim < 1:
node_output_dim = 100
self.node_output_dim = node_output_dim
self.simulation_type = simulation_type
# Set number of node features, excluding the position (x,y)
if self.simulation_type == 'coulomb':
node_input_dim = 4
self.non_pos_indices = [0, 1, 4, 5] # (mass, charge, px, py)
else:
node_input_dim = 3
self.non_pos_indices = [0, 3, 4] # (mass, px, py)
self.edge_to_super_model = EdgeModel(input_dim=2*node_input_dim+2, output_dim=node_output_dim, box_size=box_size, softplus=True) # input dim: sender (particle) and receiver (cell/super) nodes + distance vector
self.edge_to_upper_model = EdgeModel(input_dim=node_output_dim+2*node_input_dim+2, output_dim=node_output_dim, box_size=box_size, softplus=True) # input dim: sender (particle) and receiver (cell/super) nodes (node input features + features from node embedding) + distance vector
self.super_edge_model = EdgeModel(input_dim=2*(node_input_dim+node_output_dim)+2, output_dim=edge_output_dim, box_size=box_size, softplus=True) # input dim: sender and receiver nodes + features from lower layer + distance vector
self.super_node_model = NodeModel(input_dim=node_input_dim+node_output_dim+edge_output_dim, output_dim=node_output_dim, softplus=True) # input dim: input node features + updated features + embedded super edge features
self.edge_from_super_model = EdgeModel(input_dim=2*node_input_dim+node_output_dim+2, output_dim=edge_output_dim, box_size=box_size, softplus=True) # input dim: sender (super) and receiver (particle) nodes (node input features + features from super node) + distance vector
self.edge_from_upper_model = EdgeModel(input_dim=2*(node_input_dim+node_output_dim)+2, output_dim=edge_output_dim, box_size=box_size, softplus=True) # input dim: sender (super) and receiver (particle) nodes (node input features + embedded features) + distance vector
self.edge_model = EdgeModel(input_dim=2*node_input_dim+2, output_dim=edge_output_dim, box_size=box_size, softplus=True) # input dim: sender and receiver nodes + distance vector
self.node_model = NodeModel(input_dim=node_input_dim+edge_output_dim, output_dim=node_output_dim, softplus=True) # input dim: input node features + embedded edge features
self.global_model = GlobalModel(input_dim=edge_output_dim + node_output_dim, output_dim=node_output_dim) # input dim: embedded node features and embedded edge features
# Linear layer to transform global embeddings to a (scalar) Hamiltonian
self.linear = nn.Linear(node_output_dim, 1)
# Set box size
self.box_size = box_size
# Set integrator to use
self.integrator = integrator
def get_super_features(self, mass_charge, pos, momentum, R_to_upper_r, upper_count, batch_size=1):
# Compute cell features from vertex features to preserve gradient flow (appears not to be necessary)
pos_weighted = pos * mass_charge[:,:,0].unsqueeze(2)
pos_super = torch.zeros((batch_size, upper_count, pos.size(2)), device=pos_weighted.device).scatter_add_(1, R_to_upper_r.expand(R_to_upper_r.size(0), R_to_upper_r.size(1), pos.size(2)), pos_weighted)
momentum_super = torch.zeros((batch_size, upper_count, momentum.size(2)), device=momentum.device).scatter_add_(1, R_to_upper_r.expand(R_to_upper_r.size(0), R_to_upper_r.size(1), momentum.size(2)), momentum)
cell_mass_charge = torch.zeros((mass_charge.size(0), upper_count, mass_charge.size(2)), device=mass_charge.device).scatter_add_(1, R_to_upper_r.expand(R_to_upper_r.size(0), R_to_upper_r.size(1), mass_charge.size(2)), mass_charge)
pos_super = pos_super / cell_mass_charge[:,:,0].unsqueeze(2)
return torch.cat([cell_mass_charge, pos_super, momentum_super], axis=-1)
def forward_step(self, mass_charge, V, R_s, R_r, assignments, V_supers, super_graphs):
batch_size = V.size(0)
# Drop position from particles/nodes
V_no_pos = torch.cat([mass_charge, V[:,:,2:]],dim=-1)
R_s = R_s.unsqueeze(2)
R_r = R_r.unsqueeze(2)
R_vertex_to_super_s = assignments[0][:,:,1].unsqueeze(2)
R_vertex_to_super_r = assignments[0][:,:,0].unsqueeze(2)
### Embedding of particles into a super graph
# Edge block
upper_count = V_supers[0].size(1)
V_super = self.get_super_features(mass_charge, V[:,:,:2], V[:,:,2:], R_vertex_to_super_r, upper_count, batch_size=batch_size)
V_lower_pos = V_supers[0][:,:,-4:-2]
E_to_super = self.edge_to_super_model(V_no_pos, V[:, :, :2], R_vertex_to_super_s, R_vertex_to_super_r, different_reciever=V_super[:, :, self.non_pos_indices], different_reciever_pos=V_super[:,:,-4:-2])
# Sum up incoming influences for each node
V_lower = torch.zeros((E_to_super.size(0), V_supers[0].size(1), E_to_super.size(2)), device=E_to_super.device).scatter_add_(1, R_vertex_to_super_r.expand(R_vertex_to_super_r.size(0), R_vertex_to_super_r.size(1), E_to_super.size(2)), E_to_super)
del E_to_super
V_lower = torch.cat([V_super[:, :, self.non_pos_indices], V_lower], dim=-1)
embeddings = deque([[V_lower, V_lower_pos]])
##### Upward pass + interactions between super nodes
for assignment, V_super, super_graph in zip(assignments[1:], V_supers[1:], super_graphs[1:]):
R_vertex_to_super_s = assignment[:,:,1].unsqueeze(2)
R_vertex_to_super_r = assignment[:,:,0].unsqueeze(2)
upper_count = V_super.size(1)
V_super = self.get_super_features(V_lower[:,:,:-(self.node_output_dim+2)], V_lower_pos, V_lower[:,:,-(self.node_output_dim+2):-self.node_output_dim], R_vertex_to_super_r, upper_count, batch_size=batch_size)
# Edge block
E_to_super = self.edge_to_upper_model(V_lower, V_lower_pos, R_vertex_to_super_s, R_vertex_to_super_r, different_reciever=V_super[:, :, self.non_pos_indices], different_reciever_pos=V_super[:,:,-4:-2])
del R_vertex_to_super_s
# Sum up incoming influences for each node
V_lower = torch.zeros((E_to_super.size(0), V_super.size(1), E_to_super.size(2)), device=E_to_super.device).scatter_add_(1, R_vertex_to_super_r.expand(R_vertex_to_super_r.size(0), R_vertex_to_super_r.size(1), E_to_super.size(2)), E_to_super)
del E_to_super, R_vertex_to_super_r
# Set values for the next iteration
V_lower_pos = V_super[:,:,-4:-2]
V_lower = torch.cat([V_super[:, :, self.non_pos_indices], V_lower], dim=-1)
del V_super
embeddings.appendleft([V_lower, V_lower_pos])
del V_lower_pos, V_lower
V_current, V_current_pos = embeddings.popleft()
R_s_super = super_graphs[-1][:,:,0].unsqueeze(2)
R_r_super = super_graphs[-1][:,:,1].unsqueeze(2)
R_super_to_vertex_s = assignments[-1][:,:,0].unsqueeze(2)
R_super_to_vertex_r = assignments[-1][:,:,1].unsqueeze(2)
E_current_n = self.super_edge_model(V_current, V_current_pos, R_s_super, R_r_super)
# Super node block
V_upper = self.super_node_model(V_current, E_current_n, R_r=R_r_super)
V_upper = torch.cat([V_current[:, :, :-V_upper.size(2)], V_upper], dim=-1)
V_upper_pos = V_current_pos
##### Downward pass
for embedding, super_graph, assignment in zip(embeddings, reversed(super_graphs[:-1]), reversed(assignments[1:])):
V_current, V_current_pos = embedding
R_s_super = super_graph[:,:,0].unsqueeze(2)
R_r_super = super_graph[:,:,1].unsqueeze(2)
R_super_to_vertex_s = assignment[:,:,0].unsqueeze(2)
R_super_to_vertex_r = assignment[:,:,1].unsqueeze(2)
upper_influence = self.edge_from_upper_model(V_upper, V_upper_pos, R_super_to_vertex_s, R_super_to_vertex_r, different_reciever=V_current, different_reciever_pos=V_current_pos)
E_current_n = self.super_edge_model(V_current, V_current_pos, R_s_super, R_r_super)
E_current_n = torch.cat([E_current_n, upper_influence], dim=1)
R_r_super = torch.cat([R_r_super, R_super_to_vertex_r], dim=1)
V_upper = self.super_node_model(V_current, E_current_n, R_r=R_r_super)
V_upper = torch.cat([V_current[:, :, :-V_upper.size(2)], V_upper], dim=-1)
V_upper_pos = V_current_pos
del E_current_n, R_s_super, R_r_super, embeddings, super_graphs
R_super_to_vertex_s = assignments[0][:,:,0].unsqueeze(2)
R_super_to_vertex_r = assignments[0][:,:,1].unsqueeze(2)
### Cell -> Particle edges
E_n_s = self.edge_from_super_model(V_upper, V_upper_pos, R_super_to_vertex_s, R_super_to_vertex_r, different_reciever=V_no_pos, different_reciever_pos=V[:, :, :2])
del assignments, V_supers, V_upper, V_upper_pos, R_super_to_vertex_s
### Calculating change of lower node particles
# Edge block
E_n = self.edge_model(V_no_pos, V[:, :, :2], R_s, R_r)
E_n = torch.cat([E_n, E_n_s], dim=1)
R_r = torch.cat([R_r, R_super_to_vertex_r], dim=1)
# Node block
V_n = self.node_model(V_no_pos, E_n, R_r=R_r)
# Global block
U_n = self.global_model(V_n, E_n)
del V_n, E_n
# Hamiltonian
H = self.linear(U_n)
# Hamiltonian derivatives w.r.t. the inputs: dH/dq and dH/dp
partial_derivatives = torch.autograd.grad(H.sum(), V, create_graph=True)[0]
# Return dq and dp
return torch.cat([partial_derivatives[:,:,2:], partial_derivatives[:,:,:2] * (-1.0)], dim=2) # dq=dH/dp, dp=-dH/dq
def forward(self, state, R_s, R_r, assignments, V_supers, super_graphs, dt):
# Transform inputs [m, x, y, vx, vy] to canonical coordinates [x,y,px,py]
mass_charge = state[:,:,:-4] # if no charge = [m]; with charge = [m, c]
momentum = state[:,:,-2:] * mass_charge[:,:,0].unsqueeze(2)
V = torch.cat([state[:,:,-4:-2], momentum], dim=2)
# Require grad to be able to compute partial derivatives
if not V.requires_grad:
V.requires_grad = True
# Compute updated canonical coordinates
if self.integrator == 'rk4':
new_canonical_coordinates = self.rk4(dt, mass_charge, V, R_s, R_r, assignments, V_supers, super_graphs)
elif self.integrator == 'euler':
new_canonical_coordinates = self.euler(dt, mass_charge, V, R_s, R_r, assignments, V_supers, super_graphs)
else:
raise ValueError("unknown integrator: {}".format(self.integrator))
# Convert back to original state format [x, y, vx, vy]
velocity = torch.div(new_canonical_coordinates[:,:,2:], mass_charge[:,:,0].unsqueeze(2))
new_state = torch.cat([new_canonical_coordinates[:,:,:2], velocity], dim=2)
return new_state
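# The aggregation trick used throughout this file: scatter_add_ sums each edge
# message into its receiver's row. A standalone illustration with hypothetical
# sizes (one batch, three edges feeding two nodes):
def _demo_scatter_add():
    E_n = torch.tensor([[[1.0], [10.0], [100.0]]])  # 3 edge messages
    R_r = torch.tensor([[[0], [1], [0]]])           # receiver index per edge
    out = torch.zeros(1, 2, 1).scatter_add_(1, R_r.expand(1, 3, 1), E_n)
    return out  # [[[101.], [10.]]]: edges 0 and 2 both land on node 0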
| 52.549053
| 384
| 0.664079
| 4,857
| 30,531
| 3.852996
| 0.05559
| 0.058673
| 0.029871
| 0.025436
| 0.859143
| 0.83499
| 0.819921
| 0.797371
| 0.783798
| 0.773004
| 0
| 0.021948
| 0.210573
| 30,531
| 580
| 385
| 52.639655
| 0.754502
| 0.176018
| 0
| 0.606509
| 0
| 0
| 0.003119
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.071006
| false
| 0
| 0.008876
| 0
| 0.147929
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
17f8f085d1be650b34c5249ae6fea89baf58e97b
| 140
|
py
|
Python
|
graphgallery/functional/dense/__init__.py
|
Aria461863631/GraphGallery
|
7b62f80ab36b29013bea2538a6581fc696a80201
|
[
"MIT"
] | null | null | null |
graphgallery/functional/dense/__init__.py
|
Aria461863631/GraphGallery
|
7b62f80ab36b29013bea2538a6581fc696a80201
|
[
"MIT"
] | null | null | null |
graphgallery/functional/dense/__init__.py
|
Aria461863631/GraphGallery
|
7b62f80ab36b29013bea2538a6581fc696a80201
|
[
"MIT"
] | null | null | null |
from .attr_transform import *
from .flip import *
from .onehot import *
from .node_sim import knn_graph, attr_sim
from .similarity import *
| 23.333333
| 41
| 0.778571
| 21
| 140
| 5
| 0.52381
| 0.285714
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.15
| 140
| 5
| 42
| 28
| 0.882353
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
aa2d34c11ca582e6cce57d83cd747fb824fef7b0
| 4,613
|
py
|
Python
|
ConsumerService/tests/tests.py
|
erlichg/clew
|
9a08893c3bac429787d4b310a19e39955e5531b9
|
[
"MIT"
] | null | null | null |
ConsumerService/tests/tests.py
|
erlichg/clew
|
9a08893c3bac429787d4b310a19e39955e5531b9
|
[
"MIT"
] | null | null | null |
ConsumerService/tests/tests.py
|
erlichg/clew
|
9a08893c3bac429787d4b310a19e39955e5531b9
|
[
"MIT"
] | null | null | null |
import unittest
from ConsumerService.consumer.utils import calculate_periods
class TestPeriodCalculationMethods(unittest.TestCase):
def test_empty(self):
self.assertEqual(calculate_periods([]), {})
def test_double_start(self):
with self.assertRaises(Exception):
calculate_periods([
{
"p_id": "1",
"medication_name": "X",
"action": "start",
"event_time": "2021-01-01T00:00:00+0000"
},
{
"p_id": "1",
"medication_name": "X",
"action": "start",
"event_time": "2021-01-01T01:00:00+0000"
}
])
def test_stop_without_start(self):
with self.assertRaises(Exception):
calculate_periods([
{
"p_id": "1",
"medication_name": "X",
"action": "stop",
"event_time": "2021-01-01T00:00:00+0000"
}
])
def test_cancel_start_without_start(self):
with self.assertRaises(Exception):
calculate_periods([
{
"p_id": "1",
"medication_name": "X",
"action": "cancel_start",
"event_time": "2021-01-01T00:00:00+0000"
}
])
def test_cancel_stop_without_start(self):
with self.assertRaises(Exception):
calculate_periods([
{
"p_id": "1",
"medication_name": "X",
"action": "start",
"event_time": "2021-01-01T00:00:00+0000"
},
{
"p_id": "1",
"medication_name": "X",
"action": "cancel_stop",
"event_time": "2021-01-01T01:00:00+0000"
}
])
def test_same_time(self):
self.assertEqual(calculate_periods([
{
"p_id": "1",
"medication_name": "X",
"action": "start",
"event_time": "2021-01-01T00:00:00+0000"
},
{
"p_id": "1",
"medication_name": "X",
"action": "stop",
"event_time": "2021-01-01T00:00:00+0000"
}
]), {'X': [("2021-01-01T00:00:00+0000", "2021-01-01T00:00:00+0000")]})
def test_open_period(self):
self.assertEqual(calculate_periods([
{
"p_id": "1",
"medication_name": "X",
"action": "start",
"event_time": "2021-01-01T00:00:00+0000"
}
]), {'X': [("2021-01-01T00:00:00+0000",)]})
def test_cancel_start(self):
self.assertEqual(calculate_periods([
{
"p_id": "1",
"medication_name": "X",
"action": "start",
"event_time": "2021-01-01T00:00:00+0000"
},
{
"p_id": "1",
"medication_name": "X",
"action": "cancel_start",
"event_time": "2021-01-01T01:00:00+0000"
},
{
"p_id": "1",
"medication_name": "X",
"action": "start",
"event_time": "2021-01-01T02:00:00+0000"
},
{
"p_id": "1",
"medication_name": "X",
"action": "stop",
"event_time": "2021-01-01T03:00:00+0000"
}
]), {'X': [("2021-01-01T02:00:00+0000", "2021-01-01T03:00:00+0000")]})
def test_simple_period(self):
self.assertEqual(calculate_periods([
{
"p_id": "1",
"medication_name": "X",
"action": "start",
"event_time": "2021-01-01T00:00:00+0000"
},
{
"p_id": "1",
"medication_name": "X",
"action": "stop",
"event_time": "2021-01-01T01:00:00+0000"
}
]), {'X': [("2021-01-01T00:00:00+0000", "2021-01-01T01:00:00+0000")]})
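# A hedged reference sketch of the behaviour these tests pin down; the real
# ConsumerService.consumer.utils.calculate_periods may differ (e.g. cancel_stop
# after a valid stop, or multiple patients, are not covered by the tests above).
def _reference_calculate_periods(events):
    periods, open_start = {}, {}
    for event in events:
        med, action, t = event["medication_name"], event["action"], event["event_time"]
        if action == "start":
            if med in open_start:
                raise Exception("start while a period is already open")
            open_start[med] = t
        elif action == "stop":
            if med not in open_start:
                raise Exception("stop without a matching start")
            periods.setdefault(med, []).append((open_start.pop(med), t))
        elif action == "cancel_start":
            if med not in open_start:
                raise Exception("cancel_start without a matching start")
            open_start.pop(med)
        elif action == "cancel_stop":
            # the tests only exercise the invalid case, so this sketch rejects it
            raise Exception("cancel_stop without a matching stop")
    for med, t in open_start.items():
        periods.setdefault(med, []).append((t,))
    return periods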
if __name__ == '__main__':
unittest.main()
| 33.18705
| 82
| 0.38695
| 398
| 4,613
| 4.268844
| 0.11809
| 0.077693
| 0.10359
| 0.123602
| 0.892878
| 0.868158
| 0.826957
| 0.826957
| 0.815185
| 0.806945
| 0
| 0.168029
| 0.469759
| 4,613
| 138
| 83
| 33.427536
| 0.526574
| 0
| 0
| 0.6
| 0
| 0
| 0.2571
| 0.114459
| 0
| 0
| 0
| 0
| 0.072
| 1
| 0.072
| false
| 0
| 0.016
| 0
| 0.096
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
a4c7a79f495a83f2c8420a9706279353c44302ce
| 287
|
py
|
Python
|
lib/constants/__init__.py
|
PoireToPoire/backend
|
ea4fede661cde6f74b5233dc222ee0ef7a59b375
|
[
"MIT"
] | null | null | null |
lib/constants/__init__.py
|
PoireToPoire/backend
|
ea4fede661cde6f74b5233dc222ee0ef7a59b375
|
[
"MIT"
] | null | null | null |
lib/constants/__init__.py
|
PoireToPoire/backend
|
ea4fede661cde6f74b5233dc222ee0ef7a59b375
|
[
"MIT"
] | null | null | null |
from socket import gethostname
from os import path
API_HOST: str = gethostname()
INI_FILE_PATH: str = path.abspath(path.join(path.dirname(path.dirname(__file__)), "db/db.ini"))
STATIC_PATH: str = path.abspath(path.join(path.dirname(path.dirname(path.dirname(__file__))), "static"))
| 47.833333
| 104
| 0.756098
| 43
| 287
| 4.767442
| 0.372093
| 0.268293
| 0.219512
| 0.321951
| 0.560976
| 0.468293
| 0.468293
| 0.468293
| 0.468293
| 0.468293
| 0
| 0
| 0.094077
| 287
| 6
| 104
| 47.833333
| 0.788462
| 0
| 0
| 0
| 0
| 0
| 0.053004
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.4
| 0
| 0.4
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
351106eaaa1f0aa6970dcff5f0c4a0a4bd7b5cc1
| 46
|
py
|
Python
|
gym/envs/atari/__init__.py
|
23pointsNorth/gym
|
5c116fb3c91e872505300031d2bd60672b3a6e03
|
[
"Python-2.0",
"OLDAP-2.7"
] | 123
|
2018-11-20T09:14:29.000Z
|
2020-12-28T20:05:55.000Z
|
tema1/gym-master/gym/envs/atari/__init__.py
|
BrujitoOz/ia-course
|
c05e497b467aab4572f3578f1b9068d4585106d2
|
[
"MIT"
] | 38
|
2019-03-26T19:11:04.000Z
|
2022-02-19T14:19:51.000Z
|
tema1/gym-master/gym/envs/atari/__init__.py
|
BrujitoOz/ia-course
|
c05e497b467aab4572f3578f1b9068d4585106d2
|
[
"MIT"
] | 187
|
2018-11-28T11:38:02.000Z
|
2022-03-16T11:18:39.000Z
|
from gym.envs.atari.atari_env import AtariEnv
| 23
| 45
| 0.847826
| 8
| 46
| 4.75
| 0.875
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.086957
| 46
| 1
| 46
| 46
| 0.904762
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
352ae87d25865c10d2a4557ca927554ffd572580
| 47
|
py
|
Python
|
ekorpkit/models/__init__.py
|
entelecheia/eKorpKit
|
9521ae4c4749419fa2b088d1b9e518e5927b7cb8
|
[
"CC-BY-4.0"
] | 4
|
2022-02-26T10:54:16.000Z
|
2022-02-26T11:01:56.000Z
|
ekorpkit/models/__init__.py
|
entelecheia/eKorpKit
|
9521ae4c4749419fa2b088d1b9e518e5927b7cb8
|
[
"CC-BY-4.0"
] | 1
|
2022-03-25T06:37:12.000Z
|
2022-03-25T06:45:53.000Z
|
ekorpkit/models/__init__.py
|
entelecheia/eKorpKit
|
9521ae4c4749419fa2b088d1b9e518e5927b7cb8
|
[
"CC-BY-4.0"
] | null | null | null |
from .tokenizer.trainer import train_tokenizer
| 23.5
| 46
| 0.87234
| 6
| 47
| 6.666667
| 0.833333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.085106
| 47
| 1
| 47
| 47
| 0.930233
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
1025b6237c68b65e9d3c49f40b572b778b57e9de
| 69
|
py
|
Python
|
test/bootstrap.py
|
simonsmh/dcdownloader
|
28908919bbb687f284f7e8298fdb4c6f01600e9a
|
[
"MIT"
] | 173
|
2018-03-19T07:06:41.000Z
|
2022-02-15T00:26:35.000Z
|
test/bootstrap.py
|
simonsmh/dcdownloader
|
28908919bbb687f284f7e8298fdb4c6f01600e9a
|
[
"MIT"
] | 10
|
2018-04-09T05:48:53.000Z
|
2021-04-02T05:59:19.000Z
|
test/bootstrap.py
|
simonsmh/dcdownloader
|
28908919bbb687f284f7e8298fdb4c6f01600e9a
|
[
"MIT"
] | 27
|
2018-03-20T06:09:41.000Z
|
2021-06-08T06:44:35.000Z
|
import sys
sys.path.append(sys.path[0] + '/../')
import dcdownloader
| 17.25
| 37
| 0.695652
| 10
| 69
| 4.8
| 0.6
| 0.291667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.016129
| 0.101449
| 69
| 4
| 38
| 17.25
| 0.758065
| 0
| 0
| 0
| 0
| 0
| 0.057143
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.666667
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
107bc64fa0390d06fb931573ffb84f6ef53b1f6f
| 40
|
py
|
Python
|
pvcnn_code/models/s3dis/__init__.py
|
pahn04/PPConv
|
395957b919786bb5b603f37a94ccf9173afce085
|
[
"MIT"
] | 1
|
2022-03-29T02:14:57.000Z
|
2022-03-29T02:14:57.000Z
|
pvcnn_code/models/s3dis/__init__.py
|
pahn04/PPConv
|
395957b919786bb5b603f37a94ccf9173afce085
|
[
"MIT"
] | null | null | null |
pvcnn_code/models/s3dis/__init__.py
|
pahn04/PPConv
|
395957b919786bb5b603f37a94ccf9173afce085
|
[
"MIT"
] | 1
|
2022-02-08T05:47:10.000Z
|
2022-02-08T05:47:10.000Z
|
from models.s3dis.ppcnnpp import PPCNN2
| 20
| 39
| 0.85
| 6
| 40
| 5.666667
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.055556
| 0.1
| 40
| 1
| 40
| 40
| 0.888889
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
52a2db347d2dddf3d45cbbcc079ffcaea4edda83
| 16,294
|
py
|
Python
|
src/features_extraction.py
|
jimmystique/AudioClassification
|
9f9966306068cff7419f6c190752bab4d35b3870
|
[
"MIT"
] | null | null | null |
src/features_extraction.py
|
jimmystique/AudioClassification
|
9f9966306068cff7419f6c190752bab4d35b3870
|
[
"MIT"
] | null | null | null |
src/features_extraction.py
|
jimmystique/AudioClassification
|
9f9966306068cff7419f6c190752bab4d35b3870
|
[
"MIT"
] | null | null | null |
from librosa.feature import chroma_stft, rms, mfcc, spectral_centroid, spectral_bandwidth, spectral_flatness, spectral_rolloff
from librosa import feature
from librosa import stft, amplitude_to_db, magphase
import argparse
import yaml
import os
import multiprocessing
import pickle as pkl
import numpy as np
from utils import ensure_dir
import time
import datetime
import socket
def chroma_stft(processed_data_path, save_path, n_processes, sr=22050, S=None, n_fft=2048, hop_length=512, win_length=None, window='hann', center=True, pad_mode='reflect', tuning=None, n_chroma=12):
""" Extract chroma features using STFT on all files at processed_data_path and save the extracted features at save_path
Args:
processed_data_path (str): Path to the directory containing the processed data
save_path (str): Path to the directory where to save the extracted features
n_processes (int): Number of processes to run in parallel to extract features faster
sr (int): sampling rate
S (np.ndarray): power spectrogram
norm (float or None): column-wise normalization
n_fft (int): FFT window size
hop_length (int): hop length
win_length (int): Each frame of audio is windowed by window(). The window will be of length win_length and then padded with zeros to match n_fft.
window (string, tuple, number, function, or np.ndarray [shape=(n_fft,)]): - a window specification (string, tuple, or number); see scipy.signal.get_window
- a window function, such as scipy.signal.windows.hann
- a vector or array of length n_fft
center (bool): - if True, the signal y is padded so that frame t is centered at y[t * hop_length].
- if False, then frame t begins at y[t * hop_length]
pad_mode (str): If center=True, the padding mode to use at the edges of the signal. By default, STFT uses reflection padding.
tuning (float): Deviation from A440 tuning in fractional chroma bins. If None, it is automatically estimated.
n_chroma (int): Number of chroma bins to produce (12 by default).
"""
print("Extracting Chroma Features with Short Time Fourier Transform ...")
ensure_dir(save_path)
processed_data_files = sorted([f.path for f in os.scandir(processed_data_path)])
pool=multiprocessing.Pool(processes=n_processes)
pool.starmap(_chroma_stft, [[processed_file_path, save_path, sr, S, n_fft, hop_length, win_length, window, center, pad_mode, tuning, n_chroma] for processed_file_path in processed_data_files], chunksize=1)
def _chroma_stft(processed_file_path, save_path, sr, S, n_fft, hop_length, win_length, window, center, pad_mode, tuning, n_chroma):
""" Extract chroma features for the file at processed_file_path and save the features extracted at save_path
Args:
processed_file_path (str): Path to the file containing the processed data
save_path (str): Path to the directory where to save the extracted features
sr (int): sampling rate
S (np.ndarray): power spectrogram
norm (float or None): column-wise normalization
n_fft (int): FFT window size
hop_length (int): hop length
win_length (int): Each frame of audio is windowed by window(). The window will be of length win_length and then padded with zeros to match n_fft.
window (string, tuple, number, function, or np.ndarray [shape=(n_fft,)]): - a window specification (string, tuple, or number); see scipy.signal.get_window
- a window function, such as scipy.signal.windows.hann
- a vector or array of length n_fft
center (bool): - if True, the signal y is padded so that frame t is centered at y[t * hop_length].
- if False, then frame t begins at y[t * hop_length]
pad_mode (str): If center=True, the padding mode to use at the edges of the signal. By default, STFT uses reflection padding.
tuning (float): Deviation from A440 tuning in fractional chroma bins. If None, it is automatically estimated.
n_chroma (int): Number of chroma bins to produce (12 by default).
"""
processed_data = pkl.load(open(processed_file_path, "rb" ))
extracted_features = processed_data.copy(deep=True)
for index, row in processed_data.iterrows():
data = row["data"]
data_extracted_features = feature.chroma_stft(y=data, sr=sr, S=S, n_fft=n_fft, hop_length=hop_length, win_length=win_length, window=window, center=center, pad_mode=pad_mode, tuning=tuning, n_chroma=n_chroma)
extracted_features.loc[index, "data"] = data_extracted_features
save_filename = "{}_chroma_stft_features.pkl".format(os.path.splitext(os.path.basename(processed_file_path))[0].split("_")[0])
save_file_path = os.path.join(save_path, save_filename)
pkl.dump(extracted_features, open(save_file_path, "wb" ) )
print("- Chroma stft features extraction on {} Saved in {}".format(processed_file_path, save_file_path))
def root_mean_square(processed_data_path, save_path, n_processes, S=None, frame_length=2048, hop_length=512, center=True, pad_mode='reflect'):
print("Extracting features with Root Mean Square ...")
ensure_dir(save_path)
processed_data_files = sorted([f.path for f in os.scandir(processed_data_path)])
pool=multiprocessing.Pool(processes=n_processes)
pool.starmap(_root_mean_square, [[processed_file_path, save_path, S, frame_length, hop_length, center, pad_mode] for processed_file_path in processed_data_files], chunksize=1)
def _root_mean_square(processed_file_path, save_path, S, frame_length, hop_length, center, pad_mode):
processed_data = pkl.load(open(processed_file_path, "rb" ))
extracted_features = processed_data.copy(deep=True)
for index, row in processed_data.iterrows():
data = row["data"]
data_extracted_features = feature.rms(y=data, S=S, frame_length=frame_length, hop_length=hop_length, center=center, pad_mode=pad_mode)
extracted_features.loc[index, "data"] = data_extracted_features
save_filename = "{}_rms_features.pkl".format(os.path.splitext(os.path.basename(processed_file_path))[0].split("_")[0])
save_file_path = os.path.join(save_path, save_filename)
pkl.dump(extracted_features, open(save_file_path, "wb" ) )
print("- RMS features extraction on {} Saved in {}".format(processed_file_path, save_file_path))
def mfcc(processed_data_path, save_path, n_processes, sr=22050, S=None, n_mfcc=20, dct_type=2, norm='ortho', lifter=0):
print("Extracting features with MFCC ...")
ensure_dir(save_path)
processed_data_files = sorted([f.path for f in os.scandir(processed_data_path)])
print(processed_data_files)
pool=multiprocessing.Pool(processes=n_processes)
pool.starmap(_mfcc, [[processed_file_path, save_path, sr, S, n_mfcc, dct_type, norm, lifter] for processed_file_path in processed_data_files], chunksize=1)
def _mfcc(processed_file_path, save_path, sr, S, n_mfcc, dct_type, norm, lifter):
processed_data = pkl.load(open(processed_file_path, "rb" ))
extracted_features = processed_data.copy(deep=True)
for index, row in processed_data.iterrows():
data = row["data"]
data_extracted_features = feature.mfcc(y=data, sr=sr, n_mfcc=n_mfcc, dct_type=dct_type, norm=norm, lifter=lifter)
extracted_features.loc[index, "data"] = data_extracted_features
save_filename = "{}_mfcc_features.pkl".format(os.path.splitext(os.path.basename(processed_file_path))[0].split("_")[0])
save_file_path = os.path.join(save_path, save_filename)
pkl.dump(extracted_features, open(save_file_path, "wb" ) )
print("- MFCC features extraction on {} Saved in {}".format(processed_file_path, save_file_path))
def spectrogram(processed_data_path, save_path, n_processes, n_fft=2048, hop_length=512, win_length=None, window='hann', center=True, pad_mode='reflect'):
print("Generating spectrograms ...")
ensure_dir(save_path)
processed_data_files = sorted([f.path for f in os.scandir(processed_data_path)])
print(processed_data_files)
pool=multiprocessing.Pool(processes=n_processes)
pool.starmap(_spectrogram, [[processed_file_path, save_path, n_fft, hop_length, win_length, window, center, pad_mode] for processed_file_path in processed_data_files], chunksize=1)
def _spectrogram(processed_file_path, save_path, n_fft, hop_length, win_length, window, center, pad_mode):
processed_data = pkl.load(open(processed_file_path, "rb" ))
extracted_features = processed_data.copy(deep=True)
for index, row in processed_data.iterrows():
data = row["data"]
audio_data_stft_format = stft(y=data, n_fft=n_fft, hop_length=hop_length, win_length=win_length, window=window, center=center, pad_mode=pad_mode)
data_extracted_features= amplitude_to_db(abs(audio_data_stft_format))
extracted_features.loc[index, "data"] = data_extracted_features
save_filename = "{}_spectrogram.pkl".format(os.path.splitext(os.path.basename(processed_file_path))[0].split("_")[0])
save_file_path = os.path.join(save_path, save_filename)
pkl.dump(extracted_features, open(save_file_path, "wb" ) )
print("- Generating spectrogram on {} Saved in {}".format(processed_file_path, save_file_path))
def spectrogram_centroid(processed_data_path, save_path, n_processes, sr=22050, n_fft=2048, hop_length=512, win_length=None, window='hann', center=True, pad_mode='reflect'):
print("Extracting spectrogram centroid...")
ensure_dir(save_path)
processed_data_files = sorted([f.path for f in os.scandir(processed_data_path)])
print(processed_data_files)
pool=multiprocessing.Pool(processes=n_processes)
pool.starmap(_spectrogram_centroid, [[processed_file_path, save_path, sr, n_fft, hop_length, win_length, window, center, pad_mode] for processed_file_path in processed_data_files], chunksize=1)
def _spectrogram_centroid(processed_file_path, save_path, sr, n_fft, hop_length, win_length, window, center, pad_mode):
processed_data = pkl.load(open(processed_file_path, "rb" ))
extracted_features = processed_data.copy(deep=True)
for index, row in processed_data.iterrows():
data = row["data"]
data_extracted_features= spectral_centroid(data, sr=sr, n_fft=n_fft, hop_length=hop_length, win_length=win_length, window=window, center=center, pad_mode=pad_mode)
extracted_features.loc[index, "data"] = data_extracted_features
save_filename = "{}_spectrogram_centroid.pkl".format(os.path.splitext(os.path.basename(processed_file_path))[0].split("_")[0])
save_file_path = os.path.join(save_path, save_filename)
pkl.dump(extracted_features, open(save_file_path, "wb" ) )
print("- Extracting spectrogram centroid on {} Saved in {}".format(processed_file_path, save_file_path))
def spectrogram_bandwith(processed_data_path, save_path, n_processes, sr=22050, n_fft=2048, hop_length=512, win_length=None, window='hann', center=True, pad_mode='reflect'):
print("Extracting spectrogram bandwith...")
ensure_dir(save_path)
processed_data_files = sorted([f.path for f in os.scandir(processed_data_path)])
print(processed_data_files)
pool=multiprocessing.Pool(processes=n_processes)
pool.starmap(_spectrogram_bandwidth, [[processed_file_path, save_path, sr, n_fft, hop_length, win_length, window, center, pad_mode] for processed_file_path in processed_data_files], chunksize=1)
def _spectrogram_bandwidth(processed_file_path, save_path, sr, n_fft, hop_length, win_length, window, center, pad_mode):
processed_data = pkl.load(open(processed_file_path, "rb" ))
extracted_features = processed_data.copy(deep=True)
for index, row in processed_data.iterrows():
data = row["data"]
data_extracted_features= spectral_bandwidth(data, sr=sr, n_fft=n_fft, hop_length=hop_length, win_length=win_length, window=window, center=center, pad_mode=pad_mode)
extracted_features.loc[index, "data"] = data_extracted_features
save_filename = "{}_spectrogram_bandwith.pkl".format(os.path.splitext(os.path.basename(processed_file_path))[0].split("_")[0])
save_file_path = os.path.join(save_path, save_filename)
pkl.dump(extracted_features, open(save_file_path, "wb" ) )
print("- Extracting spectrogram bandwith on {} Saved in {}".format(processed_file_path, save_file_path))
def spectrogram_flatness(processed_data_path, save_path, n_processes, n_fft=2048, hop_length=512, win_length=None, window='hann', center=True, pad_mode='reflect'):
print("Extracting spectrogram flatness...")
ensure_dir(save_path)
processed_data_files = sorted([f.path for f in os.scandir(processed_data_path)])
print(processed_data_files)
pool=multiprocessing.Pool(processes=n_processes)
pool.starmap(_spectrogram_flatness, [[processed_file_path, save_path, n_fft, hop_length, win_length, window, center, pad_mode] for processed_file_path in processed_data_files], chunksize=1)
def _spectrogram_flatness(processed_file_path, save_path, n_fft, hop_length, win_length, window, center, pad_mode):
processed_data = pkl.load(open(processed_file_path, "rb" ))
extracted_features = processed_data.copy(deep=True)
for index, row in processed_data.iterrows():
data = row["data"]
data_extracted_features= spectral_flatness(data, n_fft=n_fft, hop_length=hop_length, win_length=win_length, window=window, center=center, pad_mode=pad_mode)
extracted_features.loc[index, "data"] = data_extracted_features
save_filename = "{}_spectrogram_flatness.pkl".format(os.path.splitext(os.path.basename(processed_file_path))[0].split("_")[0])
save_file_path = os.path.join(save_path, save_filename)
pkl.dump(extracted_features, open(save_file_path, "wb" ) )
print("- Extracting spectrogram flatness on {} Saved in {}".format(processed_file_path, save_file_path))
def spectrogram_rolloff(processed_data_path, save_path, n_processes, sr=22050, n_fft=2048, hop_length=512, win_length=None, window='hann', center=True, pad_mode='reflect'):
print("Extracting spectrogram bandwith...")
ensure_dir(save_path)
processed_data_files = sorted([f.path for f in os.scandir(processed_data_path)])
print(processed_data_files)
pool=multiprocessing.Pool(processes=n_processes)
pool.starmap(_spectrogram_rolloff, [[processed_file_path, save_path, sr, n_fft, hop_length, win_length, window, center, pad_mode] for processed_file_path in processed_data_files], chunksize=1)
def _spectrogram_rolloff(processed_file_path, save_path, sr, n_fft, hop_length, win_length, window, center, pad_mode):
processed_data = pkl.load(open(processed_file_path, "rb" ))
extracted_features = processed_data.copy(deep=True)
for index, row in processed_data.iterrows():
data = row["data"]
data_extracted_features= spectral_rolloff(data, sr=sr, n_fft=n_fft, hop_length=hop_length, win_length=win_length, window=window, center=center, pad_mode=pad_mode)
extracted_features.loc[index, "data"] = data_extracted_features
save_filename = "{}_spectrogram_rolloff.pkl".format(os.path.splitext(os.path.basename(processed_file_path))[0].split("_")[0])
save_file_path = os.path.join(save_path, save_filename)
pkl.dump(extracted_features, open(save_file_path, "wb" ) )
print("- Extracting spectrogram rolloff on {} Saved in {}".format(processed_file_path, save_file_path))
def extract_features(processed_data_path, save_path, n_processes, algorithm):
""" Extract features from files at processed_data_path and save the extracted features found at save_path
Args:
processed_data_path (str): Path to the directory containing the processed data
save_path (str): Path to the directory where to save the extracted features
n_processes (int): Number of processes to run in parallel to extract features faster
algorithm (dict): Dictionary containing a key "name" (the name of one of the feature-extraction functions above, resolved via globals()) and a key "args" containing that function's keyword arguments.
"""
print(processed_data_path)
print(algorithm)
t1 = time.time()
globals()[algorithm["name"]](processed_data_path, save_path, n_processes, **algorithm["args"])
t2 = time.time()
with open("logs/logs.csv", "a") as myfile:
myfile.write("{:%Y-%m-%d %H:%M:%S},extract {} features,{},{},{:.2f}\n".format(datetime.datetime.now(),algorithm["name"],socket.gethostname(),n_processes,t2-t1))
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("-e", "--config_file", default="configs/config.yaml", type=str, help = "Path to the configuration file")
args = parser.parse_args()
features_extraction_cfg = yaml.safe_load(open(args.config_file))["features_extraction"]
print(features_extraction_cfg)
extract_features(**features_extraction_cfg)
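# A hypothetical configs/config.yaml matching what the __main__ block expects
# (keys inferred from extract_features and the wrappers above; the paths and
# chosen algorithm are illustrative only):
#
# features_extraction:
#   processed_data_path: data/processed
#   save_path: data/features/mfcc
#   n_processes: 4
#   algorithm:
#     name: mfcc
#     args:
#       sr: 22050
#       n_mfcc: 20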
| 57.575972
| 209
| 0.776789
| 2,490
| 16,294
| 4.806426
| 0.092369
| 0.079295
| 0.069602
| 0.042112
| 0.85127
| 0.8501
| 0.8501
| 0.843917
| 0.836564
| 0.836564
| 0
| 0.008226
| 0.112127
| 16,294
| 283
| 210
| 57.575972
| 0.819036
| 0.254879
| 0
| 0.51462
| 0
| 0
| 0.095715
| 0.012188
| 0
| 0
| 0
| 0
| 0
| 1
| 0.099415
| false
| 0
| 0.076023
| 0
| 0.175439
| 0.146199
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
52ef0934eb984fcad5d78d7b721b8fe32a0ed005
| 39,722
|
py
|
Python
|
tests/test_prom_metrics_check.py
|
maciejmazur10c/prom-metrics-check
|
12627554d0aef6a2dcba66f9a41b5ef1f8e6b114
|
[
"MIT"
] | 32
|
2020-07-24T12:02:06.000Z
|
2022-03-22T08:13:55.000Z
|
tests/test_prom_metrics_check.py
|
maciejmazur10c/prom-metrics-check
|
12627554d0aef6a2dcba66f9a41b5ef1f8e6b114
|
[
"MIT"
] | 1
|
2020-07-24T10:42:07.000Z
|
2020-07-24T11:30:46.000Z
|
tests/test_prom_metrics_check.py
|
maciejmazur10c/prom-metrics-check
|
12627554d0aef6a2dcba66f9a41b5ef1f8e6b114
|
[
"MIT"
] | 4
|
2020-08-02T06:49:52.000Z
|
2021-11-12T07:15:22.000Z
|
#!/usr/bin/env python
"""Tests for `prom_metrics_check` package."""
import unittest
from prom_metrics_check import prom_metrics_check, cli
def get_all_metrics(query=None):
return prom_metrics_check.find_metrics(
tokenized_query=prom_metrics_check.tokenize_string(query))
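# Usage note (inferred from the helper above and test_example1 below, not a
# documented API): get_all_metrics takes a raw PromQL string and returns the
# metric names it references, e.g. get_all_metrics('sum(up{job="kubelet"})')
# is expected to yield {"up"}.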
class TestCLI(unittest.TestCase):
def test_check_main(self):
with self.assertRaises(SystemExit) as cm:
cli.main(args=['--help'])
self.assertEqual(cm.exception.code, 0)
class BaseMessage:
def error_msg(self, metrics, expected, query):
return "\nQuery: {qry}\nFound: [{met}]\nExpected: [{exp}]".format(
qry=query, met=', '.join(metrics), exp=', '.join(expected))
class TestGeneralTokenize(unittest.TestCase, BaseMessage):
def test_example1(self):
query = """sum(up{cluster="$cluster", job="kubelet"})"""
metrics = get_all_metrics(query)
expected = {"up"}
self.assertCountEqual(
set(metrics), expected, self.error_msg(metrics, expected, query))
def test_example2(self):
query = """sum(kubelet_running_pod_count{
cluster="$cluster", job="kubelet", instance=~"$instance"})"""
metrics = get_all_metrics(query)
expected = {"kubelet_running_pod_count"}
self.assertCountEqual(
set(metrics), expected, self.error_msg(metrics, expected, query))
def test_example3(self):
query = """sum(kubelet_running_container_count{
cluster="$cluster", job="kubelet", instance=~"$instance"})"""
metrics = get_all_metrics(query)
expected = {"kubelet_running_container_count"}
self.assertCountEqual(
set(metrics), expected, self.error_msg(metrics, expected, query))
def test_example4(self):
query = """sum(volume_manager_total_volumes{
cluster="$cluster", job="kubelet", instance=~"$instance",
state="actual_state_of_world"})"""
metrics = get_all_metrics(query)
expected = {"volume_manager_total_volumes"}
self.assertCountEqual(
set(metrics), expected, self.error_msg(metrics, expected, query))
def test_example5(self):
query = """sum(volume_manager_total_volumes{
cluster="$cluster", job="kubelet", instance=~"$instance",
state="desired_state_of_world"})"""
metrics = get_all_metrics(query)
expected = {"volume_manager_total_volumes"}
self.assertCountEqual(
set(metrics), expected, self.error_msg(metrics, expected, query))
def test_example6(self):
query = """sum(rate(kubelet_node_config_error{
cluster="$cluster", job="kubelet", instance=~"$instance"}[5m]))"""
metrics = get_all_metrics(query)
expected = {"kubelet_node_config_error"}
self.assertCountEqual(
set(metrics), expected, self.error_msg(metrics, expected, query))
def test_example7(self):
query = """sum(rate(kubelet_runtime_operations_total{
cluster="$cluster",job="kubelet",
instance=~"$instance"}[5m])) by (operation_type, instance)"""
metrics = get_all_metrics(query)
expected = {"kubelet_runtime_operations_total"}
self.assertCountEqual(
set(metrics), expected, self.error_msg(metrics, expected, query))
def test_example8(self):
query = """sum(rate(kubelet_runtime_operations_errors_total{
cluster="$cluster",job="kubelet",
instance=~"$instance"}[5m])) by (instance, operation_type)"""
metrics = get_all_metrics(query)
expected = {"kubelet_runtime_operations_errors_total"}
self.assertCountEqual(
set(metrics), expected, self.error_msg(metrics, expected, query))
def test_example9(self):
query = """histogram_quantile(0.99, sum(rate(
kubelet_runtime_operations_duration_seconds_bucket{
cluster="$cluster",job="kubelet",instance=~"$instance"}[5m]))
by (instance, operation_type, le))"""
metrics = get_all_metrics(query)
expected = {"kubelet_runtime_operations_duration_seconds_bucket"}
self.assertCountEqual(
set(metrics), expected, self.error_msg(metrics, expected, query))
def test_example10(self):
query = """sum(rate(kubelet_pod_start_duration_seconds_count{
cluster="$cluster",job="kubelet",
instance=~"$instance"}[5m])) by (instance)"""
metrics = get_all_metrics(query)
expected = {"kubelet_pod_start_duration_seconds_count"}
self.assertCountEqual(
set(metrics), expected, self.error_msg(metrics, expected, query))
def test_example11(self):
query = """sum(rate(kubelet_pod_worker_duration_seconds_count{
cluster="$cluster",job="kubelet",
instance=~"$instance"}[5m])) by (instance)"""
metrics = get_all_metrics(query)
expected = {"kubelet_pod_worker_duration_seconds_count"}
self.assertCountEqual(
set(metrics), expected, self.error_msg(metrics, expected, query))
def test_example12(self):
query = """histogram_quantile(0.99, sum(rate(
kubelet_pod_start_duration_seconds_count{
cluster="$cluster",job="kubelet",instance=~"$instance"}[5m]))
by (instance, le))"""
metrics = get_all_metrics(query)
expected = {"kubelet_pod_start_duration_seconds_count"}
self.assertCountEqual(
set(metrics), expected, self.error_msg(metrics, expected, query))
def test_example13(self):
query = """histogram_quantile(0.99, sum(rate(
kubelet_pod_worker_duration_seconds_bucket{
cluster="$cluster",job="kubelet",instance=~"$instance"}[5m]))
by (instance, le))"""
metrics = get_all_metrics(query)
expected = {"kubelet_pod_worker_duration_seconds_bucket"}
self.assertCountEqual(
set(metrics), expected, self.error_msg(metrics, expected, query))
def test_example14(self):
query = """sum(rate(storage_operation_duration_seconds_count{
cluster="$cluster",job="kubelet",
instance=~"$instance"}[5m]))
by (instance, operation_name, volume_plugin)"""
metrics = get_all_metrics(query)
expected = {"storage_operation_duration_seconds_count"}
self.assertCountEqual(set(metrics), expected, self.error_msg(
metrics, expected, query))
def test_example15(self):
query = """sum(rate(storage_operation_errors_total{
cluster="$cluster",job="kubelet",
instance=~"$instance"}[5m]))
by (instance, operation_name, volume_plugin)"""
metrics = get_all_metrics(query)
expected = {"storage_operation_errors_total"}
self.assertCountEqual(
set(metrics), expected, self.error_msg(metrics, expected, query))
def test_example16(self):
query = """histogram_quantile(0.99, sum(rate(
storage_operation_duration_seconds_bucket{
cluster="$cluster", job="kubelet", instance=~"$instance"}[5m]))
by (instance, operation_name, volume_plugin, le))"""
metrics = get_all_metrics(query)
expected = {"storage_operation_duration_seconds_bucket"}
self.assertCountEqual(
set(metrics), expected, self.error_msg(metrics, expected, query))
def test_example17(self):
query = """sum(rate(kubelet_cgroup_manager_duration_seconds_count{
cluster="$cluster", job="kubelet",
instance=~"$instance"}[5m])) by (instance, operation_type)"""
metrics = get_all_metrics(query)
expected = {"kubelet_cgroup_manager_duration_seconds_count"}
self.assertCountEqual(
set(metrics), expected, self.error_msg(metrics, expected, query))
def test_example18(self):
query = """histogram_quantile(0.99, sum(rate(
kubelet_cgroup_manager_duration_seconds_bucket{
cluster="$cluster", job="kubelet", instance=~"$instance"}[5m]))
by (instance, operation_type, le))"""
metrics = get_all_metrics(query)
expected = {"kubelet_cgroup_manager_duration_seconds_bucket"}
self.assertCountEqual(
set(metrics), expected, self.error_msg(metrics, expected, query))
def test_example19(self):
query = """sum(rate(kubelet_pleg_relist_duration_seconds_count{
cluster="$cluster", job="kubelet",
instance=~"$instance"}[5m])) by (instance)"""
metrics = get_all_metrics(query)
expected = {"kubelet_pleg_relist_duration_seconds_count"}
self.assertCountEqual(
set(metrics), expected, self.error_msg(metrics, expected, query))
def test_example20(self):
query = """histogram_quantile(0.99, sum(rate(
kubelet_pleg_relist_interval_seconds_bucket{
cluster="$cluster",job="kubelet",instance=~"$instance"}[5m]))
by (instance, le))"""
metrics = get_all_metrics(query)
expected = {"kubelet_pleg_relist_interval_seconds_bucket"}
self.assertCountEqual(
set(metrics), expected, self.error_msg(metrics, expected, query))
def test_example21(self):
query = """histogram_quantile(0.99, sum(rate(
kubelet_pleg_relist_duration_seconds_bucket{
cluster="$cluster",job="kubelet",instance=~"$instance"}[5m]))
by (instance, le))"""
metrics = get_all_metrics(query)
expected = {"kubelet_pleg_relist_duration_seconds_bucket"}
self.assertCountEqual(
set(metrics), expected, self.error_msg(metrics, expected, query))
def test_example22(self):
query = """sum(rate(rest_client_requests_total{
cluster="$cluster",job="kubelet",
instance=~"$instance",code=~"2.."}[5m]))"""
metrics = get_all_metrics(query)
expected = {"rest_client_requests_total"}
self.assertCountEqual(
set(metrics), expected, self.error_msg(metrics, expected, query))
def test_example23(self):
query = """sum(rate(rest_client_requests_total{
cluster="$cluster",job="kubelet",
instance=~"$instance",code=~"3.."}[5m]))"""
metrics = get_all_metrics(query)
expected = {"rest_client_requests_total"}
self.assertCountEqual(
set(metrics), expected, self.error_msg(metrics, expected, query))
def test_example24(self):
query = """sum(rate(rest_client_requests_total{
cluster="$cluster",job="kubelet",
instance=~"$instance",code=~"4.."}[5m]))"""
metrics = get_all_metrics(query)
expected = {"rest_client_requests_total"}
self.assertCountEqual(
set(metrics), expected, self.error_msg(metrics, expected, query))
def test_example25(self):
query = """sum(rate(rest_client_requests_total{
cluster="$cluster",job="kubelet",
instance=~"$instance",code=~"5.."}[5m]))"""
metrics = get_all_metrics(query)
expected = {"rest_client_requests_total"}
self.assertCountEqual(
set(metrics), expected, self.error_msg(metrics, expected, query))
def test_example26(self):
query = """histogram_quantile(0.99, sum(rate(
rest_client_request_latency_seconds_bucket{
cluster="$cluster",job="kubelet", instance=~"$instance"}[5m]))
by (instance, verb, url, le))"""
metrics = get_all_metrics(query)
expected = {"rest_client_request_latency_seconds_bucket"}
self.assertCountEqual(
set(metrics), expected, self.error_msg(metrics, expected, query))
def test_example27(self):
query = """process_resident_memory_bytes{
cluster="$cluster",job="kubelet",instance=~"$instance"}"""
metrics = get_all_metrics(query)
expected = {"process_resident_memory_bytes"}
self.assertCountEqual(
set(metrics), expected, self.error_msg(metrics, expected, query))
def test_example28(self):
query = """rate(process_cpu_seconds_total{
cluster="$cluster",job="kubelet",instance=~"$instance"}[5m])"""
metrics = get_all_metrics(query)
expected = {"process_cpu_seconds_total"}
self.assertCountEqual(
set(metrics), expected, self.error_msg(metrics, expected, query))
def test_example29(self):
query = """go_goroutines{cluster="$cluster",job="kubelet",
instance=~"$instance"}"""
metrics = get_all_metrics(query)
expected = {"go_goroutines"}
self.assertCountEqual(
set(metrics), expected, self.error_msg(metrics, expected, query))
def test_example30(self):
query = """sort_desc(min(avg(rate(
node_cpu_seconds_total{mode="idle"}[2m])) by (instance)))"""
metrics = get_all_metrics(query)
expected = {"node_cpu_seconds_total"}
self.assertCountEqual(
set(metrics), expected, self.error_msg(metrics, expected, query))
def test_example31(self):
query = """min(node_memory_MemAvailable_bytes/
node_memory_MemTotal_bytes)"""
metrics = get_all_metrics(query)
expected = {"node_memory_MemAvailable_bytes",
"node_memory_MemTotal_bytes"}
self.assertCountEqual(
set(metrics), expected, self.error_msg(metrics, expected, query))
def test_example32(self):
query = """count(sum by (pod)(delta(
kube_pod_container_status_restarts_total[15m]) > 0))"""
metrics = get_all_metrics(query)
expected = {"kube_pod_container_status_restarts_total"}
self.assertCountEqual(
set(metrics), expected, self.error_msg(metrics, expected, query))
def test_example33(self):
query = """sum by (pod)(delta(
kube_pod_container_status_restarts_total[15m]) > 0)"""
metrics = get_all_metrics(query)
expected = {"kube_pod_container_status_restarts_total"}
self.assertCountEqual(
set(metrics), expected, self.error_msg(metrics, expected, query))
def test_example34(self):
query = """sum (kube_pod_status_phase{}) by (phase)"""
metrics = get_all_metrics(query)
expected = {"kube_pod_status_phase"}
self.assertCountEqual(
set(metrics), expected, self.error_msg(metrics, expected, query))
def test_example35(self):
query = """kubelet_running_pod_count{
kubernetes_io_role =~ ".*node.*"}"""
metrics = get_all_metrics(query)
expected = {"kubelet_running_pod_count"}
self.assertCountEqual(
set(metrics), expected, self.error_msg(metrics, expected, query))
def test_example36(self):
query = """node_load1"""
metrics = get_all_metrics(query)
expected = {"node_load1"}
self.assertCountEqual(
set(metrics), expected, self.error_msg(metrics, expected, query))
def test_example37(self):
query = """node_memory_Buffers_bytes + node_memory_Cached_bytes"""
metrics = get_all_metrics(query)
expected = {
"node_memory_Buffers_bytes", "node_memory_Cached_bytes"}
self.assertCountEqual(
set(metrics), expected, self.error_msg(metrics, expected, query))
def test_example38(self):
query = """avg(rate(node_cpu_seconds_total{mode="idle"}[2m]))
by (instance)"""
metrics = get_all_metrics(query)
expected = {"node_cpu_seconds_total"}
self.assertCountEqual(
set(metrics), expected, self.error_msg(metrics, expected, query))
def test_example39(self):
query = """min(node_filesystem_avail_bytes{
mountpoint!~".*(serviceaccount|proc|sys).*",
device!="overlay"}/node_filesystem_size_bytes{
mountpoint!~".*(serviceaccount|proc|sys).*",
device!="overlay"}) by (device, instance)"""
metrics = get_all_metrics(query)
expected = {
"node_filesystem_avail_bytes", "node_filesystem_size_bytes"}
self.assertCountEqual(
set(metrics), expected, self.error_msg(metrics, expected, query))
def test_example40(self):
query = """rate(node_disk_io_time_seconds_total[2m])"""
metrics = get_all_metrics(query)
expected = {"node_disk_io_time_seconds_total"}
self.assertCountEqual(
set(metrics), expected, self.error_msg(metrics, expected, query))
def test_example41(self):
query = """sum(
node_namespace_pod_container:container_cpu_usage_seconds_total:sum_rate{
%(clusterLabel)s="$cluster", namespace="$namespace"} *
on(namespace,pod) group_left(workload, workload_type)
mixin_pod_workload{%(clusterLabel)s="$cluster", namespace="$namespace",
workload_type="$type"}) by (workload, workload_type)"""
metrics = get_all_metrics(query)
expected = {
"node_namespace_pod_container:container_cpu_usage_seconds_total"
":sum_rate",
"mixin_pod_workload"}
self.assertCountEqual(
set(metrics), expected, self.error_msg(metrics, expected, query))
def test_example42(self):
query = """sum(rate(kubelet_runtime_operations_total{
%(clusterLabel)s="$cluster",%(kubeletSelector)s,
instance=~"$instance"}[5m])) by (operation_type, instance)"""
metrics = get_all_metrics(query)
expected = {"kubelet_runtime_operations_total"}
self.assertCountEqual(
set(metrics), expected, self.error_msg(metrics, expected, query))
def test_example43(self):
query = """sum(irate(container_network_receive_bytes_total{
%(clusterLabel)s="$cluster",
%(namespaceLabel)s=~"$namespace"}[$__interval]) * on (namespace,pod)
group_left(workload,workload_type)
mixin_pod_workload{%(clusterLabel)s="$cluster",
%(namespaceLabel)s=~"$namespace", workload_type="$type"})
by (workload)"""
metrics = get_all_metrics(query)
expected = {"container_network_receive_bytes_total",
"mixin_pod_workload"}
self.assertCountEqual(
set(metrics), expected, self.error_msg(metrics, expected, query))
class TestApiServerTokenize(unittest.TestCase, BaseMessage):
def test_apiserver_01(self):
query = """apiserver_request:availability30d{verb="all"}"""
metrics = get_all_metrics(query)
expected = {"apiserver_request:availability30d"}
self.assertCountEqual(
set(metrics), expected, self.error_msg(metrics, expected, query))
def test_apiserver_02(self):
query = """100 * (apiserver_request:availability30d{
verb="all"} - 0.990000)"""
metrics = get_all_metrics(query)
expected = {"apiserver_request:availability30d"}
self.assertCountEqual(
set(metrics), expected, self.error_msg(metrics, expected, query))
def test_apiserver_03(self):
query = """apiserver_request:availability30d{verb="read"}"""
metrics = get_all_metrics(query)
expected = {"apiserver_request:availability30d"}
self.assertCountEqual(
set(metrics), expected, self.error_msg(metrics, expected, query))
def test_apiserver_04(self):
query = """sum by (code) (
code_resource:apiserver_request_total:rate5m{verb="read"})"""
metrics = get_all_metrics(query)
expected = {"code_resource:apiserver_request_total:rate5m"}
self.assertCountEqual(
set(metrics), expected, self.error_msg(metrics, expected, query))
def test_apiserver_05(self):
query = """sum by (resource) (
code_resource:apiserver_request_total:rate5m{verb="read",
code=~"5.."}) / sum by (resource) (
code_resource:apiserver_request_total:rate5m{verb="read"})"""
metrics = get_all_metrics(query)
expected = {"code_resource:apiserver_request_total:rate5m"}
self.assertCountEqual(
set(metrics), expected, self.error_msg(metrics, expected, query))
def test_apiserver_06(self):
query = """cluster_quantile:apiserver_request_duration_seconds:
histogram_quantile{verb="read"}"""
metrics = get_all_metrics(query)
expected = {
"cluster_quantile:apiserver_request_duration_seconds:"
"histogram_quantile"}
self.assertCountEqual(
set(metrics), expected, self.error_msg(metrics, expected, query))
def test_apiserver_07(self):
query = """apiserver_request:availability30d{verb="write"}"""
metrics = get_all_metrics(query)
expected = {"apiserver_request:availability30d"}
self.assertCountEqual(
set(metrics), expected, self.error_msg(metrics, expected, query))
def test_apiserver_08(self):
query = """sum by (code) (
code_resource:apiserver_request_total:rate5m{verb="write"})"""
metrics = get_all_metrics(query)
expected = {"code_resource:apiserver_request_total:rate5m"}
self.assertCountEqual(
set(metrics), expected, self.error_msg(metrics, expected, query))
def test_apiserver_09(self):
query = """sum by (resource) (
code_resource:apiserver_request_total:rate5m{verb="write",
code=~"5.."}) / sum by (resource) (
code_resource:apiserver_request_total:rate5m{verb="write"})"""
metrics = get_all_metrics(query)
expected = {"code_resource:apiserver_request_total:rate5m"}
self.assertCountEqual(
set(metrics), expected, self.error_msg(metrics, expected, query))
def test_apiserver_10(self):
query = """cluster_quantile:apiserver_request_duration_seconds:
histogram_quantile{verb="write"}"""
metrics = get_all_metrics(query)
expected = {
"cluster_quantile:apiserver_request_duration_seconds"
":histogram_quantile"}
self.assertCountEqual(
set(metrics), expected, self.error_msg(metrics, expected, query))
def test_apiserver_11(self):
query = """sum(rate(workqueue_adds_total{
job="kube-apiserver", instance=~"$instance",
cluster="$cluster"}[5m])) by (instance, name)"""
metrics = get_all_metrics(query)
expected = {"workqueue_adds_total"}
self.assertCountEqual(
set(metrics), expected, self.error_msg(metrics, expected, query))
def test_apiserver_12(self):
query = """sum(rate(workqueue_depth{
job="kube-apiserver", instance=~"$instance",
cluster="$cluster"}[5m])) by (instance, name)"""
metrics = get_all_metrics(query)
expected = {"workqueue_depth"}
self.assertCountEqual(
set(metrics), expected, self.error_msg(metrics, expected, query))
def test_apiserver_13(self):
query = """histogram_quantile(0.99, sum(rate(
workqueue_queue_duration_seconds_bucket{
job="kube-apiserver", instance=~"$instance", cluster="$cluster"}[5m]))
by (instance, name, le))"""
metrics = get_all_metrics(query)
expected = {"workqueue_queue_duration_seconds_bucket"}
self.assertCountEqual(
set(metrics), expected, self.error_msg(metrics, expected, query))
def test_apiserver_14(self):
query = """etcd_helper_cache_entry_total{
job="kube-apiserver", instance=~"$instance", cluster="$cluster"}"""
metrics = get_all_metrics(query)
expected = {"etcd_helper_cache_entry_total"}
self.assertCountEqual(
set(metrics), expected, self.error_msg(metrics, expected, query))
def test_apiserver_15(self):
query = """sum(rate(etcd_helper_cache_hit_total{
job="kube-apiserver",instance=~"$instance",
cluster="$cluster"}[5m])) by (instance)"""
metrics = get_all_metrics(query)
expected = {"etcd_helper_cache_hit_total"}
self.assertCountEqual(
set(metrics), expected, self.error_msg(metrics, expected, query))
def test_apiserver_16(self):
query = """sum(rate(etcd_helper_cache_miss_total{
job="kube-apiserver",instance=~"$instance",
cluster="$cluster"}[5m])) by (instance)"""
metrics = get_all_metrics(query)
expected = {"etcd_helper_cache_miss_total"}
self.assertCountEqual(
set(metrics), expected, self.error_msg(metrics, expected, query))
def test_apiserver_17(self):
query = """histogram_quantile(0.99,sum(rate(
etcd_request_cache_get_duration_seconds_bucket{
job="kube-apiserver",instance=~"$instance", cluster="$cluster"}[5m]))
by (instance, le))"""
metrics = get_all_metrics(query)
expected = {"etcd_request_cache_get_duration_seconds_bucket"}
self.assertCountEqual(
set(metrics), expected, self.error_msg(metrics, expected, query))
def test_apiserver_18(self):
query = """histogram_quantile(0.99,sum(rate(
etcd_request_cache_add_duration_seconds_bucket{
job="kube-apiserver",instance=~"$instance", cluster="$cluster"}[5m]))
by (instance, le))"""
metrics = get_all_metrics(query)
expected = {"etcd_request_cache_add_duration_seconds_bucket"}
self.assertCountEqual(
set(metrics), expected, self.error_msg(metrics, expected, query))
def test_apiserver_19(self):
query = """process_resident_memory_bytes{
job="kube-apiserver",instance=~"$instance", cluster="$cluster"}"""
metrics = get_all_metrics(query)
expected = {"process_resident_memory_bytes"}
self.assertCountEqual(
set(metrics), expected, self.error_msg(metrics, expected, query))
def test_apiserver_20(self):
query = """rate(process_cpu_seconds_total{
job="kube-apiserver",instance=~"$instance", cluster="$cluster"}[5m])"""
metrics = get_all_metrics(query)
expected = {"process_cpu_seconds_total"}
self.assertCountEqual(
set(metrics), expected, self.error_msg(metrics, expected, query))
def test_apiserver_21(self):
query = """go_goroutines{job="kube-apiserver",
instance=~"$instance", cluster="$cluster"}"""
metrics = get_all_metrics(query)
expected = {"go_goroutines"}
self.assertCountEqual(
set(metrics), expected, self.error_msg(metrics, expected, query))
class TestKubeletTokenize(unittest.TestCase, BaseMessage):
def test_kubelet_01(self):
query = """sort_desc(sum(irate(container_network_receive_bytes_total{
namespace=~".+"}[$interval:$resolution])) by (namespace))"""
metrics = get_all_metrics(query)
expected = {"container_network_receive_bytes_total"}
self.assertCountEqual(
set(metrics), expected, self.error_msg(metrics, expected, query))
def test_kubelet_02(self):
query = """sort_desc(sum(irate(container_network_transmit_bytes_total{
namespace=~".+"}[$interval:$resolution])) by (namespace))"""
metrics = get_all_metrics(query)
expected = {"container_network_transmit_bytes_total"}
self.assertCountEqual(
set(metrics), expected, self.error_msg(metrics, expected, query))
def test_kubelet_03(self):
query = """sort_desc(sum(irate(container_network_receive_bytes_total{
namespace=~".+"}[$interval:$resolution])) by (namespace))"""
metrics = get_all_metrics(query)
expected = {"container_network_receive_bytes_total"}
self.assertCountEqual(
set(metrics), expected, self.error_msg(metrics, expected, query))
def test_kubelet_04(self):
query = """sort_desc(sum(irate(container_network_transmit_bytes_total{
namespace=~".+"}[$interval:$resolution])) by (namespace))"""
metrics = get_all_metrics(query)
expected = {"container_network_transmit_bytes_total"}
self.assertCountEqual(
set(metrics), expected, self.error_msg(metrics, expected, query))
def test_kubelet_05(self):
query = """sort_desc(avg(irate(container_network_receive_bytes_total{
namespace=~".+"}[$interval:$resolution])) by (namespace))"""
metrics = get_all_metrics(query)
expected = {"container_network_receive_bytes_total"}
self.assertCountEqual(
set(metrics), expected, self.error_msg(metrics, expected, query))
def test_kubelet_06(self):
query = """sort_desc(avg(irate(container_network_transmit_bytes_total{
namespace=~".+"}[$interval:$resolution])) by (namespace))"""
metrics = get_all_metrics(query)
expected = {"container_network_transmit_bytes_total"}
self.assertCountEqual(
set(metrics), expected, self.error_msg(metrics, expected, query))
def test_kubelet_07(self):
query = """sort_desc(sum(irate(container_network_receive_packets_total{
namespace=~".+"}[$interval:$resolution])) by (namespace))"""
metrics = get_all_metrics(query)
expected = {"container_network_receive_packets_total"}
self.assertCountEqual(
set(metrics), expected, self.error_msg(metrics, expected, query))
def test_kubelet_08(self):
query = """sort_desc(sum(irate(container_network_transmit_packets_total{
namespace=~".+"}[$interval:$resolution])) by (namespace))"""
metrics = get_all_metrics(query)
expected = {"container_network_transmit_packets_total"}
self.assertCountEqual(
set(metrics), expected, self.error_msg(metrics, expected, query))
def test_kubelet_09(self):
query = """sort_desc(sum(irate(
container_network_receive_packets_dropped_total{namespace=~".+"}
[$interval:$resolution])) by (namespace))"""
metrics = get_all_metrics(query)
expected = {"container_network_receive_packets_dropped_total"}
self.assertCountEqual(
set(metrics), expected, self.error_msg(metrics, expected, query))
def test_kubelet_10(self):
query = """sort_desc(sum(irate(
container_network_transmit_packets_dropped_total{namespace=~".+"}
[$interval:$resolution])) by (namespace))"""
metrics = get_all_metrics(query)
expected = {"container_network_transmit_packets_dropped_total"}
self.assertCountEqual(
set(metrics), expected, self.error_msg(metrics, expected, query))
def test_kubelet_11(self):
query = """sort_desc(avg(irate(
container_network_receive_bytes_total{namespace=~".+"}
[$interval:$resolution])) by (namespace))"""
metrics = get_all_metrics(query)
expected = {"container_network_receive_bytes_total"}
self.assertCountEqual(
set(metrics), expected, self.error_msg(metrics, expected, query))
def test_kubelet_12(self):
query = """sort_desc(avg(irate(
container_network_transmit_bytes_total{namespace=~".+"}
[$interval:$resolution])) by (namespace))"""
metrics = get_all_metrics(query)
expected = {"container_network_transmit_bytes_total"}
self.assertCountEqual(
set(metrics), expected, self.error_msg(metrics, expected, query))
def test_kubelet_13(self):
query = """sort_desc(sum(irate(
container_network_receive_bytes_total{namespace=~".+"}
[$interval:$resolution])) by (namespace))"""
metrics = get_all_metrics(query)
expected = {"container_network_receive_bytes_total"}
self.assertCountEqual(
set(metrics), expected, self.error_msg(metrics, expected, query))
def test_kubelet_14(self):
query = """sort_desc(sum(irate(
container_network_transmit_bytes_total{namespace=~".+"}
[$interval:$resolution])) by (namespace))"""
metrics = get_all_metrics(query)
expected = {"container_network_transmit_bytes_total"}
self.assertCountEqual(
set(metrics), expected, self.error_msg(metrics, expected, query))
def test_kubelet_15(self):
query = """sort_desc(sum(irate(
container_network_receive_packets_total{namespace=~".+"}
[$interval:$resolution])) by (namespace))"""
metrics = get_all_metrics(query)
expected = {"container_network_receive_packets_total"}
self.assertCountEqual(
set(metrics), expected, self.error_msg(metrics, expected, query))
def test_kubelet_16(self):
query = """sort_desc(sum(irate(
container_network_transmit_packets_total{namespace=~".+"}
[$interval:$resolution])) by (namespace))"""
metrics = get_all_metrics(query)
expected = {"container_network_transmit_packets_total"}
self.assertCountEqual(
set(metrics), expected, self.error_msg(metrics, expected, query))
def test_kubelet_17(self):
query = """sort_desc(sum(irate(
container_network_receive_packets_dropped_total{namespace=~".+"}
[$interval:$resolution])) by (namespace))"""
metrics = get_all_metrics(query)
expected = {"container_network_receive_packets_dropped_total"}
self.assertCountEqual(
set(metrics), expected, self.error_msg(metrics, expected, query))
def test_kubelet_18(self):
query = """sort_desc(sum(irate(
container_network_transmit_packets_dropped_total{namespace=~".+"}
[$interval:$resolution])) by (namespace))"""
metrics = get_all_metrics(query)
expected = {"container_network_transmit_packets_dropped_total"}
self.assertCountEqual(
set(metrics), expected, self.error_msg(metrics, expected, query))
def test_kubelet_19(self):
query = """sort_desc(sum(rate(
node_netstat_Tcp_RetransSegs[$interval:$resolution]) /
rate(node_netstat_Tcp_OutSegs[$interval:$resolution]))
by (instance))"""
metrics = get_all_metrics(query)
expected = {"node_netstat_Tcp_RetransSegs", "node_netstat_Tcp_OutSegs"}
self.assertCountEqual(
set(metrics), expected, self.error_msg(metrics, expected, query))
def test_kubelet_20(self):
query = """sort_desc(sum(rate(
node_netstat_TcpExt_TCPSynRetrans[$interval:$resolution]) /
rate(node_netstat_Tcp_RetransSegs[$interval:$resolution]))
by (instance))"""
metrics = get_all_metrics(query)
expected = {"node_netstat_TcpExt_TCPSynRetrans",
"node_netstat_Tcp_RetransSegs"}
self.assertCountEqual(
set(metrics), expected, self.error_msg(metrics, expected, query))
class TestControllerManagerTokenize(unittest.TestCase, BaseMessage):
def test_controllermanager_01(self):
query = """sum(up{job="kube-controller-manager"})"""
metrics = get_all_metrics(query)
expected = {"up"}
self.assertCountEqual(
set(metrics), expected, self.error_msg(metrics, expected, query))
def test_controllermanager_02(self):
query = """sum(rate(workqueue_adds_total{
job="kube-controller-manager", instance=~"$instance"}[5m]))
by (instance, name)"""
metrics = get_all_metrics(query)
expected = {"workqueue_adds_total"}
self.assertCountEqual(
set(metrics), expected, self.error_msg(metrics, expected, query))
def test_controllermanager_03(self):
query = """sum(rate(workqueue_depth{
job="kube-controller-manager", instance=~"$instance"}[5m]))
by (instance, name)"""
metrics = get_all_metrics(query)
expected = {"workqueue_depth"}
self.assertCountEqual(
set(metrics), expected, self.error_msg(metrics, expected, query))
def test_controllermanager_04(self):
query = """histogram_quantile(0.99, sum(rate(
workqueue_queue_duration_seconds_bucket{
job="kube-controller-manager", instance=~"$instance"}[5m]))
by (instance, name, le))"""
metrics = get_all_metrics(query)
expected = {"workqueue_queue_duration_seconds_bucket"}
self.assertCountEqual(
set(metrics), expected, self.error_msg(metrics, expected, query))
def test_controllermanager_05(self):
query = """sum(rate(rest_client_requests_total{
job="kube-controller-manager",
instance=~"$instance",code=~"2.."}[5m]))"""
metrics = get_all_metrics(query)
expected = {"rest_client_requests_total"}
self.assertCountEqual(
set(metrics), expected, self.error_msg(metrics, expected, query))
def test_controllermanager_06(self):
query = """sum(rate(rest_client_requests_total{
job="kube-controller-manager",
instance=~"$instance",code=~"3.."}[5m]))"""
metrics = get_all_metrics(query)
expected = {"rest_client_requests_total"}
self.assertCountEqual(
set(metrics), expected, self.error_msg(metrics, expected, query))
def test_controllermanager_07(self):
query = """sum(rate(rest_client_requests_total{
job="kube-controller-manager",
instance=~"$instance",code=~"4.."}[5m]))"""
metrics = get_all_metrics(query)
expected = {"rest_client_requests_total"}
self.assertCountEqual(
set(metrics), expected, self.error_msg(metrics, expected, query))
def test_controllermanager_08(self):
query = """sum(rate(rest_client_requests_total{
job="kube-controller-manager",
instance=~"$instance",code=~"5.."}[5m]))"""
metrics = get_all_metrics(query)
expected = {"rest_client_requests_total"}
self.assertCountEqual(
set(metrics), expected, self.error_msg(metrics, expected, query))
def test_controllermanager_09(self):
query = """histogram_quantile(0.99, sum(rate(
rest_client_request_latency_seconds_bucket{
job="kube-controller-manager",
instance=~"$instance", verb="POST"}[5m]))
by (verb, url, le))"""
metrics = get_all_metrics(query)
expected = {"rest_client_request_latency_seconds_bucket"}
self.assertCountEqual(
set(metrics), expected, self.error_msg(metrics, expected, query))
def test_controllermanager_10(self):
query = """histogram_quantile(0.99, sum(rate(
rest_client_request_latency_seconds_bucket{
job="kube-controller-manager", instance=~"$instance", verb="GET"}[5m]))
by (verb, url, le))"""
metrics = get_all_metrics(query)
expected = {"rest_client_request_latency_seconds_bucket"}
self.assertCountEqual(
set(metrics), expected, self.error_msg(metrics, expected, query))
def test_controllermanager_11(self):
query = """process_resident_memory_bytes{
job="kube-controller-manager",instance=~"$instance"}"""
metrics = get_all_metrics(query)
expected = {"process_resident_memory_bytes"}
self.assertCountEqual(
set(metrics), expected, self.error_msg(metrics, expected, query))
def test_controllermanager_12(self):
query = """rate(process_cpu_seconds_total{
job="kube-controller-manager",instance=~"$instance"}[5m])"""
metrics = get_all_metrics(query)
expected = {"process_cpu_seconds_total"}
self.assertCountEqual(
set(metrics), expected, self.error_msg(metrics, expected, query))
def test_controllermanager_13(self):
query = """go_goroutines{
job="kube-controller-manager",instance=~"$instance"}"""
metrics = get_all_metrics(query)
expected = {"go_goroutines"}
self.assertCountEqual(
set(metrics), expected, self.error_msg(metrics, expected, query))
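# --- Added for clarity: the tests above only pin down the contract of
# get_all_metrics (return every metric name referenced by a PromQL query,
# including colon-separated recording-rule names, while ignoring functions,
# label matchers, grouping clauses, and template variables). The real
# implementation is defined earlier in this file; the regex-based sketch
# below is an assumption illustrating one way to satisfy that contract,
# not the author's code (metric names split across source lines would need
# whitespace normalization first).
import re

_PROMQL_KEYWORDS = {
    "sum", "min", "max", "avg", "count", "rate", "irate", "delta",
    "sort_desc", "histogram_quantile", "by", "on", "group_left",
    "group_right", "and", "or", "unless", "offset", "bool",
}

def get_all_metrics_sketch(query):
    # Drop label matchers {...} and range/subquery selectors [...] so label
    # names and "$instance"-style template variables are never mistaken for
    # metric names.
    stripped = re.sub(r"\{[^}]*\}", " ", query)
    stripped = re.sub(r"\[[^\]]*\]", " ", stripped)
    # Drop grouping/matching clauses such as "by (instance, le)".
    stripped = re.sub(
        r"\b(by|on|group_left|group_right)\s*\([^)]*\)", " ", stripped)
    # Metric names may contain colons (recording rules), e.g.
    # "apiserver_request:availability30d".
    tokens = re.findall(r"[A-Za-z_:][A-Za-z0-9_:]*", stripped)
    return {t for t in tokens if t not in _PROMQL_KEYWORDS}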
| 43.940265
| 80
| 0.656135
| 4,268
| 39,722
| 5.81373
| 0.061856
| 0.117882
| 0.051344
| 0.071092
| 0.929029
| 0.914682
| 0.893201
| 0.870753
| 0.846371
| 0.828396
| 0
| 0.010447
| 0.21444
| 39,722
| 903
| 81
| 43.988926
| 0.784739
| 0.001511
| 0
| 0.688295
| 0
| 0
| 0.410329
| 0.306057
| 0
| 0
| 0
| 0
| 0.125954
| 1
| 0.127226
| false
| 0
| 0.002545
| 0.002545
| 0.139949
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 6
| 52f2255e479f70a2993e96f39b24bf9c63835132
| 137
| py
| Python
| core/python/dev_test.py
| yunnant/kungfu
| 03dba19c922a5950068bd2d223488b8543ad8dd1
| ["Apache-2.0"] | 1
| 2020-06-16T01:19:49.000Z
| 2020-06-16T01:19:49.000Z
| core/python/dev_test.py
| yunnant/kungfu
| 03dba19c922a5950068bd2d223488b8543ad8dd1
| ["Apache-2.0"] | 1
| 2019-08-23T01:52:33.000Z
| 2019-08-23T01:52:33.000Z
| core/python/dev_test.py
| yunnant/kungfu
| 03dba19c922a5950068bd2d223488b8543ad8dd1
| ["Apache-2.0"] | null | null | null |
from env import setup_environment_variables
if __name__ == '__main__':
setup_environment_variables()
from test import __main__
| 19.571429
| 43
| 0.781022
| 16
| 137
| 5.6875
| 0.625
| 0.351648
| 0.549451
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.167883
| 137
| 6
| 44
| 22.833333
| 0.798246
| 0
| 0
| 0
| 0
| 0
| 0.058824
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 6
| eabfe32613f655679b575dae7969200abbca3565
| 24
| py
| Python
| scripts/driver.py
| ankit-vaghela30/Distributed-Malware-classification
| 5479b5a9590c1ec436d937b287b7ffe08ff568b1
| ["MIT"] | 3
| 2021-10-02T18:19:58.000Z
| 2021-10-31T13:40:37.000Z
| scripts/driver.py
| ankit-vaghela30/Distributed-Malware-classification
| 5479b5a9590c1ec436d937b287b7ffe08ff568b1
| ["MIT"] | null | null | null
| scripts/driver.py
| ankit-vaghela30/Distributed-Malware-classification
| 5479b5a9590c1ec436d937b287b7ffe08ff568b1
| ["MIT"] | null | null | null |
import main
main.main()
| 8
| 11
| 0.75
| 4
| 24
| 4.5
| 0.5
| 0.888889
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.125
| 24
| 2
| 12
| 12
| 0.857143
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 1
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 6
| d826abae2351c468d761d7a4548c4aebb110824f
| 45
| py
| Python
| metrics/CD_EMD/cd/chamferdist/__init__.py
| JiazeWang/SP-GAN
| 455003f78b1160ebe0a2056005b069808c0df35b
| ["MIT"] | 73
| 2021-05-11T12:00:29.000Z
| 2022-03-31T09:40:12.000Z
| metrics/CD_EMD/cd/chamferdist/__init__.py
| JiazeWang/SP-GAN
| 455003f78b1160ebe0a2056005b069808c0df35b
| ["MIT"] | 6
| 2021-08-18T13:03:43.000Z
| 2022-03-30T04:48:29.000Z
| metrics/CD_EMD/cd/chamferdist/__init__.py
| JiazeWang/SP-GAN
| 455003f78b1160ebe0a2056005b069808c0df35b
| ["MIT"] | 13
| 2021-08-28T20:09:13.000Z
| 2022-03-20T12:42:51.000Z
|
from .ChamferDistance import ChamferDistance
| 22.5
| 44
| 0.888889
| 4
| 45
| 10
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.088889
| 45
| 1
| 45
| 45
| 0.97561
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| 6
| d83c42031dd26c1475dc86270d7d4aea23d980e1
| 179
| py
| Python
| secondProject/driveTest/driveManager/views.py
| loic9654/Djangodev
| 2babb235d68f508c64171a146be8483009dea7f7
| ["Apache-2.0"] | null | null | null
| secondProject/driveTest/driveManager/views.py
| loic9654/Djangodev
| 2babb235d68f508c64171a146be8483009dea7f7
| ["Apache-2.0"] | null | null | null
| secondProject/driveTest/driveManager/views.py
| loic9654/Djangodev
| 2babb235d68f508c64171a146be8483009dea7f7
| ["Apache-2.0"] | null | null | null |
from django.shortcuts import render
from .models import Project, Observation
# Get projects and display them
def index(request):
return render(request, 'projects/index.html')
| 25.571429
| 49
| 0.782123
| 24
| 179
| 5.833333
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.139665
| 179
| 7
| 49
| 25.571429
| 0.909091
| 0.156425
| 0
| 0
| 0
| 0
| 0.126667
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| false
| 0
| 0.5
| 0.25
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 6
| dc1f5622ae1fd345e067fc739243864c0168fca5
| 156
| py
| Python
| 04/py/q5.py
| RussellDash332/practice-makes-perfect
| 917822b461550a2e3679351e467362f95d9e428d
| ["MIT"] | 2
| 2021-11-18T06:22:09.000Z
| 2021-12-25T09:52:57.000Z
| 04/py/q5.py
| RussellDash332/practice-makes-perfect
| 917822b461550a2e3679351e467362f95d9e428d
| ["MIT"] | 2
| 2021-11-17T16:28:00.000Z
| 2021-12-01T09:59:40.000Z
| 04/py/q5.py
| RussellDash332/practice-makes-perfect
| 917822b461550a2e3679351e467362f95d9e428d
| ["MIT"] | null | null | null |
def foo(x):
def bar(x, y):
return lambda y: y(x)
return lambda y: bar(x, y)
print(foo(lambda x: x**3)(lambda x: x**2)(lambda x: x)(4))
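# Step-by-step evaluation of the call chain above: foo(cube) returns
# `lambda y: bar(cube, y)`. Applying it to `lambda x: x**2` calls
# bar(cube, square), whose result `lambda y: y(cube)` rebinds y, so the
# squaring function is silently discarded. Applying that to the identity
# `lambda x: x` evaluates identity(cube) == cube, and cube(4) == 4**3,
# so this prints 64.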
| 26
| 58
| 0.532051
| 32
| 156
| 2.59375
| 0.34375
| 0.253012
| 0.289157
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.026316
| 0.269231
| 156
| 6
| 58
| 26
| 0.701754
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.4
| false
| 0
| 0
| 0.2
| 0.8
| 0.2
| 0
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 6
| dc2ad14c2d877bc8d62f79f094a997c31725bf69
| 70
| py
| Python
| library/pycount/src/pycount/__init__.py
| introlab/demo_integration
| fb74f2e70fc690b39e581430b83b3e66a35d756f
| ["BSD-3-Clause"] | 1
| 2021-06-18T15:58:42.000Z
| 2021-06-18T15:58:42.000Z
| library/pycount/src/pycount/__init__.py
| introlab/demo_integration
| fb74f2e70fc690b39e581430b83b3e66a35d756f
| ["BSD-3-Clause"] | 3
| 2021-06-08T19:11:06.000Z
| 2021-07-01T18:38:17.000Z
| library/pycount/src/pycount/__init__.py
| introlab/demo_integration
| fb74f2e70fc690b39e581430b83b3e66a35d756f
| ["BSD-3-Clause"] | null | null | null |
from .characters import count_characters, count_characters_ignoreCase
| 35
| 69
| 0.9
| 8
| 70
| 7.5
| 0.625
| 0.5
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.071429
| 70
| 1
| 70
| 70
| 0.923077
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| 6
| dc8abf9d6d88835f1ea4e4bda176af7c81e40329
| 285
| py
| Python
| torchreid/data/__init__.py
| qw85639229/hardest
| ef86536dbbe1089248e34afbbb7bb513f97f58f1
| ["MIT"] | 21
| 2020-10-13T01:33:31.000Z
| 2022-01-04T15:58:31.000Z
| torchreid/data/__init__.py
| qw85639229/hardest
| ef86536dbbe1089248e34afbbb7bb513f97f58f1
| ["MIT"] | 10
| 2020-11-18T07:40:22.000Z
| 2021-10-05T07:58:25.000Z
| torchreid/data/__init__.py
| qw85639229/hardest
| ef86536dbbe1089248e34afbbb7bb513f97f58f1
| ["MIT"] | 7
| 2020-11-19T08:40:27.000Z
| 2022-02-05T06:24:08.000Z
|
from __future__ import absolute_import
from __future__ import print_function
from .datasets import Dataset, ImageDataset, VideoDataset
from .datasets import register_image_dataset
from .datasets import register_video_dataset
from .datamanager import ImageDataManager, VideoDataManager
| 40.714286
| 59
| 0.880702
| 33
| 285
| 7.181818
| 0.484848
| 0.151899
| 0.227848
| 0.219409
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.094737
| 285
| 7
| 59
| 40.714286
| 0.918605
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0.166667
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| 6
| f49eded389e556e54b5a7ef5f371c8e08a442fba
| 2,396
| py
| Python
| tests/avs_client/test_device.py
| Yud07/alexa-voice-service-client
| 8136dbe8ac426f6323001b7d42edc8d937f9a933
| ["MIT"] | null | null | null
| tests/avs_client/test_device.py
| Yud07/alexa-voice-service-client
| 8136dbe8ac426f6323001b7d42edc8d937f9a933
| ["MIT"] | null | null | null
| tests/avs_client/test_device.py
| Yud07/alexa-voice-service-client
| 8136dbe8ac426f6323001b7d42edc8d937f9a933
| ["MIT"] | 2
| 2018-07-12T19:56:42.000Z
| 2018-07-20T23:56:35.000Z
|
import pytest
from avs_client.avs_client import device
@pytest.fixture
def manager():
return device.DeviceManager()
def test_default_device_state(manager):
assert manager.build_device_state() == [
{
'header': {
'namespace': 'AudioPlayer',
'name': 'PlaybackState'
},
'payload': {
'token': '',
'offsetInMilliseconds': 0,
'playerActivity': 'IDLE'
}
},
{
'header': {
'namespace': 'Speaker',
'name': 'VolumeState'
},
'payload': {
'volume': 100,
'muted': False,
}
},
{
'header': {
'namespace': 'SpeechSynthesizer',
'name': 'SpeechState'
},
'payload': {
'token': '',
'offsetInMilliseconds': 0,
'playerActivity': 'FINISHED'
}
}
]
def test_default_device_state_extra_context(manager):
context = {
'header': {
'namespace': 'Edgar',
'name': 'RoomState'
},
'payload': {
'room': 'kitchen'
}
}
assert manager.build_device_state(context) == [
{
'header': {
'namespace': 'AudioPlayer',
'name': 'PlaybackState'
},
'payload': {
'token': '',
'offsetInMilliseconds': 0,
'playerActivity': 'IDLE'
}
},
{
'header': {
'namespace': 'Speaker',
'name': 'VolumeState'
},
'payload': {
'volume': 100,
'muted': False,
}
},
{
'header': {
'namespace': 'SpeechSynthesizer',
'name': 'SpeechState'
},
'payload': {
'token': '',
'offsetInMilliseconds': 0,
'playerActivity': 'FINISHED'
}
},
{
'header': {
'namespace': 'Edgar',
'name': 'RoomState'
},
'payload': {
'room': 'kitchen'
}
}
]
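# --- Added sketch: the two tests above fully determine build_device_state's
# observable behaviour (three default component states, with an optional
# extra context entry appended). The class below is a minimal illustration
# of that contract under those assumptions; the real device.DeviceManager
# in avs_client may be implemented differently.
class DeviceManagerSketch:
    def build_device_state(self, extra_context=None):
        # Default state reported for the three AVS components.
        state = [
            {'header': {'namespace': 'AudioPlayer', 'name': 'PlaybackState'},
             'payload': {'token': '', 'offsetInMilliseconds': 0,
                         'playerActivity': 'IDLE'}},
            {'header': {'namespace': 'Speaker', 'name': 'VolumeState'},
             'payload': {'volume': 100, 'muted': False}},
            {'header': {'namespace': 'SpeechSynthesizer',
                        'name': 'SpeechState'},
             'payload': {'token': '', 'offsetInMilliseconds': 0,
                         'playerActivity': 'FINISHED'}},
        ]
        # Callers may supply one extra context dict, appended verbatim.
        if extra_context is not None:
            state.append(extra_context)
        return state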
| 23.490196
| 53
| 0.365609
| 122
| 2,396
| 7.065574
| 0.344262
| 0.139211
| 0.148492
| 0.153132
| 0.844548
| 0.719258
| 0.719258
| 0.719258
| 0.600928
| 0.600928
| 0
| 0.00841
| 0.503756
| 2,396
| 101
| 54
| 23.722772
| 0.716569
| 0
| 0
| 0.537634
| 0
| 0
| 0.250417
| 0
| 0
| 0
| 0
| 0
| 0.021505
| 1
| 0.032258
| false
| 0
| 0.021505
| 0.010753
| 0.064516
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 6
| f4ca9970c41ae99b39ccbd65a68785991c66caaa
| 2,427
| py
| Python
| zhmcclient/__init__.py
| vkpro-forks/python-zhmcclient
| eab2dca37cb417d03411450dabf72805214b5ca0
| ["Apache-2.0"] | null | null | null
| zhmcclient/__init__.py
| vkpro-forks/python-zhmcclient
| eab2dca37cb417d03411450dabf72805214b5ca0
| ["Apache-2.0"] | null | null | null
| zhmcclient/__init__.py
| vkpro-forks/python-zhmcclient
| eab2dca37cb417d03411450dabf72805214b5ca0
| ["Apache-2.0"] | null | null | null |
# Copyright 2016-2017 IBM Corp. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
zhmcclient - A pure Python client library for the IBM Z HMC Web Services
API.
For documentation, see TODO: Add link to RTD once available.
"""
from __future__ import absolute_import
from ._version import * # noqa: F401
from ._constants import * # noqa: F401
from ._exceptions import * # noqa: F401
from ._manager import * # noqa: F401
from ._resource import * # noqa: F401
from ._logging import * # noqa: F401
from ._session import * # noqa: F401
from ._timestats import * # noqa: F401
from ._client import * # noqa: F401
from ._cpc import * # noqa: F401
from ._lpar import * # noqa: F401
from ._partition import * # noqa: F401
from ._activation_profile import * # noqa: F401
from ._adapter import * # noqa: F401
from ._nic import * # noqa: F401
from ._hba import * # noqa: F401
from ._virtual_function import * # noqa: F401
from ._virtual_switch import * # noqa: F401
from ._port import * # noqa: F401
from ._notification import * # noqa: F401
from ._metrics import * # noqa: F401
from ._utils import * # noqa: F401
from ._console import * # noqa: F401
from ._user import * # noqa: F401
from ._user_role import * # noqa: F401
from ._user_pattern import * # noqa: F401
from ._password_rule import * # noqa: F401
from ._task import * # noqa: F401
from ._ldap_server_definition import * # noqa: F401
from ._unmanaged_cpc import * # noqa: F401
from ._storage_group import * # noqa: F401
from ._storage_volume import * # noqa: F401
from ._virtual_storage_resource import * # noqa: F401
from ._storage_group_template import * # noqa: F401
from ._storage_volume_template import * # noqa: F401
| 41.135593
| 74
| 0.667903
| 315
| 2,427
| 4.965079
| 0.403175
| 0.223785
| 0.313299
| 0.391304
| 0.205243
| 0.078005
| 0
| 0
| 0
| 0
| 0
| 0.06457
| 0.253399
| 2,427
| 58
| 75
| 41.844828
| 0.798565
| 0.451998
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.017241
| 0
| 1
| 0
| true
| 0.027778
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 6
| 76228406605f4687607899e249b19479fa1c3905
| 43
| py
| Python
| hoverpy/__init__.py
| alvarocavalcanti/hoverpy
| e153ec57f80634019d827d378f184c01fedc5a0e
| ["Apache-2.0"] | 88
| 2016-11-10T18:05:28.000Z
| 2021-04-26T05:46:34.000Z
| hoverpy/__init__.py
| alvarocavalcanti/hoverpy
| e153ec57f80634019d827d378f184c01fedc5a0e
| ["Apache-2.0"] | 11
| 2016-12-10T21:03:25.000Z
| 2018-10-05T09:46:21.000Z
| hoverpy/__init__.py
| alvarocavalcanti/hoverpy
| e153ec57f80634019d827d378f184c01fedc5a0e
| ["Apache-2.0"] | 10
| 2016-11-10T19:02:28.000Z
| 2018-10-22T10:17:55.000Z
|
from .hp import *
from .decorators import *
| 21.5
| 25
| 0.744186
| 6
| 43
| 5.333333
| 0.666667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.162791
| 43
| 2
| 25
| 21.5
| 0.888889
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| 6
| 762bd9624d5da0f955d6c9e2e5f60211974408e8
| 27
| py
| Python
| tests/test_init.py
| AgentIQ/aiq-airflow
| e4463e00602dcdae26334d252502781534feeac8
| ["Apache-2.0"] | null | null | null
| tests/test_init.py
| AgentIQ/aiq-airflow
| e4463e00602dcdae26334d252502781534feeac8
| ["Apache-2.0"] | 12
| 2020-04-03T17:05:53.000Z
| 2021-12-01T22:55:39.000Z
| tests/test_init.py
| AgentIQ/aiq-airflow
| e4463e00602dcdae26334d252502781534feeac8
| ["Apache-2.0"] | null | null | null |
def test_init():
pass
| 6.75
| 16
| 0.592593
| 4
| 27
| 3.75
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.296296
| 27
| 3
| 17
| 9
| 0.789474
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| true
| 0.5
| 0
| 0
| 0.5
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 6
| 521fe976e8961f3942a21ac8dc40d6944128c1b7
| 24
| py
| Python
| app/__init__.py
| RAV10K1/med_cab_test
| 51e5673d25cb1c0e04344940c76d13b101828774
| ["MIT"] | null | null | null
| app/__init__.py
| RAV10K1/med_cab_test
| 51e5673d25cb1c0e04344940c76d13b101828774
| ["MIT"] | null | null | null
| app/__init__.py
| RAV10K1/med_cab_test
| 51e5673d25cb1c0e04344940c76d13b101828774
| ["MIT"] | null | null | null |
from app.main import API
| 24
| 24
| 0.833333
| 5
| 24
| 4
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.125
| 24
| 1
| 24
| 24
| 0.952381
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| 6
| 522bd6b2f52115f5582f9449c38c5bd762480e44
| 109
| py
| Python
| catag/authorize/views.py
| catnlp/VisualTool
| 26122a5cccced04fa6befa4bfdd21d6352e6c027
| ["MIT"] | null | null | null
| catag/authorize/views.py
| catnlp/VisualTool
| 26122a5cccced04fa6befa4bfdd21d6352e6c027
| ["MIT"] | null | null | null
| catag/authorize/views.py
| catnlp/VisualTool
| 26122a5cccced04fa6befa4bfdd21d6352e6c027
| ["MIT"] | null | null | null |
from django.shortcuts import render
def index(request):
return render(request, "authorize/index.html")
| 18.166667
| 50
| 0.761468
| 14
| 109
| 5.928571
| 0.785714
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.137615
| 109
| 5
| 51
| 21.8
| 0.882979
| 0
| 0
| 0
| 0
| 0
| 0.183486
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0.333333
| 0.333333
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 6
| 527c85b1c933d0ac8f9664fa64296821ae45b3ff
| 101
| py
| Python
| office365/sharepoint/files/checkedOutFile.py
| wreiner/Office365-REST-Python-Client
| 476bbce4f5928a140b4f5d33475d0ac9b0783530
| ["MIT"] | 544
| 2016-08-04T17:10:16.000Z
| 2022-03-31T07:17:20.000Z
| office365/sharepoint/files/checkedOutFile.py
| wreiner/Office365-REST-Python-Client
| 476bbce4f5928a140b4f5d33475d0ac9b0783530
| ["MIT"] | 438
| 2016-10-11T12:24:22.000Z
| 2022-03-31T19:30:35.000Z
| office365/sharepoint/files/checkedOutFile.py
| wreiner/Office365-REST-Python-Client
| 476bbce4f5928a140b4f5d33475d0ac9b0783530
| ["MIT"] | 202
| 2016-08-22T19:29:40.000Z
| 2022-03-30T20:26:15.000Z
|
from office365.sharepoint.base_entity import BaseEntity
class CheckedOutFile(BaseEntity):
pass
| 16.833333
| 55
| 0.821782
| 11
| 101
| 7.454545
| 0.909091
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.034091
| 0.128713
| 101
| 5
| 56
| 20.2
| 0.897727
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.333333
| 0.333333
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 6
| 873ac0d2aa4089cf6a2f38448a5e6f51996ac90d
| 339
| py
| Python
| test/script/struct-test.py
| Fahien/pyspot
| 69a6fc817cdcf9101940025850d647567f5efe3e
| ["MIT"] | 2
| 2018-01-09T13:06:25.000Z
| 2018-02-12T10:05:26.000Z
| test/script/struct-test.py
| Fahien/pyspot
| 69a6fc817cdcf9101940025850d647567f5efe3e
| ["MIT"] | null | null | null
| test/script/struct-test.py
| Fahien/pyspot
| 69a6fc817cdcf9101940025850d647567f5efe3e
| ["MIT"] | null | null | null |
import pyspot
def create_details():
details = pyspot.test.Details(1)
details.thing.value = 1
return details
def send_details( details ):
return details
def compare_details( details ):
return details == pyspot.test.Details(3)
def change_details( details ):
details = pyspot.test.Details(6)
return details
| 16.142857
| 42
| 0.702065
| 43
| 339
| 5.44186
| 0.348837
| 0.299145
| 0.217949
| 0.307692
| 0.264957
| 0
| 0
| 0
| 0
| 0
| 0
| 0.01487
| 0.20649
| 339
| 20
| 43
| 16.95
| 0.855019
| 0
| 0
| 0.25
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0.083333
| 0.166667
| 0.75
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 6
| 5e40df52334048873983c5b1e66637b3d37bea62
| 29
| py
| Python
| test.py
| jross1996/gxuyw-Introduction-to-Open-Source
| 555b3ad7818d93d64dd87f0da43eb00703eb2587
| ["Apache-2.0"] | null | null | null
| test.py
| jross1996/gxuyw-Introduction-to-Open-Source
| 555b3ad7818d93d64dd87f0da43eb00703eb2587
| ["Apache-2.0"] | null | null | null
| test.py
| jross1996/gxuyw-Introduction-to-Open-Source
| 555b3ad7818d93d64dd87f0da43eb00703eb2587
| ["Apache-2.0"] | null | null | null |
print("this is my test file")
| 29
| 29
| 0.724138
| 6
| 29
| 3.5
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.137931
| 29
| 1
| 29
| 29
| 0.84
| 0
| 0
| 0
| 0
| 0
| 0.666667
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
| 6
| 5e52f0dfc36d0012e7a8247d050a167f621eb998
| 25,474
| py
| Python
| src/tt_personal_messages/tt_personal_messages/tests/test_operations.py
| al-arz/the-tale
| 542770257eb6ebd56a5ac44ea1ef93ff4ab19eb5
| ["BSD-3-Clause"] | 1
| 2020-04-02T11:51:20.000Z
| 2020-04-02T11:51:20.000Z
| src/tt_personal_messages/tt_personal_messages/tests/test_operations.py
| al-arz/the-tale
| 542770257eb6ebd56a5ac44ea1ef93ff4ab19eb5
| ["BSD-3-Clause"] | null | null | null
| src/tt_personal_messages/tt_personal_messages/tests/test_operations.py
| al-arz/the-tale
| 542770257eb6ebd56a5ac44ea1ef93ff4ab19eb5
| ["BSD-3-Clause"] | null | null | null |
import time
import asyncio
from aiohttp import test_utils
from tt_web import utils
from tt_web import postgresql as db
from .. import objects
from .. import relations
from .. import operations
from . import helpers
class OperationsTests(helpers.BaseTests):
async def check_account_created(self, number=1, id=666, new_messages_number=0, contacts=[]):
result = await db.sql('SELECT * FROM accounts ORDER BY created_at DESC')
self.assertEqual(len(result), number)
self.assertEqual(result[0]['id'], id)
self.assertEqual(result[0]['new_messages_number'], new_messages_number)
@test_utils.unittest_run_loop
async def test_increment_new_messages(self):
await operations.increment_new_messages(666)
await self.check_account_created(new_messages_number=1)
await operations.increment_new_messages(666)
await operations.increment_new_messages(666)
await self.check_account_created(new_messages_number=3)
@test_utils.unittest_run_loop
async def test_new_messages_number__has_account(self):
await operations.increment_new_messages(666)
await db.sql('UPDATE accounts SET new_messages_number=7')
number = await operations.new_messages_number(666)
self.assertEqual(number, 7)
@test_utils.unittest_run_loop
async def test_new_messages_number__no_account(self):
number = await operations.new_messages_number(666)
self.assertEqual(number, 0)
@test_utils.unittest_run_loop
async def test_read_messages__has_account(self):
await operations.increment_new_messages(666)
await db.sql('UPDATE accounts SET new_messages_number=7')
await operations.read_messages(666)
number = await operations.new_messages_number(666)
self.assertEqual(number, 0)
@test_utils.unittest_run_loop
async def test_read_messages__no_account(self):
await operations.read_messages(666)
number = await operations.new_messages_number(666)
self.assertEqual(number, 0)
@test_utils.unittest_run_loop
async def test_create_visibility(self):
message_1_id = await operations.create_message(sender_id=666, recipients_ids=[1, 3, 7], body='some странный text')
message_2_id = await operations.create_message(sender_id=666, recipients_ids=[1, 3, 7], body='some странный text')
await operations.create_visibility(1, message_1_id)
await operations.create_visibility(2, message_2_id)
result = await db.sql('SELECT account, message FROM visibilities')
self.assertCountEqual([dict(row) for row in result],
[{'account': 1, 'message': message_1_id},
{'account': 2, 'message': message_2_id}])
@test_utils.unittest_run_loop
async def test_add_to_conversation(self):
message_1_id = await operations.create_message(sender_id=666, recipients_ids=[1, 3, 7], body='some странный text')
message_2_id = await operations.create_message(sender_id=666, recipients_ids=[1, 3, 7], body='some странный text')
await operations.add_to_conversation(1, 2, message_1_id)
await operations.add_to_conversation(2, 1, message_2_id)
result = await db.sql('SELECT account_1, account_2, message FROM conversations')
self.assertCountEqual([dict(row) for row in result],
[{'account_1': 1, 'account_2': 2, 'message': message_1_id},
{'account_1': 1, 'account_2': 2, 'message': message_2_id}])
@test_utils.unittest_run_loop
async def test_create_message(self):
message_id = await operations.create_message(sender_id=666, recipients_ids=[1, 3, 7], body='some странный text')
result = await db.sql('SELECT * FROM messages')
self.assertEqual(len(result), 1)
self.assertEqual(result[0]['sender'], 666)
self.assertEqual(result[0]['recipients'], [1, 3, 7])
self.assertEqual(result[0]['body'], 'some странный text')
@test_utils.unittest_run_loop
async def test_send_message__visibilities_created(self):
message_id = await operations.send_message(sender_id=666, recipients_ids=[1, 3, 7], body='some странный text')
result = await db.sql('SELECT account, message, visible FROM visibilities')
self.assertCountEqual([dict(row) for row in result],
[{'account': 666, 'message': message_id, 'visible': True},
{'account': 1, 'message': message_id, 'visible': True},
{'account': 3, 'message': message_id, 'visible': True},
{'account': 7, 'message': message_id, 'visible': True}])
@test_utils.unittest_run_loop
async def test_send_message__conversations_created(self):
message_id = await operations.send_message(sender_id=666, recipients_ids=[1, 3, 7], body='some странный text')
result = await db.sql('SELECT account_1, account_2, message FROM conversations')
self.assertCountEqual([dict(row) for row in result],
[{'account_1': 1, 'account_2': 666, 'message': message_id},
{'account_1': 3, 'account_2': 666, 'message': message_id},
{'account_1': 7, 'account_2': 666, 'message': message_id}])
@test_utils.unittest_run_loop
async def test_send_message__new_messages_increment(self):
await operations.send_message(sender_id=666, recipients_ids=[1, 3, 7], body='some странный text')
await operations.send_message(sender_id=1, recipients_ids=[7], body='some странный text')
result = await db.sql('SELECT id, new_messages_number FROM accounts')
self.assertCountEqual([dict(row) for row in result],
[{'id': 1, 'new_messages_number': 1},
{'id': 3, 'new_messages_number': 1},
{'id': 7, 'new_messages_number': 2}])
@test_utils.unittest_run_loop
async def test_send_message__contacts_created(self):
message_id = await operations.send_message(sender_id=666, recipients_ids=[1, 3, 7], body='some странный text')
contacts = await operations.get_contacts(666)
self.assertCountEqual(contacts, [1, 3, 7])
contacts = await operations.get_contacts(3)
self.assertCountEqual(contacts, [666])
@test_utils.unittest_run_loop
async def test_send_message__duplicate_recipients(self):
message_id = await operations.send_message(sender_id=666, recipients_ids=[1, 3, 7, 3, 7, 7], body='some странный text')
result = await db.sql('SELECT recipients, body FROM messages')
self.assertEqual([row['body'] for row in result], ['some странный text'])
self.assertEqual(len(result[0]['recipients']), 3)
self.assertEqual(set(result[0]['recipients']), {1, 3, 7})
@test_utils.unittest_run_loop
async def test_send_message__sender_is_recipient(self):
message_id = await operations.send_message(sender_id=666, recipients_ids=[666], body='some странный text')
self.assertEqual(message_id, None)
result = await db.sql('SELECT body FROM messages')
self.assertEqual(result, [])
@test_utils.unittest_run_loop
async def test_send_message__remove_sender_from_recipients(self):
message_id = await operations.send_message(sender_id=666, recipients_ids=[1, 3, 666, 7], body='some странный text')
result = await db.sql('SELECT body FROM messages')
self.assertEqual([row['body'] for row in result], ['some странный text'])
result = await db.sql('SELECT id FROM accounts')
self.assertEqual({row['id'] for row in result}, {1, 3, 7})
result = await db.sql('SELECT recipients FROM messages WHERE id=%(id)s', {'id': message_id})
self.assertEqual(set(result[0]['recipients']), {1, 3, 7})
result = await db.sql('SELECT account, message, visible FROM visibilities')
self.assertCountEqual([dict(row) for row in result],
[{'account': 666, 'message': message_id, 'visible': True},
{'account': 1, 'message': message_id, 'visible': True},
{'account': 3, 'message': message_id, 'visible': True},
{'account': 7, 'message': message_id, 'visible': True}])
result = await db.sql('SELECT account_1, account_2, message FROM conversations')
self.assertCountEqual([dict(row) for row in result],
[{'account_1': 1, 'account_2': 666, 'message': message_id},
{'account_1': 3, 'account_2': 666, 'message': message_id},
{'account_1': 7, 'account_2': 666, 'message': message_id}])
contacts = await operations.get_contacts(666)
self.assertCountEqual(contacts, [1, 3, 7])
contacts = await operations.get_contacts(3)
self.assertCountEqual(contacts, [666])
@test_utils.unittest_run_loop
async def test_send_message__duplicate_contacts(self):
await operations.send_message(sender_id=666, recipients_ids=[1, 3, 7], body='1')
await operations.send_message(sender_id=3, recipients_ids=[1, 666], body='2')
contacts = await operations.get_contacts(666)
self.assertCountEqual(contacts, [1, 3, 7])
contacts = await operations.get_contacts(1)
self.assertCountEqual(contacts, [3, 666])
contacts = await operations.get_contacts(3)
self.assertCountEqual(contacts, [1, 666])
contacts = await operations.get_contacts(7)
self.assertCountEqual(contacts, [666])
@test_utils.unittest_run_loop
async def test_hide_message(self):
message_id = await operations.send_message(sender_id=666, recipients_ids=[1, 3, 7], body='some странный text')
await operations.hide_message(666, message_id)
await operations.hide_message(3, message_id)
result = await db.sql('SELECT account, message, visible FROM visibilities')
self.assertCountEqual([dict(row) for row in result],
[{'account': 666, 'message': message_id, 'visible': False},
{'account': 1, 'message': message_id, 'visible': True},
{'account': 3, 'message': message_id, 'visible': False},
{'account': 7, 'message': message_id, 'visible': True}])
@test_utils.unittest_run_loop
async def test_hide_all_messages(self):
message_1_id = await operations.send_message(sender_id=666, recipients_ids=[1, 3, 7], body='some странный text')
message_2_id = await operations.send_message(sender_id=3, recipients_ids=[1, 666], body='some странный text')
await operations.hide_all_messages(666)
await operations.hide_all_messages(1)
result = await db.sql('SELECT account, message, visible FROM visibilities')
self.assertCountEqual([dict(row) for row in result],
[{'account': 666, 'message': message_1_id, 'visible': False},
{'account': 1, 'message': message_1_id, 'visible': False},
{'account': 3, 'message': message_1_id, 'visible': True},
{'account': 7, 'message': message_1_id, 'visible': True},
{'account': 666, 'message': message_2_id, 'visible': False},
{'account': 1, 'message': message_2_id, 'visible': False},
{'account': 3, 'message': message_2_id, 'visible': True}])
@test_utils.unittest_run_loop
async def test_hide_conversation(self):
message_1_id = await operations.send_message(sender_id=666, recipients_ids=[1, 3, 7], body='some странный text')
message_2_id = await operations.send_message(sender_id=3, recipients_ids=[1, 666], body='some странный text')
message_3_id = await operations.send_message(sender_id=666, recipients_ids=[3], body='some странный text')
await operations.hide_conversation(666, 3)
result = await db.sql('SELECT account, message, visible FROM visibilities')
self.assertCountEqual([dict(row) for row in result],
[{'account': 666, 'message': message_1_id, 'visible': False},
{'account': 1, 'message': message_1_id, 'visible': True},
{'account': 3, 'message': message_1_id, 'visible': True},
{'account': 7, 'message': message_1_id, 'visible': True},
{'account': 666, 'message': message_2_id, 'visible': False},
{'account': 1, 'message': message_2_id, 'visible': True},
{'account': 3, 'message': message_2_id, 'visible': True},
{'account': 666, 'message': message_3_id, 'visible': False},
{'account': 3, 'message': message_3_id, 'visible': True} ])
total, messages = await operations.load_conversation(666, 3)
self.assertEqual(total, 0)
total, messages = await operations.load_conversation(3, 666)
self.assertEqual(total, 3)
@test_utils.unittest_run_loop
async def test_remove_old_messages(self):
message_1_id = await operations.send_message(sender_id=1, recipients_ids=[2, 3, 4], body='1')
message_2_id = await operations.send_message(sender_id=2, recipients_ids=[3, 4, 5], body='2')
message_3_id = await operations.send_message(sender_id=3, recipients_ids=[4, 5, 6], body='3')
result = await db.sql('SELECT created_at FROM messages WHERE id=%(id)s', {'id': message_2_id})
await operations.remove_old_messages(accounts_ids=[1, 2, 3], barrier=result[0]['created_at'])
result = await db.sql('SELECT count(*) FROM messages')
self.assertEqual(result[0]['count'], 2)
result = await db.sql('SELECT sender FROM messages')
self.assertEqual({row['sender'] for row in result}, {2, 3})
result = await db.sql('SELECT account, message FROM visibilities')
self.assertCountEqual([dict(row) for row in result],
[{'account': 2, 'message': message_2_id},
{'account': 3, 'message': message_2_id},
{'account': 4, 'message': message_2_id},
{'account': 5, 'message': message_2_id},
{'account': 3, 'message': message_3_id},
{'account': 4, 'message': message_3_id},
{'account': 5, 'message': message_3_id},
{'account': 6, 'message': message_3_id}])
result = await db.sql('SELECT account_1, account_2, message FROM conversations')
self.assertCountEqual([dict(row) for row in result],
[{'account_1': 2, 'account_2': 3, 'message': message_2_id},
{'account_1': 2, 'account_2': 4, 'message': message_2_id},
{'account_1': 2, 'account_2': 5, 'message': message_2_id},
{'account_1': 3, 'account_2': 4, 'message': message_3_id},
{'account_1': 3, 'account_2': 5, 'message': message_3_id},
{'account_1': 3, 'account_2': 6, 'message': message_3_id}])
class LoadMessagesTests(helpers.BaseTests):
async def fill_database(self):
self.messages_ids = [await operations.send_message(sender_id=1, recipients_ids=[2, 3], body='1 ааа'),
await operations.send_message(sender_id=2, recipients_ids=[1, 3], body='2 ббб'),
await operations.send_message(sender_id=1, recipients_ids=[2, 4], body='3 ссс'),
await operations.send_message(sender_id=2, recipients_ids=[1, 4], body='4 ааа'),
await operations.send_message(sender_id=1, recipients_ids=[3, 4], body='5 ббб'),
await operations.send_message(sender_id=2, recipients_ids=[3, 4], body='6 ссс'),
await operations.send_message(sender_id=1, recipients_ids=[5], body='7 ааа'),
await operations.send_message(sender_id=2, recipients_ids=[5], body='8 ббб'),
await operations.send_message(sender_id=1, recipients_ids=[5], body='9 ссс')]
@test_utils.unittest_run_loop
async def test_no_messages(self):
await self.fill_database()
total, messages = await operations.load_messages(666, relations.OWNER_TYPE.random())
self.assertEqual(total, 0)
self.assertEqual(messages, [])
@test_utils.unittest_run_loop
async def test_account_and_type(self):
await self.fill_database()
total, messages = await operations.load_messages(1, relations.OWNER_TYPE.SENDER)
self.assertEqual(total, 5)
self.assertEqual({m.id for m in messages}, set(self.messages_ids[0:9:2]))
total, messages = await operations.load_messages(1, relations.OWNER_TYPE.RECIPIENT)
self.assertEqual(total, 2)
self.assertEqual({m.id for m in messages}, {self.messages_ids[1], self.messages_ids[3]})
total, messages = await operations.load_messages(2, relations.OWNER_TYPE.SENDER)
self.assertEqual(total, 4)
self.assertEqual({m.id for m in messages}, set(self.messages_ids[1:9:2]))
total, messages = await operations.load_messages(2, relations.OWNER_TYPE.RECIPIENT)
self.assertEqual(total, 2)
self.assertEqual({m.id for m in messages}, {self.messages_ids[0], self.messages_ids[2]})
@test_utils.unittest_run_loop
async def test_order(self):
await self.fill_database()
total, messages = await operations.load_messages(1, relations.OWNER_TYPE.SENDER)
self.assertEqual(total, 5)
self.assertEqual([m.id for m in messages], list(reversed(self.messages_ids[0:9:2])))
@test_utils.unittest_run_loop
async def test_text(self):
await self.fill_database()
total, messages = await operations.load_messages(1, relations.OWNER_TYPE.SENDER, text='ааа')
self.assertEqual(total, 2)
self.assertEqual({m.id for m in messages}, {self.messages_ids[0], self.messages_ids[6]})
total, messages = await operations.load_messages(1, relations.OWNER_TYPE.RECIPIENT, text='ааа')
self.assertEqual(total, 1)
self.assertEqual({m.id for m in messages}, {self.messages_ids[3]})
@test_utils.unittest_run_loop
async def test_offset(self):
await self.fill_database()
total, messages = await operations.load_messages(1, relations.OWNER_TYPE.SENDER, offset=1)
self.assertEqual(total, 5)
self.assertEqual({m.id for m in messages}, set(self.messages_ids[0:8:2])) # does not include last record
@test_utils.unittest_run_loop
async def test_limit(self):
await self.fill_database()
total, messages = await operations.load_messages(1, relations.OWNER_TYPE.SENDER, limit=2)
self.assertEqual(total, 5)
self.assertEqual({m.id for m in messages}, set(self.messages_ids[6:9:2]))
@test_utils.unittest_run_loop
async def test_offset_and_limit(self):
await self.fill_database()
total, messages = await operations.load_messages(1, relations.OWNER_TYPE.SENDER, offset=1, limit=2)
self.assertEqual(total, 5)
self.assertEqual({m.id for m in messages}, set(self.messages_ids[4:7:2]))
class LoadConversationTests(helpers.BaseTests):
async def fill_database(self):
self.messages_ids = [await operations.send_message(sender_id=1, recipients_ids=[2, 3], body='1 ааа'),
await operations.send_message(sender_id=2, recipients_ids=[1, 3], body='2 ббб'),
await operations.send_message(sender_id=1, recipients_ids=[2, 4], body='3 ссс'),
await operations.send_message(sender_id=2, recipients_ids=[1, 4], body='4 ааа'),
await operations.send_message(sender_id=1, recipients_ids=[3, 4], body='5 ббб'),
await operations.send_message(sender_id=2, recipients_ids=[3, 4], body='6 ссс'),
await operations.send_message(sender_id=2, recipients_ids=[5], body='10'),
await operations.send_message(sender_id=2, recipients_ids=[5], body='11'),
await operations.send_message(sender_id=1, recipients_ids=[5], body='7 ааа'),
await operations.send_message(sender_id=2, recipients_ids=[5], body='8 ббб'),
await operations.send_message(sender_id=1, recipients_ids=[5], body='9 ссс')]
# load_conversation(account_id, partner_id, offset=0, limit=None):
@test_utils.unittest_run_loop
async def test_no_messages(self):
await self.fill_database()
total, messages = await operations.load_conversation(666, 1)
self.assertEqual(total, 0)
self.assertEqual(messages, [])
total, messages = await operations.load_conversation(3, 5)
self.assertEqual(total, 0)
self.assertEqual(messages, [])
@test_utils.unittest_run_loop
async def test_success(self):
await self.fill_database()
total, messages = await operations.load_conversation(1, 5)
self.assertEqual(total, 2)
self.assertEqual({m.id for m in messages}, {self.messages_ids[-1], self.messages_ids[-3]})
total, messages = await operations.load_conversation(5, 1)
self.assertEqual(total, 2)
self.assertEqual({m.id for m in messages}, {self.messages_ids[-1], self.messages_ids[-3]})
@test_utils.unittest_run_loop
async def test_filter_text(self):
await self.fill_database()
total, messages = await operations.load_conversation(1, 2, text='ааа')
self.assertEqual(total, 2)
self.assertEqual({m.id for m in messages}, {self.messages_ids[0], self.messages_ids[3]})
@test_utils.unittest_run_loop
async def test_success__multiple_recipients(self):
await self.fill_database()
total, messages = await operations.load_conversation(2, 3)
self.assertEqual(total, 2)
self.assertEqual({m.id for m in messages}, {self.messages_ids[1], self.messages_ids[5]})
total, messages = await operations.load_conversation(3, 2)
self.assertEqual(total, 2)
self.assertEqual({m.id for m in messages}, {self.messages_ids[1], self.messages_ids[5]})
@test_utils.unittest_run_loop
async def test_order(self):
await self.fill_database()
total, messages = await operations.load_conversation(1, 5)
self.assertEqual(total, 2)
self.assertEqual([m.id for m in messages], [self.messages_ids[-1], self.messages_ids[-3]])
total, messages = await operations.load_conversation(5, 1)
self.assertEqual(total, 2)
self.assertEqual([m.id for m in messages], [self.messages_ids[-1], self.messages_ids[-3]])
@test_utils.unittest_run_loop
async def test_offset(self):
await self.fill_database()
total, messages = await operations.load_conversation(1, 5, offset=1)
self.assertEqual(total, 2)
self.assertEqual([m.id for m in messages], [self.messages_ids[-3]])
@test_utils.unittest_run_loop
async def test_limit(self):
await self.fill_database()
total, messages = await operations.load_conversation(1, 5, limit=1)
self.assertEqual(total, 2)
self.assertEqual([m.id for m in messages], [self.messages_ids[-1]])
@test_utils.unittest_run_loop
async def test_offset_and_limit(self):
await self.fill_database()
total, messages = await operations.load_conversation(2, 5)
self.assertEqual(total, 3)
self.assertEqual([m.id for m in messages], [self.messages_ids[-2], self.messages_ids[-4], self.messages_ids[-5]])
total, messages = await operations.load_conversation(2, 5, offset=1, limit=1)
self.assertEqual(total, 3)
self.assertEqual([m.id for m in messages], [self.messages_ids[-4]])
class LoadMessageTests(helpers.BaseTests):
async def fill_database(self):
self.messages_ids = [await operations.send_message(sender_id=1, recipients_ids=[2], body='1 ааа')]
@test_utils.unittest_run_loop
async def test_sender(self):
await self.fill_database()
message = await operations.load_message(1, self.messages_ids[0])
self.assertEqual(message.body, '1 ааа')
@test_utils.unittest_run_loop
async def test_recipient(self):
await self.fill_database()
message = await operations.load_message(2, self.messages_ids[0])
self.assertEqual(message.body, '1 ааа')
@test_utils.unittest_run_loop
async def test_no_relation(self):
await self.fill_database()
message = await operations.load_message(3, self.messages_ids[0])
self.assertEqual(message, None)
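# Illustrative usage sketch (assumes the same `operations`/`relations` modules
# and database fixtures exercised by the tests above):
#
#     message_id = await operations.send_message(sender_id=1,
#                                                recipients_ids=[2, 3],
#                                                body='hello')
#     total, messages = await operations.load_messages(
#         1, relations.OWNER_TYPE.SENDER, text='hello', offset=0, limit=10)
#     total, messages = await operations.load_conversation(1, 2)
#     await operations.hide_message(2, message_id)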
| 44.225694
| 127
| 0.635668
| 3,235
| 25,474
| 4.783926
| 0.039567
| 0.099832
| 0.043616
| 0.067201
| 0.907987
| 0.880202
| 0.856552
| 0.827087
| 0.804601
| 0.775459
| 0
| 0.03515
| 0.245034
| 25,474
| 575
| 128
| 44.302609
| 0.769551
| 0.003651
| 0
| 0.569588
| 0
| 0
| 0.109075
| 0.001655
| 0
| 0
| 0
| 0
| 0.239691
| 1
| 0
| false
| 0
| 0.023196
| 0
| 0.033505
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
5e5e44890ca0bbce2bcba6d4c6d9eec3220da96d
| 25,588
|
py
|
Python
|
pyrror/data.py
|
YanickT/Pyrror
|
846a85dd8941bf05de105df0f06a52864810203a
|
[
"MIT"
] | 1
|
2021-09-21T13:21:42.000Z
|
2021-09-21T13:21:42.000Z
|
pyrror/data.py
|
YanickT/Pyrror
|
846a85dd8941bf05de105df0f06a52864810203a
|
[
"MIT"
] | null | null | null |
pyrror/data.py
|
YanickT/Pyrror
|
846a85dd8941bf05de105df0f06a52864810203a
|
[
"MIT"
] | 1
|
2021-09-19T13:32:45.000Z
|
2021-09-19T13:32:45.000Z
|
from pyrror.controls import type_check, instancemethod
from pyrror.unit_helper import unit_control
from pyrror.data_helper import round_data, digits
from pyrror.unit import Unit
from typing import Union
class Data:
"""
Main-Class of the project.
Represents a value with uncertainty
"""
def __init__(self, value: str, error: str, sign: Union[str, Unit] = "", power: int = 0, n: int = 0):
"""
Initiate new value with uncertainty
:param value: str = value
:param error: str = uncertainty
:param sign: Union[str, Unit] = unit of the value
**sign-EBNF:**
S := '"' units '"' | '"' units '/' units '"'
units := unit | unit ';' units
unit := string | string '^' integer
:param power: int = power of the value (for dimensions like mV => power = -3 and unit = 'V')
:param n: int = significant digits of the error (if 0 (Default): get digits from error: str)
"""
type_check((value, str), (error, str))
if n == 0:
self.n = digits(error)
else:
self.n = n
if isinstance(sign, str):
sign = sign.split("/")
if len(sign) > 1:
sign = ["" if s == "1" else s for s in sign]
self.unit = Unit(sign[0], sign[1])
else:
self.unit = Unit(sign[0])
else:
self.unit = sign
self.power = power
self.error = float(error) * 10 ** self.power
self.value = float(value) * 10 ** self.power
round_data(self)
def __str__(self):
"""
Return a string representation of the Data.
:return: str = string representation of the Data
"""
# insert values
if self.n > 1:
string = f"({self.value * 10 ** (-self.power):.{(self.n - 1)}f}±{self.error * (10 ** -self.power):.{(self.n - 1)}f})"
elif self.n == 1:
string = f"({self.value * 10 ** (-self.power):.0f}±{self.error * (10 ** -self.power):.0f})"
else:
raise ValueError("n could not be smaller than 1")
# add power
if self.power != 0:
string += f"*10^{self.power}"
# add unit
unit = str(self.unit)
if unit != "":
string += f" {unit}"
return string
@instancemethod
def __repr__(self):
return self.__str__()
def latex(self):
# insert values
if self.n > 1:
string = f"({self.value * 10 ** (-self.power):.{(self.n - 1)}f}±{self.error * (10 ** -self.power):.{(self.n - 1)}f})"
elif self.n == 1:
string = f"({self.value * 10 ** (-self.power):.0f} \\pm {self.error * (10 ** -self.power):.0f})"
else:
raise ValueError("n could not be smaller than 1")
# add power
if self.power != 0:
string += f"\\cdot 10^{{{self.power}}}"
# add unit
unit = str(self.unit)
if unit != "":
string += f" {unit}"
return string
# Calculations using simplified gauss
def __number_mul(self, other):
"""
Helper function of multiplication of Data with float.
:param other: Union[int, float] = value to multiply the Data with
:return: Data = result of the multiplication
"""
return Data(str(self.value * other), str(self.error * other), sign=self.unit, n=self.n)
def __const_mul(self, other):
"""
Helper function of multiplication of Data with Const.
:param other: Const = Const to multiply the Data with
:return: Data = result of the multiplication
"""
return Data(str(self.value * other.value), str(self.error * other.value), sign=self.unit * other.unit, n=self.n)
def __data_mul(self, other):
"""
Helper function of multiplication of two Data.
:param other: Data = the other Data to multiply with
:return: Data = result of the multiplication
"""
result = self.value * other.value
error = str(result * ((self.error / self.value) ** 2 + (other.error / other.value) ** 2) ** 0.5)
significant_digits = min(self.n, other.n)
unit = self.unit * other.unit
result = str(result)
return Data(result, error, sign=unit, n=significant_digits)
@instancemethod
def __mul__(self, other):
"""
Multiplication of a Data object with other; the behaviour depends on the type of other.
:param other: Union[Data, Const, int, float] = Object to multiply with
:return: Data = result of the multiplication
"""
type_other = type(other)
functions = {int: self.__number_mul, float: self.__number_mul, Const: self.__const_mul, Data: self.__data_mul}
if type_other not in functions:
raise ValueError(f"Unsupported operation '*' for Data and {type(other)}")
return functions[type_other](other)
@instancemethod
def __rmul__(self, other):
"""
Multiplication of a Data object with other; the behaviour depends on the type of other.
:param other: Union[Data, Const, int, float] = Object to multiply with
:return: Data = result of the multiplication
"""
return self.__mul__(other)
@unit_control
def __data_add(self, other):
"""
Helper function for addition of two Data.
:param other: Data = other Data to add with Data
:return: Data = result of the addition
"""
result = self.value + other.value
significant_digits = min(self.n, other.n)
error = str((self.error ** 2 + other.error ** 2) ** 0.5)
unit = self.unit
result = str(result)
return Data(result, error, n=significant_digits, sign=unit)
def __number_add(self, other):
"""
Helper function for addition of a Data and a Union[int, float].
:param other: Union[int, float] = value to add
:return: Data = result of the addition
"""
if self.unit == Unit(""):
data = Data(str(self.value + other), str(self.error), n=self.n)
data.power = self.power
return data
@unit_control
def __const_add(self, other):
"""
Helper function for addition of a Data and a Const.
:param other: Const = value to add
:return: Data = result of the addition
"""
# a Const carries no uncertainty, so error and significant digits stay unchanged
result = str(self.value + other.value)
return Data(result, str(self.error), n=self.n, sign=self.unit)
@instancemethod
def __add__(self, other):
"""
Addition of a Data and other.
:param other: Union[Data, Const, int, float] = Object to add with
:return: Data = result of the addition
"""
type_other = type(other)
functions = {int: self.__number_add, float: self.__number_add, Const: self.__const_add, Data: self.__data_add}
if type_other not in functions:
raise ValueError("Unsupported operation '+' for Data and {type(other)}")
return functions[type_other](other)
@instancemethod
def __radd__(self, other):
"""
Addition of a Data and other.
:param other: Union[Data, Const, int, float] = Object to add with
:return: Data = result of the addition
"""
return self.__number_add(other)
@unit_control
def __data_sub(self, other):
"""
Helper function for subtraction of two Data.
:param other: Data = other Data to subtract with Data
:return: Data = result of the subtraction
"""
result = self.value - other.value
significant_digits = min(self.n, other.n)
error = str((self.error ** 2 + other.error ** 2) ** 0.5)
unit = self.unit
result = str(result)
return Data(result, error, sign=unit, n=significant_digits)
def __number_sub(self, other):
"""
Helper function for subtraction of a Data and a Union[int, float].
:param other: Union[int, float] = value to subtract
:return: Data = result of the subtraction
"""
if self.unit == Unit(""):
data = Data(str(self.value - other), str(self.error), n=self.n)
data.power = self.power
return data
@unit_control
def __const_sub(self, other):
"""
Helper function for subtraction of a Data and a Const.
:param other: Const = value to subtract
:return: Data = result of the subtraction
"""
return Data(str(self.value - other.value), str(self.error), n=self.n, sign=self.unit)
@instancemethod
def __sub__(self, other):
"""
Subtraction of a Data and other.
:param other: Union[Data, Const, int, float] = object to subtract from Data
:return: Data = result of the subtraction
"""
type_other = type(other)
functions = {int: self.__number_sub, float: self.__number_sub, Const: self.__const_sub, Data: self.__data_sub}
if type_other not in functions:
raise ValueError("Unsupported operation '-' for Data and {type(other)}")
return functions[type_other](other)
@instancemethod
def __rsub__(self, other):
"""
Subtraction of a Data and other.
:param other: Union[Data, Const, int, float] = Object to subtract with
:return: Data = result of the subtraction
"""
return -1 * self.__number_sub(other)
def __number_div(self, other):
"""
Helper function of division of Data with float.
:param other: Union[int, float] = value to divide Data with
:return: Data = result of division
"""
return Data(str(self.value / other), str(self.error / other), sign=self.unit, n=self.n)
def __const_div(self, other):
"""
Helper function of division of Data with Const.
:param other: Const = Const to divide Data with
:return: Data = result of division
"""
result = str(self.value / other.value)
unit = self.unit / other.unit
error = str(self.error / other.value)
significant_digits = self.n
return Data(result, error, sign=unit, n=significant_digits)
def __data_div(self, other):
"""
Helper function of division of Data with Data.
:param other: Data = Data to divide Data with
:return: Data = result of division
"""
result = self.value / other.value
significant_digits = min(self.n, other.n)
error = str(result * ((self.error / self.value) ** 2 + (other.error / other.value) ** 2) ** 0.5)
result = str(result)
unit = self.unit / other.unit
return Data(result, error, sign=unit, n=significant_digits)
@instancemethod
def __truediv__(self, other):
"""
Division of a Data object with other.
:param other: Union[Data, Const, int, float] = object to divide with
:return: Data = result of the division
"""
type_other = type(other)
functions = {int: self.__number_div, float: self.__number_div, Const: self.__const_div, Data: self.__data_div}
if type_other not in functions:
raise ValueError(f"Unsupported operation '/' for Data and {type_other}")
return functions[type_other](other)
@instancemethod
def __rtruediv__(self, other):
"""
Division of a Data object with other.
:param other: Union[int, float] = object to divide with
:return: Data = result of the division
"""
typ_other = type(other)
if typ_other == int or typ_other == float:
result = other / self.value
unit = self.unit.flip()
return Data(str(result), str(result * (self.error / self.value)), sign=unit, n=self.n)
else:
raise ValueError(f"Unsupported operation '/' for Data and {typ_other}")
def __pow__(self, other):
"""
Power of a Data object with other.
:param other: Union[int, float] = object to power with
:return: Data = result of the calculation
"""
typ_other = type(other)
if typ_other == int or typ_other == float:
result = self.value ** other
unit = self.unit ** other
# simplified Gaussian propagation for powers: the relative error scales with |other|
return Data(str(result), str(abs(other) * result * (self.error / self.value)), sign=unit, n=self.n)
elif typ_other == Data:
raise ArithmeticError("Try to use a Formula instead!")
else:
raise TypeError(f"Unsupported operation '**' for Data and {typ_other}")
# Data comparisons
@unit_control
def __data_lt(self, other):
"""
Helper function of lt comparison of Data with Data.
:param other: Data = Data to compare with
:return: bool = result of comparison
"""
return self.value + self.error + other.error < other.value
@unit_control
def __const_lt(self, other):
"""
Helper function of lt comparison of Data with Const.
:param other: Const = Const to compare with
:return: bool = result of comparison
"""
return self.value + self.error < other.value
@instancemethod
def __lt__(self, other):
"""
Compare Data with other objects.
:param other: Union[Data, Const] = object to compare with
:return: bool = result of comparison
"""
type_other = type(other)
functions = {Const: self.__const_lt, Data: self.__data_lt}
if type_other not in functions:
raise ValueError(f"Unsupported operation '<' for Data and {type_other}")
return functions[type_other](other)
@unit_control
def __data_eq(self, other):
"""
Helper function of eq comparison of Data with Data.
:param other: Data = Data to compare with
:return: bool = result of comparison
"""
return self.value - self.error - other.error <= other.value <= self.value + self.error + other.error
@unit_control
def __const_eq(self, other):
"""
Helper function of eq comparison of Data with Const.
:param other: Const = Const to compare with
:return: bool = result of comparison
"""
return self.value - self.error <= other.value <= self.value + self.error
@instancemethod
def __eq__(self, other):
"""
Compare Data with other objects.
:param other: Union[Data, Const] = object to compare with
:return: bool = result of comparison
"""
type_other = type(other)
functions = {Const: self.__const_eq, Data: self.__data_eq}
if type_other not in functions:
raise ValueError(f"Unsupported operation '==' for Data and {type_other}")
return functions[type_other](other)
@unit_control
def __data_gt(self, other):
"""
Helper function of gt comparison of Data with Data.
:param other: Data = Data to compare with
:return: bool = result of comparison
"""
return self.value - self.error - other.error > other.value
@unit_control
def __const_gt(self, other):
"""
Helper function of gt comparison of Data with Const.
:param other: Const = Const to compare with
:return: bool = result of comparison
"""
return self.value - self.error > other.value
@instancemethod
def __gt__(self, other):
"""
Compare Data with other objects.
:param other: Union[Data, Const] = object to compare with
:return: bool = result of comparison
"""
type_other = type(other)
functions = {Const: self.__const_gt, Data: self.__data_gt}
if type_other not in functions:
raise ValueError(f"Unsupported operation '==' for Data and {type_other}")
return functions[type_other](other)
@instancemethod
def __ne__(self, other):
"""
Compare Data with other objects.
:param other: Union[Data, Const] = object to compare with
:return: bool = result of comparison
"""
return not self.__eq__(other)
@instancemethod
def __ge__(self, other):
"""
Compare Data with other objects.
:param other: Union[Data, Const] = object to compare with
:return: bool = result of comparison
"""
return self.__gt__(other) or self.__eq__(other)
@instancemethod
def __le__(self, other):
"""
Compare Data with other objects.
:param other: Union[Data, Const] = object to compare with
:return: bool = result of comparison
"""
return self.__lt__(other) or self.__eq__(other)
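# Illustrative note: with the helpers above, two Data values compare equal
# when their uncertainty intervals overlap, for example:
#     Data("10.0", "0.3") == Data("10.4", "0.2")   # True: 10.0 + 0.3 + 0.2 >= 10.4
#     Data("10.0", "0.1") < Data("10.4", "0.2")    # True: 10.0 + 0.1 + 0.2 < 10.4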
class Const:
"""
Class for constants and values with units that carry no uncertainty (or a negligible one).
"""
def __init__(self, value, sign):
"""
Initialize a constant with a unit.
:param value: Union[int, float] = constant value
:param sign: Union[str, Unit] = String carrying the unit.
**sign-EBNF:**
S := '"' units '"' | '"' units '/' units '"'
units := unit | unit ';' units
unit := string | string '^' integer
"""
if isinstance(sign, str):
sign = sign.split("/")
if len(sign) > 1:
sign = ["" if s == "1" else s for s in sign]
self.unit = Unit(sign[0], sign[1])
else:
self.unit = Unit(sign[0])
else:
self.unit = sign
self.value = float(value)
@instancemethod
def __str__(self):
"""
Creates a string representation of the Const.
:return: str = representation of the Const
"""
string = str(self.value)
unit_string = str(self.unit)
if unit_string != "":
string += " " + unit_string
return string
@instancemethod
def __repr__(self):
return self.__str__()
@instancemethod
def __mul__(self, other):
"""
Multiplication with other.
:param other: Union[Data, Const, int, float] = other object to multiply with
:return: Union[Data, Const] = Result type depends on the other object
"""
if isinstance(other, (int, float)):
value = self.value * other
unit = self.unit
return Const(value, sign=unit)
elif isinstance(other, Const):
value = self.value * other.value
unit = self.unit * other.unit
return Const(value, sign=unit)
elif isinstance(other, Data):
value = self.value * other.value
unit = self.unit * other.unit
n = other.n
error = self.value * other.error
return Data(str(value), str(error), n=n, sign=unit)
else:
raise TypeError(f"unsupported operand '*' for Const and {type(other)}")
@instancemethod
def __rmul__(self, other):
"""
Multiplication with other.
:param other: Union[Data, Const, int, float] = other object to multiply with
:return: Union[Data, Const] = result type depends on the other object
"""
return self.__mul__(other)
@instancemethod
def __add__(self, other):
"""
Addition with other Const.
:param other: Union[Const, Data, int, float] = other object to add with
:return: Union[Const, Data, int, float] = result of the addition
"""
if isinstance(other, Const):
if self.unit == other.unit:
return Const(self.value + other.value, sign=self.unit)
else:
raise ArithmeticError("Addition of Data with different units is not possible")
elif isinstance(other, Data):
if self.unit == other.unit:
return Data(str(self.value + other.value), str(other.error), n=other.n, sign=self.unit)
else:
raise ArithmeticError("Addition of Data and Const with different units is not possible")
elif isinstance(other, (int, float)):
if self.unit == Unit():
return self.value + other
else:
raise ArithmeticError("Addition of values with different units is not possible")
else:
raise TypeError(f"unsupported operand '+' for Const and {type(other)}")
@instancemethod
def __radd__(self, other):
return self.__add__(other)
@instancemethod
def __sub__(self, other):
"""
Subtraction with other Const.
:param other: Union[Const, Data, int, float] = other object to add with
:return: Union[Const, Data] = result of the subtraction
"""
return self.__add__(-1 * other)
@instancemethod
def __rsub__(self, other):
neg_self = -1 * self
return neg_self.__add__(other)
@instancemethod
def __truediv__(self, other):
"""
Division with other object.
:param other: Union[Data, Const, int, float] = object to divide by
:return: Union[Data, Const] = Result type depends on the other object
"""
if isinstance(other, (int, float)):
value = self.value / other
unit = self.unit
return Const(value, sign=unit)
elif isinstance(other, Const):
value = self.value / other.value
unit = self.unit / other.unit
return Const(value, sign=unit)
elif isinstance(other, Data):
value = self.value / other.value
unit = self.unit / other.unit
n = other.n
# simplified Gaussian propagation for c / x: sigma = |c| * sigma_x / x**2
error = self.value * other.error / other.value ** 2
return Data(str(value), str(error), n=n, sign=unit)
else:
raise TypeError(f"unsupported operand '/' for Const and {type(other)}")
@instancemethod
def __rtruediv__(self, other):
"""
Division with other object.
:param other: Union[int, float] = object to divide by
:return: Union[Data, Const] = Result type depends on the other object
"""
if isinstance(other, (int, float)):
result = other / self.value
unit = self.unit.flip()
return Const(result, unit)
else:
raise ValueError(f"Unsupported operation '/' for Const and {type(other)}")
@instancemethod
def __pow__(self, other):
"""
Power a Const object
:param other: Union[int, float] = object to power with
:return: Const = result of the calculation
"""
typ_other = type(other)
if typ_other == int or typ_other == float:
result = self.value ** other
unit = self.unit ** other
return Const(result, unit)
elif typ_other == Data:
raise ArithmeticError("Try to use a Formula instead!")
else:
raise TypeError(f"Unsupported operation '/' for Const and {typ_other}")
@unit_control
def __lt__(self, other):
"""
Compare Const with other objects.
:param other: Const = object to compare with
:return: bool = result of comparison
"""
if isinstance(other, Const):
return self.value < other.value
raise TypeError(f"unsupported operation '<' for Data and {type(other)}")
@unit_control
def __le__(self, other):
"""
Compare Const with other objects.
:param other: Const = object to compare with
:return: bool = result of comparison
"""
if isinstance(other, Const):
return self.value <= other.value
raise TypeError(f"unsupported operation '<=' for Data and {type(other)}")
@unit_control
def __eq__(self, other):
"""
Compare Const with other objects.
:param other: Const = object to compare with
:return: bool = result of comparison
"""
if isinstance(other, Const):
return self.value == other.value
raise TypeError(f"unsupported operation '==' for Data and {type(other)}")
@unit_control
def __ne__(self, other):
"""
Compare Const with other objects.
:param other: Const = object to compare with
:return: bool = result of comparison
"""
if isinstance(other, Const):
return self.value != other.value
raise TypeError(f"unsupported operation '!=' for Data and {type(other)}")
@unit_control
def __ge__(self, other):
"""
Compare Const with other objects.
:param other: Const = object to compare with
:return: bool = result of comparison
"""
if isinstance(other, Const):
return self.value >= other.value
raise TypeError(f"unsupported operation '>=' for Data and {type(other)}")
@unit_control
def __gt__(self, other):
"""
Compare Const with other objects.
:param other: Const = object to compare with
:return: bool = result of comparison
"""
if isinstance(other, Const):
return self.value > other.value
raise TypeError(f"unsupported operation '>' for Data and {type(other)}")
| 33.059432
| 129
| 0.578279
| 3,086
| 25,588
| 4.667531
| 0.052171
| 0.033741
| 0.030131
| 0.026243
| 0.875798
| 0.837823
| 0.800264
| 0.789919
| 0.762774
| 0.713621
| 0
| 0.004225
| 0.315578
| 25,588
| 773
| 130
| 33.102199
| 0.818078
| 0.300336
| 0
| 0.674095
| 0
| 0.011142
| 0.110929
| 0.007576
| 0
| 0
| 0
| 0
| 0
| 1
| 0.153203
| false
| 0
| 0.013928
| 0.008357
| 0.337047
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
0dcc399eb87c51956b9ec3d1f8b782ea4ef7e3f5
| 19,696
|
py
|
Python
|
scripts/disable_add_strats_bank_v1.py
|
AlphaFinanceLab/alphahomora-bsc
|
2da7c97c5622deba3aa3151d10123d41f5b7d035
|
[
"MIT"
] | 11
|
2021-03-27T09:56:05.000Z
|
2021-07-12T13:58:15.000Z
|
scripts/disable_add_strats_bank_v1.py
|
AlphaFinanceLab/alphahomora-bsc
|
2da7c97c5622deba3aa3151d10123d41f5b7d035
|
[
"MIT"
] | null | null | null |
scripts/disable_add_strats_bank_v1.py
|
AlphaFinanceLab/alphahomora-bsc
|
2da7c97c5622deba3aa3151d10123d41f5b7d035
|
[
"MIT"
] | 8
|
2021-03-18T23:41:25.000Z
|
2021-07-02T18:11:33.000Z
|
from brownie import accounts, interface, Contract
from brownie import (
Bank, ConfigurableInterestBankConfig, UniswapGoblin
)
from brownie import network
import eth_abi
from .utils import *
# network.gas_price('5 gwei')
from brownie.network.gas.strategies import GasNowScalingStrategy
gas_strategy = GasNowScalingStrategy(
initial_speed="fast", max_speed="fast", increment=1.085, block_duration=20)
# set gas strategy
network.gas_price(gas_strategy)
def main():
deployer = accounts.at(
'0xb593d82d53e2c187dc49673709a6e9f806cdc835', force=True)
# deployer = accounts.load('gh')
# goblin_list = uniswap_goblin_list + sushiswap_goblin_list
goblin_list = [
"0xe900e07ce6bcdd3c5696bfc67201e940e316c1f1", "0x35952c82e146da5251f2f822d7b679f34ffa71d3",
"0xb7bf6d2e6c4fa291d6073b51911bac17890e92ec", "0xa7120893283cc2aba8155d6b9887bf228a8a86d2",
"0x0ec3de9941479526bb3f530c23aaff84148d17a7", "0x09b4608a0ca9ae8002465eb48cd2f916edf5bf63",
"0x8c5cecc9abd8503d167e6a7f2862874b6193e6e4", "0xcbb95b7708b1b543ecb82b2d58db1711f88d265c",
"0x6d0eb60d814a21e2bed483c71879777c9217aa28", "0xfbc0d22bf0ecc735a03fd08fc20b48109cb89543",
"0x4668ff4d478c5459d6023c4a7efda853412fb999", "0x37ef9c13faa609d5eee21f84e4c6c7bf62e4002e",
"0xf285e8adf8b871a32c305ab20594cbb251341535", "0x6a279df44b5717e89b51645e287c734bd3086c1f",
"0x4d4ad9628f0c16bbd91cab3a39a8f15f11134300", "0xd6419fd982a7651a12a757ca7cd96b969d180330",
"0xf134fdd0bbce951e963d5bc5b0ffe445c9b6c5c6", "0xbb4755673e9df77f1af82f448d2b09f241752c05",
"0xcc11e2cf6755953eed483ba2b3c433647d0f18dc", "0xee781f10ce14a45f1d8c2487aeaf24d0366fb9fa",
"0x66e970f2602367f8ae46ccee79f6139737eaff1c", "0x1001ec1b6fc2438e8be6ffa338d3380237c0399a",
"0x6cc2c08e413638ceb38e3db964a114f139fff81e", "0x4ec23befb01b9903d58c4bea096d65927e9462cc",
"0x18712bcb987785d6679134abc7cddee669ec35ca", "0x14804802592c0f6e2fd03e78ec3efc9b56f1963d",
"0xbd95cfef698d4d582e66110475ec7e4e21120e4a", "0x766614adcff1137f8fced7f0804d184ce659826a",
"0xa8854bd26ee44ad3c78792d68564b96ad0a45245", "0xdaa93955982d32451f90a1109ecec7fecb7ee4b3",
"0x69fe7813f804a11e2fd279eba5dc1ecf6d6bf73b", "0x9d00b5eeedeea5141e82b101e645352a2ea960ba",
"0x8fc4c0566606aa0c715989928c12ce254f8e1228", "0x9d9c28f39696ce0ebc42ababd875977060e7afa1",
"0xee8f4e4b13c610bfa2c65d968ba1d5263d640ce6", "0x54a2c35d689f4314fa70dd018ea0a84c74506925",
"0x3c2bbb353b48d54b619db8ac6aa642627fb800e3", "0xcfbd9eeac76798571ed96ed60ca34df35f29ea8d",
"0x5c767dbf81ec894b2d70f2aa9e45a54692d0d7eb", "0x41f07d87a28adec58dba1d063d540b86ccbb989f",
"0xd902a3bedebad8bead116e8596497cf7d9f45da2", "0x795d3655d0d7ecbf26dd33b1a7676017bb0ee611",
]
all_eth_strat_addr = {
'0xe900e07ce6bcdd3c5696bfc67201e940e316c1f1': '0xb55f46d5bd3e6609b39707afbabd8a61ffed9d0a',
'0x35952c82e146da5251f2f822d7b679f34ffa71d3': '0x737aad349312f36b43041737d648051a39f146e8',
'0xb7bf6d2e6c4fa291d6073b51911bac17890e92ec': '0xb55f46d5bd3e6609b39707afbabd8a61ffed9d0a', # cannot call okStrats
'0xa7120893283cc2aba8155d6b9887bf228a8a86d2': '0x737aad349312f36b43041737d648051a39f146e8',
'0x0ec3de9941479526bb3f530c23aaff84148d17a7': '0xb55f46d5bd3e6609b39707afbabd8a61ffed9d0a',
'0x09b4608a0ca9ae8002465eb48cd2f916edf5bf63': '0xb55f46d5bd3e6609b39707afbabd8a61ffed9d0a',
'0x8c5cecc9abd8503d167e6a7f2862874b6193e6e4': '0x737aad349312f36b43041737d648051a39f146e8',
'0x6d0eb60d814a21e2bed483c71879777c9217aa28': '0x737aad349312f36b43041737d648051a39f146e8',
'0xfbc0d22bf0ecc735a03fd08fc20b48109cb89543': '0xb55f46d5bd3e6609b39707afbabd8a61ffed9d0a',
'0x4668ff4d478c5459d6023c4a7efda853412fb999': '0xb55f46d5bd3e6609b39707afbabd8a61ffed9d0a', # cannot call okStrats
'0x37ef9c13faa609d5eee21f84e4c6c7bf62e4002e': '0x737aad349312f36b43041737d648051a39f146e8',
'0xf285e8adf8b871a32c305ab20594cbb251341535': '0xb55f46d5bd3e6609b39707afbabd8a61ffed9d0a',
'0x6a279df44b5717e89b51645e287c734bd3086c1f': '0x737aad349312f36b43041737d648051a39f146e8',
'0x4d4ad9628f0c16bbd91cab3a39a8f15f11134300': '0xb55f46d5bd3e6609b39707afbabd8a61ffed9d0a',
'0xd6419fd982a7651a12a757ca7cd96b969d180330': '0x737aad349312f36b43041737d648051a39f146e8',
'0xf134fdd0bbce951e963d5bc5b0ffe445c9b6c5c6': '0x737aad349312f36b43041737d648051a39f146e8',
'0xbb4755673e9df77f1af82f448d2b09f241752c05': '0x737aad349312f36b43041737d648051a39f146e8',
'0xcc11e2cf6755953eed483ba2b3c433647d0f18dc': '0xb55f46d5bd3e6609b39707afbabd8a61ffed9d0a', # not found in constant.ts
'0xee781f10ce14a45f1d8c2487aeaf24d0366fb9fa': '0xb55f46d5bd3e6609b39707afbabd8a61ffed9d0a',
'0x66e970f2602367f8ae46ccee79f6139737eaff1c': '0xb55f46d5bd3e6609b39707afbabd8a61ffed9d0a',
'0x1001ec1b6fc2438e8be6ffa338d3380237c0399a': '0x737aad349312f36b43041737d648051a39f146e8',
'0x6cc2c08e413638ceb38e3db964a114f139fff81e': '0xb55f46d5bd3e6609b39707afbabd8a61ffed9d0a',
# '0x4ec23befb01b9903d58c4bea096d65927e9462cc': '0xb55f46d5bd3e6609b39707afbabd8a61ffed9d0a', # no allETHStrat disabled
'0x18712bcb987785d6679134abc7cddee669ec35ca': '0xb55f46d5bd3e6609b39707afbabd8a61ffed9d0a',
'0x14804802592c0f6e2fd03e78ec3efc9b56f1963d': '0xb55f46d5bd3e6609b39707afbabd8a61ffed9d0a', # cannot call okStrats
'0xbd95cfef698d4d582e66110475ec7e4e21120e4a': '0xb55f46d5bd3e6609b39707afbabd8a61ffed9d0a',
'0x766614adcff1137f8fced7f0804d184ce659826a': '0xb55f46d5bd3e6609b39707afbabd8a61ffed9d0a',
# '0xa8854bd26ee44ad3c78792d68564b96ad0a45245': '0xb55f46d5bd3e6609b39707afbabd8a61ffed9d0a', # no allETHStrat disabled
'0xdaa93955982d32451f90a1109ecec7fecb7ee4b3': '0xb55f46d5bd3e6609b39707afbabd8a61ffed9d0a', # cannot call okStrats
'0x69fe7813f804a11e2fd279eba5dc1ecf6d6bf73b': '0x737aad349312f36b43041737d648051a39f146e8',
'0x9d00b5eeedeea5141e82b101e645352a2ea960ba': '0xb55f46d5bd3e6609b39707afbabd8a61ffed9d0a',
'0x8fc4c0566606aa0c715989928c12ce254f8e1228': '0x737aad349312f36b43041737d648051a39f146e8',
'0x9d9c28f39696ce0ebc42ababd875977060e7afa1': '0x737aad349312f36b43041737d648051a39f146e8',
'0xee8f4e4b13c610bfa2c65d968ba1d5263d640ce6': '0xb55f46d5bd3e6609b39707afbabd8a61ffed9d0a',
'0x54a2c35d689f4314fa70dd018ea0a84c74506925': '0x737aad349312f36b43041737d648051a39f146e8',
# '0x3c2bbb353b48d54b619db8ac6aa642627fb800e3': '0xb55f46d5bd3e6609b39707afbabd8a61ffed9d0a', # no allETHStrat disabled
'0xcfbd9eeac76798571ed96ed60ca34df35f29ea8d': '0x737aad349312f36b43041737d648051a39f146e8',
'0x5c767dbf81ec894b2d70f2aa9e45a54692d0d7eb': '0x737aad349312f36b43041737d648051a39f146e8',
'0x41f07d87a28adec58dba1d063d540b86ccbb989f': '0xb55f46d5bd3e6609b39707afbabd8a61ffed9d0a', # cannot call okStrats
'0xd902a3bedebad8bead116e8596497cf7d9f45da2': '0x737aad349312f36b43041737d648051a39f146e8',
'0x795d3655d0d7ecbf26dd33b1a7676017bb0ee611': '0x737aad349312f36b43041737d648051a39f146e8',
'0xcbb95b7708b1b543ecb82b2d58db1711f88d265c': '0xb55f46d5bd3e6609b39707afbabd8a61ffed9d0a'
}
add_two_side_opt_strat_addr = {
'0xe900e07ce6bcdd3c5696bfc67201e940e316c1f1': '0x8de16d5884a418f1034f78045da47f2cae4012a4',
'0x35952c82e146da5251f2f822d7b679f34ffa71d3': '0x587fd08d2979659534d301944b105559ce072ad1',
'0xb7bf6d2e6c4fa291d6073b51911bac17890e92ec': '0x1b1db87e728a2c22d596e331caabb0c99790113e', # cannot call okStrats
'0xa7120893283cc2aba8155d6b9887bf228a8a86d2': '0x8d4958f312ac3009d3804dc659d6a439d34e2821',
'0x0ec3de9941479526bb3f530c23aaff84148d17a7': '0x42d7b319807c50f8719698e52315742ad6f00c5a',
'0x09b4608a0ca9ae8002465eb48cd2f916edf5bf63': '0x3f9dd1b039a19a7cb1dd016527e8566bce185936',
'0x8c5cecc9abd8503d167e6a7f2862874b6193e6e4': '0xbe615dfed36d753999f367458671a4954f7b43e8',
'0x6d0eb60d814a21e2bed483c71879777c9217aa28': '0xa8f70a2b021094746ffdeacab15105e5cfe6dc9b',
'0xfbc0d22bf0ecc735a03fd08fc20b48109cb89543': '0x3702bbba321c2fe7be4731f558d2d60fa20eeff9',
'0x4668ff4d478c5459d6023c4a7efda853412fb999': '0x1debf8e2ddfc4764376e8e4ed5bc8f1b403d2629', # cannot call okStrats
'0x37ef9c13faa609d5eee21f84e4c6c7bf62e4002e': '0x3ecd838f6a5ef357237cdd226bab90255549ec71',
'0xf285e8adf8b871a32c305ab20594cbb251341535': '0xdce3ab478450b101eba5f86b74e014e45d2d385b',
'0x6a279df44b5717e89b51645e287c734bd3086c1f': '0x109bfde650bb8fb7709ceefc2af81013238289fc',
'0x4d4ad9628f0c16bbd91cab3a39a8f15f11134300': '0x759034a7e6428430c7383c10b01515ef38b61ed5',
'0xd6419fd982a7651a12a757ca7cd96b969d180330': '0xea2b4ab299541053152398ee42b0875f2d6870df',
'0xf134fdd0bbce951e963d5bc5b0ffe445c9b6c5c6': '0xa0fe022d098f92e561aadabe59ab6f15c4a4fe9e',
'0xbb4755673e9df77f1af82f448d2b09f241752c05': '0x18864491083dc4588a9eecbeb28f22a9bf45dad1',
'0xcc11e2cf6755953eed483ba2b3c433647d0f18dc': '0xacd4e6d35f96a30c4f7923f95139e275eb783e04', # not found in constant.ts
'0xee781f10ce14a45f1d8c2487aeaf24d0366fb9fa': '0xf6090bcf0be8e9b256364b015222b2d58bfc8fba',
'0x66e970f2602367f8ae46ccee79f6139737eaff1c': '0x23324a5b4e737440a3b29159bf0b1e39ad93f5a6',
'0x1001ec1b6fc2438e8be6ffa338d3380237c0399a': '0x9f440181f3c8092a5a4c1daa62c8ee3342890762',
'0x6cc2c08e413638ceb38e3db964a114f139fff81e': '0xc6d05f8d77a80a04e69ad055ff7f1a599b459ead',
'0x4ec23befb01b9903d58c4bea096d65927e9462cc': '0x90b5f08283565de70f7ed78116469abb6b030aea',
'0x18712bcb987785d6679134abc7cddee669ec35ca': '0xd2dadd442727b7172ddab1b73b726a1ef9dbb51f',
'0x14804802592c0f6e2fd03e78ec3efc9b56f1963d': '0xa1dc7ce03cb285aca8bde9c27d1e5d4731871814', # cannot call okStrats
'0xbd95cfef698d4d582e66110475ec7e4e21120e4a': '0x483747e40bdb6ab28b4b4ea73b9d62d4d44c509e',
'0x766614adcff1137f8fced7f0804d184ce659826a': '0x124fc2970c4dc1cacb813187e6c1a0d2f01c6c53',
'0xa8854bd26ee44ad3c78792d68564b96ad0a45245': '0x9f73e638a1de6464ad953ec21a12701de10e69cf',
'0xdaa93955982d32451f90a1109ecec7fecb7ee4b3': '0xb39f78e505e0959c96a38c91987713bad8519480', # cannot call okStrats
'0x69fe7813f804a11e2fd279eba5dc1ecf6d6bf73b': '0xc207be77051492f89aa7d650a6f03dc76fbf00a6',
'0x9d00b5eeedeea5141e82b101e645352a2ea960ba': '0x23091694539a083940eb4236215cc82a619fe475',
'0x8fc4c0566606aa0c715989928c12ce254f8e1228': '0xa2d3e7fc0ef83d28fcabc8fb621d8990bfe48115',
'0x9d9c28f39696ce0ebc42ababd875977060e7afa1': '0x1c4413ac634d96faee6b64ee98c2bfbcc85dfc4a',
'0xee8f4e4b13c610bfa2c65d968ba1d5263d640ce6': '0xd84f554a24977cf7bda60fc11d6358c432007814',
'0x54a2c35d689f4314fa70dd018ea0a84c74506925': '0xb004229fc9a8f22aac373923d40ac7f3887863d7',
'0x3c2bbb353b48d54b619db8ac6aa642627fb800e3': '0x325a606c8c043ef1e2d07ea6faae543aef7b13cf',
'0xcfbd9eeac76798571ed96ed60ca34df35f29ea8d': '0xb601361832518d31a18462ce243226811674b987',
'0x5c767dbf81ec894b2d70f2aa9e45a54692d0d7eb': '0x8448bde9e8643e1adbe610eee0b2efd4b16b830c',
'0x41f07d87a28adec58dba1d063d540b86ccbb989f': '0xedd9d44e302b0bfa693d0179a1ee14dde48306a6', # cannot call okStrats
'0xd902a3bedebad8bead116e8596497cf7d9f45da2': '0x4b1f0ce67303ca233515980219beaeeb389132f7',
'0x795d3655d0d7ecbf26dd33b1a7676017bb0ee611': '0xd3ea1b6de0ed59bec8b768d2cdc995002c7de95a',
'0xcbb95b7708b1b543ecb82b2d58db1711f88d265c': '0xb96abafe296b51fd245d3c80d2a0e97f933b3285'
}
contracts_no_ok_strats_to_check = set([
'0xb7bf6d2e6c4fa291d6073b51911bac17890e92ec', '0x4668ff4d478c5459d6023c4a7efda853412fb999',
'0x14804802592c0f6e2fd03e78ec3efc9b56f1963d', '0xdaa93955982d32451f90a1109ecec7fecb7ee4b3',
'0x41f07d87a28adec58dba1d063d540b86ccbb989f'
])
goblins = {x: UniswapGoblin.at(x) for x in goblin_list}
print('mapping goblins success')
print('checking if strats are already disabled')
for goblin_addr in goblin_list:
if goblin_addr in contracts_no_ok_strats_to_check:
continue
if goblin_addr in all_eth_strat_addr:
assert goblins[goblin_addr].okStrats(all_eth_strat_addr[goblin_addr]) == True, (
f'all-eth strategy has already been disabled in {goblin_addr}'
)
if goblin_addr in add_two_side_opt_strat_addr:
assert goblins[goblin_addr].okStrats(add_two_side_opt_strat_addr[goblin_addr]) == True, (
f'add-two-side-opt strategy has already been disabled in {goblin_addr}'
)
print('disable allETHOnly and addTwoSidesOptimal strategy')
for goblin_addr, goblin in goblins.items():
strategies = []
if goblin_addr in all_eth_strat_addr:
strategies.append(all_eth_strat_addr[goblin_addr])
if goblin_addr in add_two_side_opt_strat_addr:
strategies.append(add_two_side_opt_strat_addr[goblin_addr])
goblin.setStrategyOk(strategies, False, {'from': deployer})
print("Done!!!")
print("End of deploy process!!!")
# ###########################################################
# # test opening strats
print('==========================================')
print('start testing')
alice = accounts[0]
print('execute allbnb strategies; expect errors for all')
bank = Bank.at('0x67b66c99d3eb37fa76aa3ed1ff33e8e39f0b9c7a')
tokens_for_goblin = {
'0xe900e07ce6bcdd3c5696bfc67201e940e316c1f1': '0x8de16d5884a418f1034f78045da47f2cae4012a4',
'0x35952c82e146da5251f2f822d7b679f34ffa71d3': '0x587fd08d2979659534d301944b105559ce072ad1',
'0xb7bf6d2e6c4fa291d6073b51911bac17890e92ec': '0x1b1db87e728a2c22d596e331caabb0c99790113e',
'0xa7120893283cc2aba8155d6b9887bf228a8a86d2': '0x8d4958f312ac3009d3804dc659d6a439d34e2821',
'0x0ec3de9941479526bb3f530c23aaff84148d17a7': '0x42d7b319807c50f8719698e52315742ad6f00c5a',
'0x09b4608a0ca9ae8002465eb48cd2f916edf5bf63': '0x3f9dd1b039a19a7cb1dd016527e8566bce185936',
'0x8c5cecc9abd8503d167e6a7f2862874b6193e6e4': '0xbe615dfed36d753999f367458671a4954f7b43e8',
'0x6d0eb60d814a21e2bed483c71879777c9217aa28': '0xa8f70a2b021094746ffdeacab15105e5cfe6dc9b',
'0xfbc0d22bf0ecc735a03fd08fc20b48109cb89543': '0x3702bbba321c2fe7be4731f558d2d60fa20eeff9',
'0x4668ff4d478c5459d6023c4a7efda853412fb999': '0x1debf8e2ddfc4764376e8e4ed5bc8f1b403d2629',
'0x37ef9c13faa609d5eee21f84e4c6c7bf62e4002e': '0x3ecd838f6a5ef357237cdd226bab90255549ec71',
'0xf285e8adf8b871a32c305ab20594cbb251341535': '0xdce3ab478450b101eba5f86b74e014e45d2d385b',
'0x6a279df44b5717e89b51645e287c734bd3086c1f': '0x109bfde650bb8fb7709ceefc2af81013238289fc',
'0x4d4ad9628f0c16bbd91cab3a39a8f15f11134300': '0x759034a7e6428430c7383c10b01515ef38b61ed5',
'0xd6419fd982a7651a12a757ca7cd96b969d180330': '0xea2b4ab299541053152398ee42b0875f2d6870df',
'0xf134fdd0bbce951e963d5bc5b0ffe445c9b6c5c6': '0xa0fe022d098f92e561aadabe59ab6f15c4a4fe9e',
'0xbb4755673e9df77f1af82f448d2b09f241752c05': '0x18864491083dc4588a9eecbeb28f22a9bf45dad1',
'0xcc11e2cf6755953eed483ba2b3c433647d0f18dc': '0xb55f46d5bd3e6609b39707afbabd8a61ffed9d0a',
'0xee781f10ce14a45f1d8c2487aeaf24d0366fb9fa': '0xf6090bcf0be8e9b256364b015222b2d58bfc8fba',
'0x66e970f2602367f8ae46ccee79f6139737eaff1c': '0x23324a5b4e737440a3b29159bf0b1e39ad93f5a6',
'0x1001ec1b6fc2438e8be6ffa338d3380237c0399a': '0x9f440181f3c8092a5a4c1daa62c8ee3342890762',
'0x6cc2c08e413638ceb38e3db964a114f139fff81e': '0xc6d05f8d77a80a04e69ad055ff7f1a599b459ead',
'0x4ec23befb01b9903d58c4bea096d65927e9462cc': '0x90b5f08283565de70f7ed78116469abb6b030aea',
'0x18712bcb987785d6679134abc7cddee669ec35ca': '0xd2dadd442727b7172ddab1b73b726a1ef9dbb51f',
'0x14804802592c0f6e2fd03e78ec3efc9b56f1963d': '0xa1dc7ce03cb285aca8bde9c27d1e5d4731871814',
'0xbd95cfef698d4d582e66110475ec7e4e21120e4a': '0x483747e40bdb6ab28b4b4ea73b9d62d4d44c509e',
'0x766614adcff1137f8fced7f0804d184ce659826a': '0x124fc2970c4dc1cacb813187e6c1a0d2f01c6c53',
'0xa8854bd26ee44ad3c78792d68564b96ad0a45245': '0x9f73e638a1de6464ad953ec21a12701de10e69cf',
'0xdaa93955982d32451f90a1109ecec7fecb7ee4b3': '0xb39f78e505e0959c96a38c91987713bad8519480',
'0x69fe7813f804a11e2fd279eba5dc1ecf6d6bf73b': '0xc207be77051492f89aa7d650a6f03dc76fbf00a6',
'0x9d00b5eeedeea5141e82b101e645352a2ea960ba': '0x23091694539a083940eb4236215cc82a619fe475',
'0x8fc4c0566606aa0c715989928c12ce254f8e1228': '0xa2d3e7fc0ef83d28fcabc8fb621d8990bfe48115',
'0x9d9c28f39696ce0ebc42ababd875977060e7afa1': '0x1c4413ac634d96faee6b64ee98c2bfbcc85dfc4a',
'0xee8f4e4b13c610bfa2c65d968ba1d5263d640ce6': '0xd84f554a24977cf7bda60fc11d6358c432007814',
'0x54a2c35d689f4314fa70dd018ea0a84c74506925': '0xb004229fc9a8f22aac373923d40ac7f3887863d7',
'0x3c2bbb353b48d54b619db8ac6aa642627fb800e3': '0x325a606c8c043ef1e2d07ea6faae543aef7b13cf',
'0xcfbd9eeac76798571ed96ed60ca34df35f29ea8d': '0xb601361832518d31a18462ce243226811674b987',
'0x5c767dbf81ec894b2d70f2aa9e45a54692d0d7eb': '0x8448bde9e8643e1adbe610eee0b2efd4b16b830c',
'0x41f07d87a28adec58dba1d063d540b86ccbb989f': '0xedd9d44e302b0bfa693d0179a1ee14dde48306a6',
'0xd902a3bedebad8bead116e8596497cf7d9f45da2': '0x4b1f0ce67303ca233515980219beaeeb389132f7',
'0x795d3655d0d7ecbf26dd33b1a7676017bb0ee611': '0xd3ea1b6de0ed59bec8b768d2cdc995002c7de95a',
'0xcbb95b7708b1b543ecb82b2d58db1711f88d265c': '0xb96abafe296b51fd245d3c80d2a0e97f933b3285'
}
for goblin_addr in goblins.keys():
if goblin_addr not in all_eth_strat_addr:
continue
print('check', goblin_addr)
try:
bank.work(
0,
goblin_addr,
0,
0,
eth_abi.encode_abi(
['address', 'bytes'],
[
all_eth_strat_addr[goblin_addr],
eth_abi.encode_abi(['address', 'uint'], [tokens_for_goblin[goblin_addr], 0])
]
),
{'from': alice, 'value': '1 ether'}
)
assert False, 'the above command should be reverted'
except Exception as err:
print('got error as expected!!!')
assert "unapproved work strategy" in str(err), (
f'incorrect msg error; got {err}'
)
print('execute addTwoSidesOptimal strategy; expect error')
for goblin_addr in goblins.keys():
if goblin_addr not in add_two_side_opt_strat_addr:
continue
print('check', goblin_addr)
try:
bank.work(
0,
goblin_addr,
0,
0,
eth_abi.encode_abi(
['address', 'bytes'],
[
add_two_side_opt_strat_addr[goblin_addr],
eth_abi.encode_abi(['address', 'uint', 'uint'], [tokens_for_goblin[goblin_addr], 0, 0])
]
),
{'from': alice, 'value': '1 ether'}
)
assert False, 'the above command should be reverted'
except Exception as err:
print('got error as expected!!!')
assert "unapproved work strategy" in str(err), (
f'incorrect msg error; got {err}'
)
print('End of testing!!!')
| 70.594982
| 127
| 0.779295
| 827
| 19,696
| 18.411125
| 0.262394
| 0.017733
| 0.011822
| 0.00683
| 0.545843
| 0.545449
| 0.465322
| 0.456784
| 0.398923
| 0.398923
| 0
| 0.463256
| 0.149523
| 19,696
| 278
| 128
| 70.848921
| 0.445705
| 0.038891
| 0
| 0.496032
| 0
| 0
| 0.697904
| 0.659698
| 0
| 0
| 0.657469
| 0
| 0.02381
| 1
| 0.003968
| false
| 0
| 0.02381
| 0
| 0.027778
| 0.055556
| 0
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
0ddb4d2eed2b931ee89d94dc3ad533dee325b812
| 2,066
|
py
|
Python
|
integrators/contact.py
|
mseri/contact-variational-integrator
|
abdb887eb404568f585aeb2c2a743d0dc5afa3b9
|
[
"MIT"
] | 1
|
2019-12-16T21:29:48.000Z
|
2019-12-16T21:29:48.000Z
|
integrators/contact.py
|
mseri/contact-variational-integrator
|
abdb887eb404568f585aeb2c2a743d0dc5afa3b9
|
[
"MIT"
] | 1
|
2019-07-31T20:48:16.000Z
|
2019-08-01T21:04:03.000Z
|
integrators/contact.py
|
mseri/contact-variational-integrator
|
abdb887eb404568f585aeb2c2a743d0dc5afa3b9
|
[
"MIT"
] | 1
|
2019-12-16T21:29:50.000Z
|
2019-12-16T21:29:50.000Z
|
import numpy as np
from integrators.common import getsteps
def contact(init, tspan, h, a, acc, forcing):
"""
Integrate the damped oscillator with damping factor a
using the first order contact variational integrator.
"""
steps = getsteps(tspan, h)
hsq = h ** 2  # np.math.pow was an alias for math.pow and is removed in newer NumPy
t0, _ = tspan
sol = np.empty([steps, 2], dtype=np.float64)
sol[0] = np.array(init)
for i in range(steps-1):
p, x = sol[i]
xnew = x + (h-hsq*a)*p - 0.5*hsq*acc(x) + 0.5*hsq*forcing(t0+h*i)
pnew = (1.0-h*a)*p + 0.5*h*(
forcing(t0+h*i) + forcing(t0+h*(i+1)) - acc(x) - acc(xnew)
)
sol[i+1] = np.array((pnew, xnew))
return sol
# Note: this is no longer discussed in the paper but is a
# straightforward modification of the arguments presented there.
def midpoint(init, tspan, h, a):
"""
Integrate the damped oscillator with damping factor a
using the first order midpoint contact variational integrator.
"""
steps = getsteps(tspan, h)
hsq = h ** 2  # np.math.pow was an alias for math.pow and is removed in newer NumPy
sol = np.empty([steps, 2], dtype=np.float64)
sol[0] = np.array(init)
for i in range(steps-1):
p, x = sol[i]
xnew = (h - hsq*a)/(1.0 + 0.25*hsq) * p \
+ (1.0-0.25*hsq)/(1.0+0.25*hsq)*x
pnew = (xnew-x)/h - 0.25*h*(x+xnew)
sol[i+1] = np.array((pnew, xnew))
return sol
def symcontact(init, tspan, h, a, acc, forcing):
"""
Integrate the damped oscillator with damping factor a
using the second order contact variational integrator.
"""
steps = getsteps(tspan, h)
hsq = h ** 2  # np.math.pow was an alias for math.pow and is removed in newer NumPy
t0, _ = tspan
sol = np.empty([steps, 2], dtype=np.float64)
sol[0] = np.array(init)
for i in range(steps-1):
p, x = sol[i]
xnew = x + (h - 0.5*hsq*a)*p - 0.5*hsq*acc(x) + 0.5*hsq*forcing(t0+h*i)
pnew = (1.0-0.5*h*a)/(1.0 + 0.5*h*a)*p + 0.5*h*(
forcing(t0+h*i) + forcing(t0+h*(i+1)) - acc(x) - acc(xnew)
)/(1.0 + 0.5*h*a)
sol[i+1] = np.array((pnew, xnew))
return sol
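# Minimal usage sketch (assumes getsteps(tspan, h) returns the number of steps
# for the interval, matching its use above); integrates the damped harmonic
# oscillator x'' = -x - a*x' with no external forcing:
#
#     sol = contact(init=(0.0, 1.0), tspan=(0.0, 10.0), h=0.01, a=0.1,
#                   acc=lambda x: x, forcing=lambda t: 0.0)
#     p_final, x_final = sol[-1]   # rows of sol are (p, x) pairs, one per step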
| 30.835821
| 79
| 0.561471
| 352
| 2,066
| 3.289773
| 0.196023
| 0.017271
| 0.051813
| 0.056995
| 0.800518
| 0.779793
| 0.767703
| 0.767703
| 0.767703
| 0.74266
| 0
| 0.051027
| 0.269603
| 2,066
| 66
| 80
| 31.30303
| 0.716368
| 0.222168
| 0
| 0.666667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.071429
| false
| 0
| 0.047619
| 0
| 0.190476
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
0ddf0a8fa43a63c5840ad2b7033022deb03e6730
| 41
|
py
|
Python
|
projects/playqa/nein/__init__.py
|
mitchelljeff/SUMMAD4.3
|
33bb3a74cff16a7aa699660a08d98ddcd662cad5
|
[
"MIT"
] | 1
|
2017-09-15T14:06:07.000Z
|
2017-09-15T14:06:07.000Z
|
projects/playqa/nein/__init__.py
|
mitchelljeff/SUMMAD4.3
|
33bb3a74cff16a7aa699660a08d98ddcd662cad5
|
[
"MIT"
] | null | null | null |
projects/playqa/nein/__init__.py
|
mitchelljeff/SUMMAD4.3
|
33bb3a74cff16a7aa699660a08d98ddcd662cad5
|
[
"MIT"
] | null | null | null |
from .agent import *
from .web import App
| 20.5
| 20
| 0.756098
| 7
| 41
| 4.428571
| 0.714286
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.170732
| 41
| 2
| 21
| 20.5
| 0.911765
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
2187054f973d00f2bf3638ca583c43a20ce9c800
| 29
|
py
|
Python
|
pyflickr/__init__.py
|
rf777rf777/PyFlickr
|
eb3da9cbf62699eea27362d810bf9e974f91fcb7
|
[
"MIT"
] | 8
|
2018-09-03T12:39:00.000Z
|
2020-04-25T03:48:41.000Z
|
pyflickr/__init__.py
|
rf777rf777/PyFlickr
|
eb3da9cbf62699eea27362d810bf9e974f91fcb7
|
[
"MIT"
] | null | null | null |
pyflickr/__init__.py
|
rf777rf777/PyFlickr
|
eb3da9cbf62699eea27362d810bf9e974f91fcb7
|
[
"MIT"
] | 1
|
2018-09-08T15:41:30.000Z
|
2018-09-08T15:41:30.000Z
|
from .api import PyFlickr
| 9.666667
| 26
| 0.724138
| 4
| 29
| 5.25
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.241379
| 29
| 2
| 27
| 14.5
| 0.954545
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
21cf83986ab19364f0ad8ace7ce6c4fac0004386
| 955
|
py
|
Python
|
tests/examples/test_heartbeat_padring.py
|
psumesh/siliconcompiler
|
14663c1d0d6c46994bc9bb24595db7e4ac4e1600
|
[
"Apache-2.0"
] | 424
|
2021-12-04T15:45:12.000Z
|
2022-03-31T20:27:55.000Z
|
tests/examples/test_heartbeat_padring.py
|
psumesh/siliconcompiler
|
14663c1d0d6c46994bc9bb24595db7e4ac4e1600
|
[
"Apache-2.0"
] | 105
|
2021-12-03T21:25:29.000Z
|
2022-03-31T22:36:59.000Z
|
tests/examples/test_heartbeat_padring.py
|
psumesh/siliconcompiler
|
14663c1d0d6c46994bc9bb24595db7e4ac4e1600
|
[
"Apache-2.0"
] | 38
|
2021-12-04T21:26:20.000Z
|
2022-03-21T02:39:29.000Z
|
import os
import pytest
import siliconcompiler
import sys
@pytest.mark.eda
def test_heartbeat_padring_with_floorplan(setup_example_test, oh_dir):
setup_example_test('heartbeat_padring')
from floorplan_build import build_core, build_top
# Run the build, and verify its outputs.
build_core()
build_top()
assert os.path.isfile('build/heartbeat/job0/export/0/outputs/heartbeat.gds')
assert os.path.isfile('build/heartbeat_top/job0/export/0/outputs/heartbeat_top.gds')
@pytest.mark.eda
def test_heartbeat_padring_without_floorplan(setup_example_test, oh_dir):
setup_example_test('heartbeat_padring')
from build import build_core, build_top
# Run the build, and verify its outputs.
build_core()
build_top()
assert os.path.isfile('build/heartbeat/job0/export/0/outputs/heartbeat.gds')
assert os.path.isfile('build/heartbeat_top/job0/export/0/outputs/heartbeat_top.gds')
del sys.modules['build']
| 30.806452
| 88
| 0.769634
| 139
| 955
| 5.05036
| 0.266187
| 0.074074
| 0.11396
| 0.096866
| 0.874644
| 0.874644
| 0.874644
| 0.77208
| 0.77208
| 0.77208
| 0
| 0.009639
| 0.13089
| 955
| 30
| 89
| 31.833333
| 0.836145
| 0.080628
| 0
| 0.571429
| 0
| 0
| 0.296
| 0.251429
| 0
| 0
| 0
| 0
| 0.190476
| 1
| 0.095238
| false
| 0
| 0.285714
| 0
| 0.380952
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
21ef52cc9c135d05d4dc47d52c97e92a3daf0bbf
| 99
|
py
|
Python
|
migrations/376-mkt-featured-collections.py
|
muffinresearch/zamboni
|
045a6f07c775b99672af6d9857d295ed02fe5dd9
|
[
"BSD-3-Clause"
] | null | null | null |
migrations/376-mkt-featured-collections.py
|
muffinresearch/zamboni
|
045a6f07c775b99672af6d9857d295ed02fe5dd9
|
[
"BSD-3-Clause"
] | null | null | null |
migrations/376-mkt-featured-collections.py
|
muffinresearch/zamboni
|
045a6f07c775b99672af6d9857d295ed02fe5dd9
|
[
"BSD-3-Clause"
] | null | null | null |
# Migration removed because it depends on models which have been removed
def run():
return False
| 19.8
| 72
| 0.777778
| 15
| 99
| 5.133333
| 0.933333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.181818
| 99
| 5
| 73
| 19.8
| 0.950617
| 0.707071
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| true
| 0
| 0
| 0.5
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 1
| 1
| 0
|
0
| 6
|
21ff2c120950dc9e7f7ffcb94a61cdfa29a7584c
| 109
|
py
|
Python
|
litcoin/script/humanreadable.py
|
odonnellnoel/litcoin
|
cebe745df97d060c16b8d9dfa9e58a0418f75560
|
[
"MIT"
] | null | null | null |
litcoin/script/humanreadable.py
|
odonnellnoel/litcoin
|
cebe745df97d060c16b8d9dfa9e58a0418f75560
|
[
"MIT"
] | null | null | null |
litcoin/script/humanreadable.py
|
odonnellnoel/litcoin
|
cebe745df97d060c16b8d9dfa9e58a0418f75560
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
from ..binhex import x
def script_to_human_readable(script):
return x(script)
| 12.111111
| 37
| 0.724771
| 17
| 109
| 4.470588
| 0.823529
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.010989
| 0.165138
| 109
| 8
| 38
| 13.625
| 0.824176
| 0.192661
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0.333333
| 0.333333
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 1
| 1
| 0
|
0
| 6
|
1d0662a8432af166300c315b1dfb23d502e62128
| 25
|
py
|
Python
|
dama/plotting/__init__.py
|
philippeller/MilleFeuille
|
962c322531e208a7d20a273a56d13b954ad80bc3
|
[
"Apache-2.0"
] | 4
|
2020-04-22T07:46:27.000Z
|
2021-03-11T11:44:08.000Z
|
dama/plotting/__init__.py
|
philippeller/MilleFeuille
|
962c322531e208a7d20a273a56d13b954ad80bc3
|
[
"Apache-2.0"
] | 4
|
2020-04-22T07:14:36.000Z
|
2021-03-10T13:56:06.000Z
|
dama/plotting/__init__.py
|
philippeller/pynocular
|
962c322531e208a7d20a273a56d13b954ad80bc3
|
[
"Apache-2.0"
] | 1
|
2021-03-09T19:22:44.000Z
|
2021-03-09T19:22:44.000Z
|
from .stat_plot import *
| 12.5
| 24
| 0.76
| 4
| 25
| 4.5
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.16
| 25
| 1
| 25
| 25
| 0.857143
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
10f1cd5b126636eea3ba2da396fff6b122f18ae9
| 3,304
|
py
|
Python
|
wavenet_vocoder/builder.py
|
dendisuhubdy/parallel_wavenet_vocoder
|
8f2bd7c0bd30cb90cc7ff8438ce78545c409227b
|
[
"MIT"
] | 155
|
2018-08-02T09:08:08.000Z
|
2022-01-03T22:14:52.000Z
|
wavenet_vocoder/builder.py
|
dendisuhubdy/parallel_wavenet_vocoder
|
8f2bd7c0bd30cb90cc7ff8438ce78545c409227b
|
[
"MIT"
] | 1
|
2019-09-02T10:42:36.000Z
|
2019-09-24T02:50:18.000Z
|
wavenet_vocoder/builder.py
|
dendisuhubdy/parallel_wavenet_vocoder
|
8f2bd7c0bd30cb90cc7ff8438ce78545c409227b
|
[
"MIT"
] | 34
|
2018-08-06T02:46:34.000Z
|
2021-03-15T02:18:20.000Z
|
# coding: utf-8
from __future__ import with_statement, print_function, absolute_import
def wavenet(out_channels=256,
layers=20,
stacks=2,
residual_channels=512,
gate_channels=512,
skip_out_channels=512,
cin_channels=-1,
gin_channels=-1,
weight_normalization=True,
dropout=1 - 0.95,
kernel_size=3,
n_speakers=None,
upsample_conditional_features=False,
upsample_scales=[16, 16],
freq_axis_kernel_size=3,
scalar_input=False,
use_speaker_embedding=True,
legacy=True,
use_gaussian=False,
):
from wavenet_vocoder import WaveNet
model = WaveNet(out_channels=out_channels, layers=layers, stacks=stacks,
residual_channels=residual_channels,
gate_channels=gate_channels,
skip_out_channels=skip_out_channels,
kernel_size=kernel_size, dropout=dropout,
weight_normalization=weight_normalization,
cin_channels=cin_channels, gin_channels=gin_channels,
n_speakers=n_speakers,
upsample_conditional_features=upsample_conditional_features,
upsample_scales=upsample_scales,
freq_axis_kernel_size=freq_axis_kernel_size,
scalar_input=scalar_input,
use_speaker_embedding=use_speaker_embedding,
legacy=legacy,
use_gaussian=use_gaussian,
)
return model
def student(out_channels=256,
iaf_layers=[10, 10, 10, 10, 10, 10],
iaf_stacks=[1, 1, 1, 1, 1, 1],
residual_channels=128,
gate_channels=128,
skip_out_channels=128,
cin_channels=-1,
gin_channels=-1,
weight_normalization=True,
dropout=1 - 0.95,
kernel_size=3,
n_speakers=None,
upsample_conditional_features=False,
upsample_scales=[16, 16],
freq_axis_kernel_size=3,
scalar_input=False,
use_speaker_embedding=True,
legacy=True,
use_gaussian=False,
):
from wavenet_vocoder import Student
model = Student(out_channels=out_channels, iaf_layers=iaf_layers, iaf_stacks=iaf_stacks,
residual_channels=residual_channels,
gate_channels=gate_channels,
skip_out_channels=skip_out_channels,
kernel_size=kernel_size, dropout=dropout,
weight_normalization=weight_normalization,
cin_channels=cin_channels, gin_channels=gin_channels,
n_speakers=n_speakers,
upsample_conditional_features=upsample_conditional_features,
upsample_scales=upsample_scales,
freq_axis_kernel_size=freq_axis_kernel_size,
scalar_input=scalar_input,
use_speaker_embedding=use_speaker_embedding,
legacy=legacy,
use_gaussian=use_gaussian,
)
return model
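# --- Usage sketch (hypothetical, not from the repository) -------------------
# Construct an unconditional teacher WaveNet with the factory above; the
# keyword values are illustrative, not recommended settings.
from wavenet_vocoder.builder import wavenet

model = wavenet(out_channels=256, layers=20, stacks=2,
                cin_channels=-1, gin_channels=-1)  # no local/global conditioning
print(model)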
| 37.977011
| 92
| 0.578692
| 328
| 3,304
| 5.417683
| 0.185976
| 0.074283
| 0.050647
| 0.060777
| 0.785594
| 0.775464
| 0.775464
| 0.775464
| 0.775464
| 0.775464
| 0
| 0.033191
| 0.361683
| 3,304
| 86
| 93
| 38.418605
| 0.809388
| 0.003935
| 0
| 0.753247
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.025974
| false
| 0
| 0.038961
| 0
| 0.090909
| 0.012987
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
10f684ba1a09ab7305df1167555b80d5701d57b3
| 28,115
|
py
|
Python
|
spark_fhir_schemas/stu3/complex_types/communicationrequest.py
|
icanbwell/SparkFhirSchemas
|
8c828313c39850b65f8676e67f526ee92b7d624e
|
[
"Apache-2.0"
] | 2
|
2020-10-31T23:25:01.000Z
|
2021-06-09T14:12:42.000Z
|
spark_fhir_schemas/stu3/complex_types/communicationrequest.py
|
icanbwell/SparkFhirSchemas
|
8c828313c39850b65f8676e67f526ee92b7d624e
|
[
"Apache-2.0"
] | null | null | null |
spark_fhir_schemas/stu3/complex_types/communicationrequest.py
|
icanbwell/SparkFhirSchemas
|
8c828313c39850b65f8676e67f526ee92b7d624e
|
[
"Apache-2.0"
] | null | null | null |
from typing import Union, List, Optional
from pyspark.sql.types import (
StructType,
StructField,
StringType,
ArrayType,
DataType,
TimestampType,
)
# This file is auto-generated by generate_schema so do not edit manually
# noinspection PyPep8Naming
class CommunicationRequestSchema:
"""
A request to convey information; e.g. the CDS system proposes that an alert be
sent to a responsible provider, the CDS system proposes that the public health
agency be notified about a reportable condition.
"""
# noinspection PyDefaultArgument
@staticmethod
def get_schema(
max_nesting_depth: Optional[int] = 6,
nesting_depth: int = 0,
nesting_list: List[str] = [],
max_recursion_limit: Optional[int] = 2,
include_extension: Optional[bool] = False,
extension_fields: Optional[List[str]] = [
"valueBoolean",
"valueCode",
"valueDate",
"valueDateTime",
"valueDecimal",
"valueId",
"valueInteger",
"valuePositiveInt",
"valueString",
"valueTime",
"valueUnsignedInt",
"valueUri",
"valueQuantity",
],
extension_depth: int = 0,
max_extension_depth: Optional[int] = 2,
) -> Union[StructType, DataType]:
"""
A request to convey information; e.g. the CDS system proposes that an alert be
sent to a responsible provider, the CDS system proposes that the public health
agency be notified about a reportable condition.
id: The logical id of the resource, as used in the URL for the resource. Once
assigned, this value never changes.
extension: May be used to represent additional information that is not part of the basic
definition of the resource. In order to make the use of extensions safe and
manageable, there is a strict set of governance applied to the definition and
use of extensions. Though any implementer is allowed to define an extension,
there is a set of requirements that SHALL be met as part of the definition of
the extension.
meta: The metadata about the resource. This is content that is maintained by the
infrastructure. Changes to the content may not always be associated with
version changes to the resource.
implicitRules: A reference to a set of rules that were followed when the resource was
constructed, and which must be understood when processing the content.
language: The base language in which the resource is written.
text: A human-readable narrative that contains a summary of the resource, and may be
used to represent the content of the resource to a human. The narrative need
not encode all the structured data, but is required to contain sufficient
detail to make it "clinically safe" for a human to just read the narrative.
Resource definitions may define what content should be represented in the
narrative to ensure clinical safety.
contained: These resources do not have an independent existence apart from the resource
that contains them - they cannot be identified independently, and nor can they
have their own independent transaction scope.
resourceType: This is a CommunicationRequest resource
identifier: A unique ID of this request for reference purposes. It must be provided if
user wants it returned as part of any output, otherwise it will be
autogenerated, if needed, by CDS system. Does not need to be the actual ID of
the source system.
basedOn: A plan or proposal that is fulfilled in whole or in part by this request.
replaces: Completed or terminated request(s) whose function is taken by this new
request.
groupIdentifier: A shared identifier common to all requests that were authorized more or less
simultaneously by a single author, representing the identifier of the
requisition, prescription or similar form.
status: The status of the proposal or order.
category: The type of message to be sent such as alert, notification, reminder,
instruction, etc.
priority: Characterizes how quickly the proposed act must be initiated. Includes
concepts such as stat, urgent, routine.
medium: A channel that was used for this communication (e.g. email, fax).
subject: The patient or group that is the focus of this communication request.
recipient: The entity (e.g. person, organization, clinical information system, device,
group, or care team) which is the intended target of the communication.
topic: The resources which were related to producing this communication request.
context: The encounter or episode of care within which the communication request was
created.
payload: Text, attachment(s), or resource(s) to be communicated to the recipient.
occurrenceDateTime: The time when this communication is to occur.
occurrencePeriod: The time when this communication is to occur.
authoredOn: For draft requests, indicates the date of initial creation. For requests with
other statuses, indicates the date of activation.
sender: The entity (e.g. person, organization, clinical information system, or device)
which is to be the source of the communication.
requester: The individual who initiated the request and has responsibility for its
activation.
reasonCode: Describes why the request is being made in coded or textual form.
reasonReference: Indicates another resource whose existence justifies this request.
note: Comments made about the request by the requester, sender, recipient, subject
or other participants.
"""
from spark_fhir_schemas.stu3.complex_types.extension import ExtensionSchema
from spark_fhir_schemas.stu3.complex_types.meta import MetaSchema
from spark_fhir_schemas.stu3.complex_types.narrative import NarrativeSchema
from spark_fhir_schemas.stu3.simple_types.resourcelist import ResourceListSchema
from spark_fhir_schemas.stu3.complex_types.identifier import IdentifierSchema
from spark_fhir_schemas.stu3.complex_types.reference import ReferenceSchema
from spark_fhir_schemas.stu3.complex_types.codeableconcept import (
CodeableConceptSchema,
)
from spark_fhir_schemas.stu3.complex_types.communicationrequest_payload import (
CommunicationRequest_PayloadSchema,
)
from spark_fhir_schemas.stu3.complex_types.period import PeriodSchema
from spark_fhir_schemas.stu3.complex_types.communicationrequest_requester import (
CommunicationRequest_RequesterSchema,
)
from spark_fhir_schemas.stu3.complex_types.annotation import AnnotationSchema
if (
max_recursion_limit
and nesting_list.count("CommunicationRequest") >= max_recursion_limit
) or (max_nesting_depth and nesting_depth >= max_nesting_depth):
return StructType([StructField("id", StringType(), True)])
# add my name to recursion list for later
my_nesting_list: List[str] = nesting_list + ["CommunicationRequest"]
schema = StructType(
[
# The logical id of the resource, as used in the URL for the resource. Once
# assigned, this value never changes.
StructField("id", StringType(), True),
# May be used to represent additional information that is not part of the basic
# definition of the resource. In order to make the use of extensions safe and
# manageable, there is a strict set of governance applied to the definition and
# use of extensions. Though any implementer is allowed to define an extension,
# there is a set of requirements that SHALL be met as part of the definition of
# the extension.
StructField(
"extension",
ArrayType(
ExtensionSchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth,
max_extension_depth=max_extension_depth,
)
),
True,
),
# The metadata about the resource. This is content that is maintained by the
# infrastructure. Changes to the content may not always be associated with
# version changes to the resource.
StructField(
"meta",
MetaSchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth + 1,
max_extension_depth=max_extension_depth,
),
True,
),
# A reference to a set of rules that were followed when the resource was
# constructed, and which must be understood when processing the content.
StructField("implicitRules", StringType(), True),
# The base language in which the resource is written.
StructField("language", StringType(), True),
# A human-readable narrative that contains a summary of the resource, and may be
# used to represent the content of the resource to a human. The narrative need
# not encode all the structured data, but is required to contain sufficient
# detail to make it "clinically safe" for a human to just read the narrative.
# Resource definitions may define what content should be represented in the
# narrative to ensure clinical safety.
StructField(
"text",
NarrativeSchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth + 1,
max_extension_depth=max_extension_depth,
),
True,
),
# These resources do not have an independent existence apart from the resource
# that contains them - they cannot be identified independently, and nor can they
# have their own independent transaction scope.
StructField(
"contained",
ArrayType(
ResourceListSchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth,
max_extension_depth=max_extension_depth,
)
),
True,
),
# This is a CommunicationRequest resource
StructField("resourceType", StringType(), True),
# A unique ID of this request for reference purposes. It must be provided if
# user wants it returned as part of any output, otherwise it will be
# autogenerated, if needed, by CDS system. Does not need to be the actual ID of
# the source system.
StructField(
"identifier",
ArrayType(
IdentifierSchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth,
max_extension_depth=max_extension_depth,
)
),
True,
),
# A plan or proposal that is fulfilled in whole or in part by this request.
StructField(
"basedOn",
ArrayType(
ReferenceSchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth,
max_extension_depth=max_extension_depth,
)
),
True,
),
# Completed or terminated request(s) whose function is taken by this new
# request.
StructField(
"replaces",
ArrayType(
ReferenceSchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth,
max_extension_depth=max_extension_depth,
)
),
True,
),
# A shared identifier common to all requests that were authorized more or less
# simultaneously by a single author, representing the identifier of the
# requisition, prescription or similar form.
StructField(
"groupIdentifier",
IdentifierSchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth + 1,
max_extension_depth=max_extension_depth,
),
True,
),
# The status of the proposal or order.
StructField("status", StringType(), True),
# The type of message to be sent such as alert, notification, reminder,
# instruction, etc.
StructField(
"category",
ArrayType(
CodeableConceptSchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth,
max_extension_depth=max_extension_depth,
)
),
True,
),
# Characterizes how quickly the proposed act must be initiated. Includes
# concepts such as stat, urgent, routine.
StructField("priority", StringType(), True),
# A channel that was used for this communication (e.g. email, fax).
StructField(
"medium",
ArrayType(
CodeableConceptSchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth,
max_extension_depth=max_extension_depth,
)
),
True,
),
# The patient or group that is the focus of this communication request.
StructField(
"subject",
ReferenceSchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth + 1,
max_extension_depth=max_extension_depth,
),
True,
),
# The entity (e.g. person, organization, clinical information system, device,
# group, or care team) which is the intended target of the communication.
StructField(
"recipient",
ArrayType(
ReferenceSchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth,
max_extension_depth=max_extension_depth,
)
),
True,
),
# The resources which were related to producing this communication request.
StructField(
"topic",
ArrayType(
ReferenceSchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth,
max_extension_depth=max_extension_depth,
)
),
True,
),
# The encounter or episode of care within which the communication request was
# created.
StructField(
"context",
ReferenceSchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth + 1,
max_extension_depth=max_extension_depth,
),
True,
),
# Text, attachment(s), or resource(s) to be communicated to the recipient.
StructField(
"payload",
ArrayType(
CommunicationRequest_PayloadSchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth,
max_extension_depth=max_extension_depth,
)
),
True,
),
# The time when this communication is to occur.
StructField("occurrenceDateTime", TimestampType(), True),
# The time when this communication is to occur.
StructField(
"occurrencePeriod",
PeriodSchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth + 1,
max_extension_depth=max_extension_depth,
),
True,
),
# For draft requests, indicates the date of initial creation. For requests with
# other statuses, indicates the date of activation.
StructField("authoredOn", StringType(), True),
# The entity (e.g. person, organization, clinical information system, or device)
# which is to be the source of the communication.
StructField(
"sender",
ReferenceSchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth + 1,
max_extension_depth=max_extension_depth,
),
True,
),
# The individual who initiated the request and has responsibility for its
# activation.
StructField(
"requester",
CommunicationRequest_RequesterSchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth + 1,
max_extension_depth=max_extension_depth,
),
True,
),
# Describes why the request is being made in coded or textual form.
StructField(
"reasonCode",
ArrayType(
CodeableConceptSchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth,
max_extension_depth=max_extension_depth,
)
),
True,
),
# Indicates another resource whose existence justifies this request.
StructField(
"reasonReference",
ArrayType(
ReferenceSchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth,
max_extension_depth=max_extension_depth,
)
),
True,
),
# Comments made about the request by the requester, sender, recipient, subject
# or other participants.
StructField(
"note",
ArrayType(
AnnotationSchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth,
max_extension_depth=max_extension_depth,
)
),
True,
),
]
)
if not include_extension:
schema.fields = [
c
if c.name != "extension"
else StructField("extension", StringType(), True)
for c in schema.fields
]
return schema
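# --- Usage sketch (hypothetical, not part of the generated file) ------------
# Materialize the schema and parse FHIR STU3 JSON with Spark; the session
# setup and the input path are assumptions for illustration only.
from pyspark.sql import SparkSession
from spark_fhir_schemas.stu3.complex_types.communicationrequest import (
    CommunicationRequestSchema,
)

spark = SparkSession.builder.getOrCreate()
schema = CommunicationRequestSchema.get_schema()  # StructType with the defaults
df = spark.read.schema(schema).json("communication_requests.ndjson")
df.printSchema()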
| 49.498239
| 101
| 0.540139
| 2,555
| 28,115
| 5.720548
| 0.140509
| 0.073071
| 0.046182
| 0.068966
| 0.830597
| 0.820676
| 0.820676
| 0.797209
| 0.776546
| 0.770799
| 0
| 0.002806
| 0.417002
| 28,115
| 567
| 102
| 49.585538
| 0.888902
| 0.289383
| 0
| 0.698492
| 0
| 0
| 0.023683
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.002513
| false
| 0
| 0.032663
| 0
| 0.042714
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
80049a30b6f4a5a8e2b777606a07b87d2087251d
| 23
|
py
|
Python
|
mytoolz/__init__.py
|
mykkro/mytoolz
|
acdde5b7da75fbf507368efbda77656b7126c61b
|
[
"MIT"
] | null | null | null |
mytoolz/__init__.py
|
mykkro/mytoolz
|
acdde5b7da75fbf507368efbda77656b7126c61b
|
[
"MIT"
] | null | null | null |
mytoolz/__init__.py
|
mykkro/mytoolz
|
acdde5b7da75fbf507368efbda77656b7126c61b
|
[
"MIT"
] | null | null | null |
from .mytoolz import *
| 11.5
| 22
| 0.73913
| 3
| 23
| 5.666667
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.173913
| 23
| 1
| 23
| 23
| 0.894737
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
801e45fc9621bcd7d2cb10ccc06ab3a41a023d96
| 9,677
|
py
|
Python
|
bvbq/bvbq_functions.py
|
DFNaiff/BVBQ
|
48f0eb624483f67b748d791efc0c06ddfb6e0646
|
[
"MIT"
] | null | null | null |
bvbq/bvbq_functions.py
|
DFNaiff/BVBQ
|
48f0eb624483f67b748d791efc0c06ddfb6e0646
|
[
"MIT"
] | null | null | null |
bvbq/bvbq_functions.py
|
DFNaiff/BVBQ
|
48f0eb624483f67b748d791efc0c06ddfb6e0646
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# pylint: disable=E1101
"""
Objective functions for BVBQ
"""
import math
import torch
from . import bayesquad
from . import distributions
from . import utils
def mcbq_dmvn_relbo(logprobgp, mean, var, mixmeans, mixvars, mixweights,
nsamples=100, logdelta=-20., reg=1.0):
"""
RELBO objective function for mixture of diagonal Gaussians
Parameters
----------
logprobgp : SimpleGP
Gaussian Process object approximating logdensity
mean : torch.Tensor
Mean vector of proposed diagonal Gaussian distribution
var : torch.Tensor
Variance vector of proposed diagonal Gaussian distribution
mixmeans : torch.Tensor
Mean matrix of current mixtures of
diagonal normal distribution, of shape (nmixtures,dim)
mixvars : torch.Tensor
Variance matrix of current mixtures of
diagonal normal distribution, of shape (nmixtures,dim)
mixweights : torch.Tensor
Weights vector of current mixture components
nsamples : int
Number of samples for Monte Carlo estimation of cross-entropy
logdelta : float
Logarithm of regularizer term for cross entropy
reg : float
Regularizer term for self entropy
Returns
-------
torch.Tensor
Value of RELBO
"""
term1 = bayesquad.separable_dmvn_bq(
logprobgp, mean, var, return_var=False) # Variance
samples = distributions.DiagonalNormalDistribution.sample_(
nsamples, mean, var)
term2_ = distributions.MixtureDiagonalNormalDistribution.logprob_(
samples, mixmeans, mixvars, mixweights)
term2 = -utils.logbound(term2_, logdelta).mean() # Cross entropy
term3 = 0.5*torch.sum(torch.log(2*math.pi*math.e*var)) # Entropy
return term1 + term2 + reg*term3
def mcbq_dmvn_lbrelbo(logprobgp, mean, var, mixmeans, mixvars, mixweights,
logdelta=-20, reg=1.0):
"""
LRELBO objective function for mixture of diagonal Gaussians
Parameters
----------
logprobgp : SimpleGP
Gaussian Process object approximating logdensity
mean : torch.Tensor
Mean vector of proposed diagonal Gaussian distribution
var : torch.Tensor
Variance vector of proposed diagonal Gaussian distribution
mixmeans : torch.Tensor
Mean matrix of current mixtures of
diagonal normal distribution, of shape (nmixtures,dim)
mixvars : torch.Tensor
Variance matrix of current mixtures of
diagonal normal distribution, of shape (nmixtures,dim)
mixweights : torch.Tensor
Weights vector of current mixture components
logdelta : float
Logarithm of regularizer term for cross entropy
reg : float
Regularizer term for self entropy
Returns
-------
torch.Tensor
Value of LRELBO
"""
term1 = bayesquad.separable_dmvn_bq(
logprobgp, mean, var, return_var=False) # Variance
term2 = utils.lb_mvn_mixmvn_cross_entropy(
mean, var, mixmeans, mixvars, mixweights, logdelta) # Cross entropy
term3 = 0.5*torch.sum(torch.log(2*math.pi*math.e*var)) # Entropy
return term1 + term2 + reg*term3
def mcbq_mixdmvn_delbodw(weight, logprobgp, mean, var,
mixmeans, mixvars, mixweights,
nsamples=1000):
"""
Gradient (in relation to weight) of
boosting objective function for mixture of diagonal Gaussians
Parameters
----------
weight : torch.Tensor
Weight of new component
logprobgp : SimpleGP
Gaussian Process object approximating logdensity
mean : torch.Tensor
Mean vector of proposed diagonal Gaussian distribution
var : torch.Tensor
Variance vector of proposed diagonal Gaussian distribution
mixmeans : torch.Tensor
Mean matrix of current mixtures of
diagonal normal distribution, of shape (nmixtures,dim)
mixvars : torch.Tensor
Variance matrix of current mixtures of
diagonal normal distribution, of shape (nmixtures,dim)
mixweights : torch.Tensor
Weights vector of current mixture components
nsamples : int
Number of samples for Monte Carlo estimation of entropy
Returns
-------
torch.Tensor
Value of gradient
"""
weight = utils.tensor_convert(weight)
logprob_terms = logprob_terms_mixdmvn_delbodw(
logprobgp, mean, var,
mixmeans, mixvars, mixweights)
entropy_terms = entropy_terms_mixdmvn_delbodw(
weight, mean, var,
mixmeans, mixvars, mixweights,
nsamples)
return logprob_terms + entropy_terms
def logprob_terms_mixdmvn_delbodw(logprobgp, mean, var,
mixmeans, mixvars, mixweights):
"""
Log-density term of gradient (in relation to weight) of
boosting objective function for mixture of diagonal Gaussians
Parameters
----------
logprobgp : SimpleGP
Gaussian Process object approximating logdensity
mean : torch.Tensor
Mean vector of proposed diagonal Gaussian distribution
var : torch.Tensor
Variance vector of proposed diagonal Gaussian distribution
mixmeans : torch.Tensor
Mean matrix of current mixtures of
diagonal normal distribution, of shape (nmixtures,dim)
mixvars : torch.Tensor
Variance matrix of current mixtures of
diagonal normal distribution, of shape (nmixtures,dim)
mixweights : torch.Tensor
Weights vector of current mixture components
Returns
-------
torch.Tensor
Value of logdensity term
"""
term1 = bayesquad.separable_dmvn_bq(logprobgp, mean, var, return_var=False)
term2 = bayesquad.separable_mixdmvn_bq(logprobgp, mixmeans,
mixvars, mixweights,
return_var=False)
return term1 - term2
def entropy_terms_mixdmvn_delbodw(weight, mean, var,
mixmeans, mixvars, mixweights,
nsamples=1000):
"""
Entropy term of gradient (in relation to weight) of
boosting objective function for mixture of diagonal Gaussians
Parameters
----------
weight : torch.Tensor
Weight of new component
mean : torch.Tensor
Mean vector of proposed diagonal Gaussian distribution
var : torch.Tensor
Variance vector of proposed diagonal Gaussian distribution
mixmeans : torch.Tensor
Mean matrix of current mixtures of
diagonal normal distribution, of shape (nmixtures,dim)
mixvars : torch.Tensor
Variance matrix of current mixtures of
diagonal normal distribution, of shape (nmixtures,dim)
mixweights : torch.Tensor
Weights vector of current mixture components
nsamples : int
Number of samples for Monte Carlo estimation of entropy
Returns
-------
torch.Tensor
        Value of entropy term
"""
weight = utils.tensor_convert(weight)
mixmeans_up = torch.vstack([mixmeans, mean])
mixvars_up = torch.vstack([mixvars, var])
mixweights_up = torch.hstack([(1-weight)*mixweights, weight])
samplesprevious = distributions.MixtureDiagonalNormalDistribution.sample_(
nsamples, mixmeans, mixvars, mixweights)
samplesproposal = distributions.DiagonalNormalDistribution.sample_(
nsamples, mean, var)
term3 = -distributions.MixtureDiagonalNormalDistribution.logprob_(
samplesproposal, mixmeans_up, mixvars_up, mixweights_up).mean()
term4 = distributions.MixtureDiagonalNormalDistribution.logprob_(
samplesprevious, mixmeans_up, mixvars_up, mixweights_up).mean()
return term3 + term4
def bq_mixmvn_elbo(logprobgp, mixmeans, mixvars, mixweights, nsamples):
"""
ELBO objective function for mixture of diagonal Gaussians
Parameters
----------
logprobgp : SimpleGP
Gaussian Process object approximating logdensity
mixmeans : torch.Tensor
Mean matrix of current mixtures of
diagonal normal distribution, of shape (nmixtures,dim)
mixvars : torch.Tensor
Variance matrix of current mixtures of
diagonal normal distribution, of shape (nmixtures,dim)
mixweights : torch.Tensor
Weights vector of current mixture components
nsamples : int
Number of samples for Monte Carlo estimation of entropy
Returns
-------
torch.Tensor
Value of ELBO
"""
term1 = bayesquad.separable_mixdmvn_bq(logprobgp, mixmeans,
mixvars, mixweights,
return_var=False)
samples = distributions.MixtureDiagonalNormalDistribution.sample_(
nsamples, mixmeans, mixvars, mixweights)
term2 = -distributions.MixtureDiagonalNormalDistribution.logprob_(
samples, mixmeans, mixvars, mixweights).mean()
return term1 + term2
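# --- Standalone check (hypothetical, not from the repository) ---------------
# The self-entropy term used above is the closed form for a diagonal Gaussian,
#     H = 0.5 * sum_i log(2*pi*e*var_i),
# and should match a Monte Carlo estimate of -E[log q(x)]:
import math
import numpy as np

rng = np.random.default_rng(0)
var = np.array([0.5, 2.0, 1.3])
closed_form = 0.5 * np.sum(np.log(2 * math.pi * math.e * var))

samples = rng.normal(0.0, np.sqrt(var), size=(200_000, var.size))
log_q = -0.5 * np.sum(np.log(2 * math.pi * var) + samples**2 / var, axis=1)
print(closed_form, -log_q.mean())  # the two estimates should agree closely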
| 36.379699
| 79
| 0.629534
| 957
| 9,677
| 6.298851
| 0.124347
| 0.065693
| 0.06221
| 0.045786
| 0.868281
| 0.854678
| 0.820007
| 0.753816
| 0.723955
| 0.723955
| 0
| 0.008532
| 0.3096
| 9,677
| 265
| 80
| 36.516981
| 0.893728
| 0.508629
| 0
| 0.338235
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.088235
| false
| 0
| 0.073529
| 0
| 0.25
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
8022e00a98393f6df5c3372cef9776bf84cb68a2
| 22
|
py
|
Python
|
pptb/tools/__init__.py
|
cattidea/paddle-toolbox
|
e9503d6c82165f1c632eeda020abd3a1d5cbfcf9
|
[
"Apache-2.0",
"MIT"
] | 6
|
2021-10-09T07:36:10.000Z
|
2021-12-08T01:05:30.000Z
|
pptb/tools/__init__.py
|
hanknewbird/paddle-toolbox
|
1f1e4d2dd38e797092c1bba0ec3797dd4bef43f6
|
[
"Apache-2.0",
"MIT"
] | 4
|
2021-11-17T15:26:51.000Z
|
2021-12-24T10:58:41.000Z
|
pptb/tools/__init__.py
|
hanknewbird/paddle-toolbox
|
1f1e4d2dd38e797092c1bba0ec3797dd4bef43f6
|
[
"Apache-2.0",
"MIT"
] | 1
|
2021-12-08T01:05:59.000Z
|
2021-12-08T01:05:59.000Z
|
from .mixing import *
| 11
| 21
| 0.727273
| 3
| 22
| 5.333333
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.181818
| 22
| 1
| 22
| 22
| 0.888889
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
80374c590f82ff89749c3589c53cf500347e0814
| 4,815
|
py
|
Python
|
tests/test_onboarding.py
|
clifton/dydx-v3-python
|
974ffc8f3512aa48171ef8dc2e623d6df3536812
|
[
"Apache-2.0"
] | 109
|
2021-01-07T02:19:24.000Z
|
2022-03-27T21:56:36.000Z
|
tests/test_onboarding.py
|
clifton/dydx-v3-python
|
974ffc8f3512aa48171ef8dc2e623d6df3536812
|
[
"Apache-2.0"
] | 73
|
2021-01-14T23:29:58.000Z
|
2022-03-30T09:27:54.000Z
|
tests/test_onboarding.py
|
clifton/dydx-v3-python
|
974ffc8f3512aa48171ef8dc2e623d6df3536812
|
[
"Apache-2.0"
] | 60
|
2021-01-13T04:34:12.000Z
|
2022-03-26T10:14:35.000Z
|
from web3 import Web3
from dydx3 import Client
from dydx3.constants import NETWORK_ID_MAINNET
from dydx3.constants import NETWORK_ID_ROPSTEN
from tests.constants import DEFAULT_HOST
GANACHE_PRIVATE_KEY = (
'0x4f3edf983ac636a65a842ce7c78d9aa706d3b113bce9c46f30d7d21715b23b1d'
)
EXPECTED_API_KEY_CREDENTIALS_MAINNET = {
'key': '50fdcaa0-62b8-e827-02e8-a9520d46cb9f',
'secret': 'rdHdKDAOCa0B_Mq-Q9kh8Fz6rK3ocZNOhKB4QsR9',
'passphrase': '12_1LuuJMZUxcj3kGBWc',
}
EXPECTED_STARK_PRIVATE_KEY_MAINNET = (
'0x170d807cafe3d8b5758f3f698331d292bf5aeb71f6fd282f0831dee094ee891'
)
EXPECTED_API_KEY_CREDENTIALS_ROPSTEN = {
'key': '9c1d91a5-0a30-1ed4-2d3d-b840a479b965',
'secret': 'hHYEswFe5MHMm8gFb81Jas9b7iLQUicsVv5YBRMY',
'passphrase': '9z5Ew7m2DLQd87Xlk7Hd',
}
EXPECTED_STARK_PRIVATE_KEY_ROPSTEN = (
'0x50505654b282eb3debadddeddfa1bc76545a6837dcd59d7d41f6a282a4bbccc'
)
class TestOnboarding():
def test_derive_stark_key_on_mainnet_from_web3(self):
web3 = Web3() # Connect to a local Ethereum node.
client = Client(
host=DEFAULT_HOST,
network_id=NETWORK_ID_MAINNET,
web3=web3,
)
signer_address = web3.eth.accounts[0]
stark_private_key = client.onboarding.derive_stark_key(signer_address)
assert stark_private_key == EXPECTED_STARK_PRIVATE_KEY_MAINNET
def test_recover_default_api_key_credentials_on_mainnet_from_web3(self):
web3 = Web3() # Connect to a local Ethereum node.
client = Client(
host=DEFAULT_HOST,
network_id=NETWORK_ID_MAINNET,
web3=web3,
)
signer_address = web3.eth.accounts[0]
api_key_credentials = (
client.onboarding.recover_default_api_key_credentials(
signer_address,
)
)
assert api_key_credentials == EXPECTED_API_KEY_CREDENTIALS_MAINNET
def test_derive_stark_key_on_ropsten_from_web3(self):
web3 = Web3() # Connect to a local Ethereum node.
client = Client(
host=DEFAULT_HOST,
network_id=NETWORK_ID_ROPSTEN,
web3=web3,
)
signer_address = web3.eth.accounts[0]
stark_private_key = client.onboarding.derive_stark_key(signer_address)
assert stark_private_key == EXPECTED_STARK_PRIVATE_KEY_ROPSTEN
def test_recover_default_api_key_credentials_on_ropsten_from_web3(self):
web3 = Web3() # Connect to a local Ethereum node.
client = Client(
host=DEFAULT_HOST,
network_id=NETWORK_ID_ROPSTEN,
web3=web3,
)
signer_address = web3.eth.accounts[0]
api_key_credentials = (
client.onboarding.recover_default_api_key_credentials(
signer_address,
)
)
assert api_key_credentials == EXPECTED_API_KEY_CREDENTIALS_ROPSTEN
def test_derive_stark_key_on_mainnet_from_priv(self):
client = Client(
host=DEFAULT_HOST,
network_id=NETWORK_ID_MAINNET,
eth_private_key=GANACHE_PRIVATE_KEY,
api_key_credentials={'key': 'value'},
)
signer_address = client.default_address
stark_private_key = client.onboarding.derive_stark_key(signer_address)
assert stark_private_key == EXPECTED_STARK_PRIVATE_KEY_MAINNET
def test_recover_default_api_key_credentials_on_mainnet_from_priv(self):
client = Client(
host=DEFAULT_HOST,
network_id=NETWORK_ID_MAINNET,
eth_private_key=GANACHE_PRIVATE_KEY,
)
signer_address = client.default_address
api_key_credentials = (
client.onboarding.recover_default_api_key_credentials(
signer_address,
)
)
assert api_key_credentials == EXPECTED_API_KEY_CREDENTIALS_MAINNET
def test_derive_stark_key_on_ropsten_from_priv(self):
client = Client(
host=DEFAULT_HOST,
network_id=NETWORK_ID_ROPSTEN,
eth_private_key=GANACHE_PRIVATE_KEY,
)
signer_address = client.default_address
stark_private_key = client.onboarding.derive_stark_key(signer_address)
assert stark_private_key == EXPECTED_STARK_PRIVATE_KEY_ROPSTEN
def test_recover_default_api_key_credentials_on_ropsten_from_priv(self):
client = Client(
host=DEFAULT_HOST,
network_id=NETWORK_ID_ROPSTEN,
eth_private_key=GANACHE_PRIVATE_KEY,
)
signer_address = client.default_address
api_key_credentials = (
client.onboarding.recover_default_api_key_credentials(
signer_address,
)
)
assert api_key_credentials == EXPECTED_API_KEY_CREDENTIALS_ROPSTEN
| 36.203008
| 78
| 0.69055
| 520
| 4,815
| 5.919231
| 0.125
| 0.074724
| 0.127031
| 0.059779
| 0.816764
| 0.776478
| 0.755036
| 0.755036
| 0.741391
| 0.741391
| 0
| 0.058043
| 0.248598
| 4,815
| 132
| 79
| 36.477273
| 0.792703
| 0.028037
| 0
| 0.576271
| 0
| 0
| 0.092834
| 0.074439
| 0
| 0
| 0.041925
| 0
| 0.067797
| 1
| 0.067797
| false
| 0.016949
| 0.042373
| 0
| 0.118644
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
3378725c50b20e31315c907379a25ef5ffaaa789
| 43
|
py
|
Python
|
test_hello.py
|
dhruvinjoshi/pynet_dhruvin
|
9b5530f741dc5390635176018a9b2b3fa22760dc
|
[
"Apache-2.0"
] | null | null | null |
test_hello.py
|
dhruvinjoshi/pynet_dhruvin
|
9b5530f741dc5390635176018a9b2b3fa22760dc
|
[
"Apache-2.0"
] | null | null | null |
test_hello.py
|
dhruvinjoshi/pynet_dhruvin
|
9b5530f741dc5390635176018a9b2b3fa22760dc
|
[
"Apache-2.0"
] | null | null | null |
print "Hello everyone how are you doing?"
| 14.333333
| 41
| 0.744186
| 7
| 43
| 4.571429
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.186047
| 43
| 2
| 42
| 21.5
| 0.914286
| 0
| 0
| 0
| 0
| 0
| 0.785714
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0
| null | null | 1
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
|
0
| 6
|
3386ef9bc2be0a72e0fb10257ff99fb95367b824
| 105
|
py
|
Python
|
config/composites.py
|
veltzer/pyeventroute
|
1f1511d55b437a00ba5d3e0fce24d88b013d7c0b
|
[
"MIT"
] | 14
|
2017-01-06T20:01:29.000Z
|
2021-09-26T08:26:07.000Z
|
config/composites.py
|
veltzer/pyeventroute
|
1f1511d55b437a00ba5d3e0fce24d88b013d7c0b
|
[
"MIT"
] | 3
|
2020-05-20T05:05:52.000Z
|
2021-09-27T06:47:36.000Z
|
config/composites.py
|
veltzer/pyeventroute
|
1f1511d55b437a00ba5d3e0fce24d88b013d7c0b
|
[
"MIT"
] | 10
|
2017-04-01T04:36:34.000Z
|
2020-12-26T07:36:25.000Z
|
import config.apt
import config.git
deb_version = f"{config.git.git_version}~{config.apt.apt_codename}"
| 21
| 67
| 0.790476
| 17
| 105
| 4.705882
| 0.470588
| 0.3
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.07619
| 105
| 4
| 68
| 26.25
| 0.824742
| 0
| 0
| 0
| 0
| 0
| 0.47619
| 0.47619
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.666667
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
338e351b1f87474c5ccaff02419f0480fa4ccf9f
| 8,527
|
py
|
Python
|
kuratowski/k33_k5_nonplanar.py
|
dcabatin/manim
|
e49cfca97bc01f7c9a4d75806ee0fef8e1b18654
|
[
"MIT"
] | null | null | null |
kuratowski/k33_k5_nonplanar.py
|
dcabatin/manim
|
e49cfca97bc01f7c9a4d75806ee0fef8e1b18654
|
[
"MIT"
] | null | null | null |
kuratowski/k33_k5_nonplanar.py
|
dcabatin/manim
|
e49cfca97bc01f7c9a4d75806ee0fef8e1b18654
|
[
"MIT"
] | null | null | null |
from functools import reduce
import itertools as it
import operator as op
import copy
import numpy as np
import random
from manimlib.imports import *
from kuratowski.our_discrete_graph_scene import *
from kuratowski.k3_3 import K33
class makeText(Scene):
def construct(self):
#######Code#######
#Making text
first_line = TextMobject("Manim is fun")
second_line = TextMobject("and useful")
final_line = TextMobject("Hope you like it too!", color=BLUE)
color_final_line = TextMobject("Hope you like it too!")
#Coloring
color_final_line.set_color_by_gradient(BLUE,PURPLE)
#Position text
second_line.next_to(first_line, DOWN)
#Showing text
self.wait(1)
self.play(Write(first_line), Write(second_line))
self.wait(1)
self.play(FadeOut(second_line), ReplacementTransform(first_line, final_line))
self.wait(1)
self.play(Transform(final_line, color_final_line))
self.wait(2)
class K33_Nonplanar(OurGraphTheory):
def construct(self):
self.graph = K33()
super().construct()
# 2 5
# 1 4
# 0 3
removals = []
lemma = TextMobject("Lemma: $K_{3, 3}$ is Nonplanar")
lemma.shift(UP * 3.5)
self.play(Write(lemma))
removals.append(lemma)
self.draw(self.vertices)
self.draw(self.edges)
self.wait()
# V - E + F = 2
# removals.extend(self.vertices)
# removals.append(self.edges)
eulers_form = TextMobject("$V - E + F = 2$")
eulers_form.shift(LEFT * 4.5 + UP * 2.5)
self.play(Write(eulers_form))
self.wait(2.5)
removals.append(eulers_form)
# V = 6
self.accent_vertices()
eulers_form = TextMobject("$6 - E + F = 2$")
eulers_form.shift(LEFT * 4.5 + UP * 1.5)
self.play(Write(eulers_form))
self.wait(2.5)
removals.append(eulers_form)
# E = 9
self.accent_edges()
eulers_form = TextMobject("$6 - 9 + F = 2$")
eulers_form.shift(LEFT * 4.5 + UP * 0.5)
self.play(Write(eulers_form))
self.wait(2.5)
removals.append(eulers_form)
# F = 5
eulers_form = TextMobject("$F = 5$")
eulers_form.shift(LEFT * 4.5 + DOWN * 0.5)
self.play(Write(eulers_form))
self.wait(2.5)
removals.append(eulers_form)
eulers_form = TextMobject("No 3 Edge Faces")
eulers_form.shift(RIGHT * 4.5 + UP * 2.5)
self.play(Write(eulers_form))
removals.append(eulers_form)
# no 3 edge cycles
three_cycles = [
[4, 1, 5, 2],
[3, 2, 5, 1],
[3, 0, 5, 2],
[2, 3, 1, 4],
[5, 0, 3, 2],
[2, 5, 0, 3],
[1, 5, 0, 3],
]
for path in three_cycles:
path = self.trace_path(path, run_time = 1.3)
self.remove(*path)
self.wait(0.5)
edges_faces = TextMobject("No 3 Edge Faces")
edges_faces.shift(RIGHT * 4.5 + UP * 2.5)
self.play(Write(edges_faces))
self.wait(2.5)
removals.append(edges_faces)
edges_faces = TextMobject("$4F \leq 2E$")
edges_faces.shift(RIGHT * 4.5 + UP * 1.5)
self.play(Write(edges_faces))
self.wait(1.5)
removals.append(edges_faces)
# E = 9
self.accent_edges()
edges_faces = TextMobject("$4F \leq 2*9$")
edges_faces.shift(RIGHT * 4.5 + UP * 0.5)
self.play(Write(edges_faces))
self.wait(1.5)
removals.append(edges_faces)
edges_faces = TextMobject("$F \leq 4.5$")
edges_faces.shift(RIGHT * 4.5 + DOWN * 0.5)
self.play(Write(edges_faces))
self.wait(2.5)
removals.append(edges_faces)
        # thus 4F <= 2E gives F <= 4.5
        # gives 5 <= 4.5, a contradiction
contradiction = TextMobject("$5 = F \leq 4.5$")
contradiction.shift(DOWN * 2.5)
self.play(Write(contradiction))
self.wait(1.5)
removals.append(contradiction)
contradiction2 = TextMobject("$5 \leq 4.5$")
contradiction2.shift(DOWN * 2.5)
self.play(Transform(contradiction, contradiction2))
self.wait(4.5)
removals.append(contradiction2)
self.play(*[FadeOut(v) for v in removals + self.vertices + self.edges])
c1 = 2*np.cos(2*PI / 5)
c2 = 2*np.cos(PI / 5)
s1 = 2*np.sin(2*PI / 5)
s2 = 2*np.sin(4*PI / 5)
class K5(Graph):
"""
    Five vertices 0-4 arranged on a regular pentagon.
"""
def construct(self):
self.vertices = [
(0,2,0),
(s1,c1,0),
(s2,-1*c2,0),
(-1*s2,-1*c2,0),
(-1*s1,c1,0)
]
self.edges = [
(a, b)
for a in range(5)
for b in range(a+1,5)
]
class K5_Nonplanar(OurGraphTheory):
def construct(self):
self.graph = K5()
super().construct()
        # vertices 0-4 on a pentagon (layout from the K5 class above)
removals = []
lemma = TextMobject("Lemma: $K_{5}$ is Nonplanar")
lemma.shift(UP * 3.5)
self.play(Write(lemma))
removals.append(lemma)
self.draw(self.vertices)
self.draw(self.edges)
self.wait()
# V - E + F = 2
# removals.extend(self.vertices)
# removals.append(self.edges)
eulers_form = TextMobject("$V - E + F = 2$")
eulers_form.shift(LEFT * 4.5 + UP * 2.5)
self.play(Write(eulers_form))
self.wait(2)
removals.append(eulers_form)
        # V = 5
self.accent_vertices()
eulers_form = TextMobject("$5 - E + F = 2$")
eulers_form.shift(LEFT * 4.5 + UP * 1.5)
self.play(Write(eulers_form))
self.wait(2)
removals.append(eulers_form)
        # E = 10
self.accent_edges()
eulers_form = TextMobject("$5 - 10 + F = 2$")
eulers_form.shift(LEFT * 4.5 + UP * 0.5)
self.play(Write(eulers_form))
self.wait(1.5)
removals.append(eulers_form)
        # F = 7
eulers_form = TextMobject("$F = 7$")
eulers_form.shift(LEFT * 4.5 + DOWN * 0.5)
self.play(Write(eulers_form))
self.wait(1.5)
removals.append(eulers_form)
# eulers_form = TextMobject("No 3 Edge Faces")
# eulers_form.shift(RIGHT * 4.5 + UP * 2.5)
# self.play(Write(eulers_form))
# removals.append(eulers_form)
# # no 3 edge cycles
# three_cycles = [
# [4, 1, 5, 2],
# [3, 2, 5, 1],
# [3, 0, 5, 2],
# [2, 3, 1, 4],
# [5, 0, 3, 2],
# [2, 5, 0, 3],
# [1, 5, 0, 3],
# ]
# for path in three_cycles:
# path = self.trace_path(path, run_time = 1.3)
# self.remove(*path)
# self.wait(0.5)
# edges_faces = TextMobject("No 3 Edge Faces")
# edges_faces.shift(RIGHT * 4.5 + UP * 2.5)
# self.play(Write(edges_faces))
# self.wait(2.5)
# removals.append(edges_faces)
edges_faces = TextMobject("$3F \leq 2E$")
edges_faces.shift(RIGHT * 4.5 + UP * 1.5)
self.play(Write(edges_faces))
self.wait(3)
removals.append(edges_faces)
        # E = 10
self.accent_edges()
edges_faces = TextMobject("$3F \leq 2*10$")
edges_faces.shift(RIGHT * 4.5 + UP * 0.5)
self.play(Write(edges_faces))
self.wait(1)
removals.append(edges_faces)
edges_faces = TextMobject("$F \leq \\frac{20}{3}$")
edges_faces.shift(RIGHT * 4.5 + DOWN * 0.5)
self.play(Write(edges_faces))
self.wait(2)
removals.append(edges_faces)
        # thus 3F <= 2E gives F <= 20/3
        # gives 7 <= 20/3, a contradiction
contradiction = TextMobject("$7 = F \leq \\frac{20}{3}$")
contradiction.shift(DOWN * 2.5)
self.play(Write(contradiction))
self.wait(1.5)
removals.append(contradiction)
contradiction2 = TextMobject("$7 \leq \\frac{20}{3}$")
contradiction2.shift(DOWN * 2.5)
self.play(Transform(contradiction, contradiction2))
self.wait(4.5)
removals.append(contradiction2)
self.play(*[FadeOut(v) for v in removals + self.vertices + self.edges])
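# --- Standalone check (hypothetical, not from the repository) ---------------
# Arithmetic behind both animations: assuming planarity, Euler's formula gives
# F = 2 - V + E, while a graph of girth g satisfies g*F <= 2*E; both graphs
# violate the bound, so neither can be planar.
for name, v, e, girth in [("K_{3,3}", 6, 9, 4), ("K_5", 5, 10, 3)]:
    f = 2 - v + e              # faces forced by Euler's formula
    bound = 2 * e / girth      # max faces allowed by edge/face counting
    print(f"{name}: F = {f} but F <= {bound:.2f} -> contradiction: {f > bound}")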
| 28.905085
| 85
| 0.52797
| 1,118
| 8,527
| 3.923971
| 0.112701
| 0.091178
| 0.049236
| 0.070207
| 0.832687
| 0.818327
| 0.807385
| 0.785503
| 0.76909
| 0.752222
| 0
| 0.059145
| 0.335757
| 8,527
| 295
| 86
| 28.905085
| 0.715395
| 0.119151
| 0
| 0.591398
| 0
| 0
| 0.056192
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.021505
| false
| 0
| 0.048387
| 0
| 0.091398
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
33a0dc13be891f9faa3533c550557fe32149e360
| 36
|
py
|
Python
|
fpgaedu/shell/commands/__init__.py
|
fpgaedu/fpgaedu
|
da7b0c1871d8172243ee77156df8e6c8bb1006d1
|
[
"Apache-2.0"
] | null | null | null |
fpgaedu/shell/commands/__init__.py
|
fpgaedu/fpgaedu
|
da7b0c1871d8172243ee77156df8e6c8bb1006d1
|
[
"Apache-2.0"
] | null | null | null |
fpgaedu/shell/commands/__init__.py
|
fpgaedu/fpgaedu
|
da7b0c1871d8172243ee77156df8e6c8bb1006d1
|
[
"Apache-2.0"
] | null | null | null |
from .program import ProgramCommand
| 18
| 35
| 0.861111
| 4
| 36
| 7.75
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.111111
| 36
| 1
| 36
| 36
| 0.96875
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
33a822c266b5f102d01862ebed3a0d67d8882c0f
| 43,717
|
py
|
Python
|
niapy/algorithms/basic/pso.py
|
eltociear/NiaPy
|
7884aefec8f013d9f8db5c1af7080a61dd19a31d
|
[
"MIT"
] | null | null | null |
niapy/algorithms/basic/pso.py
|
eltociear/NiaPy
|
7884aefec8f013d9f8db5c1af7080a61dd19a31d
|
[
"MIT"
] | 1
|
2021-08-13T07:52:40.000Z
|
2021-08-16T08:52:20.000Z
|
niapy/algorithms/basic/pso.py
|
eltociear/NiaPy
|
7884aefec8f013d9f8db5c1af7080a61dd19a31d
|
[
"MIT"
] | 2
|
2021-08-08T08:29:53.000Z
|
2021-08-12T15:31:55.000Z
|
# encoding=utf8
"""Particle swarm algorithm module."""
import numpy as np
from niapy.algorithms.algorithm import Algorithm
from niapy.util import full_array
from niapy.util.repair import reflect
__all__ = [
'ParticleSwarmAlgorithm',
'ParticleSwarmOptimization',
'CenterParticleSwarmOptimization',
'MutatedParticleSwarmOptimization',
'MutatedCenterParticleSwarmOptimization',
'ComprehensiveLearningParticleSwarmOptimizer',
'MutatedCenterUnifiedParticleSwarmOptimization',
'OppositionVelocityClampingParticleSwarmOptimization'
]
class ParticleSwarmAlgorithm(Algorithm):
r"""Implementation of Particle Swarm Optimization algorithm.
Algorithm:
Particle Swarm Optimization algorithm
Date:
2018
Authors:
Lucija Brezočnik, Grega Vrbančič, Iztok Fister Jr. and Klemen Berkovič
License:
MIT
Reference paper:
Kennedy, J. and Eberhart, R. "Particle Swarm Optimization". Proceedings of IEEE International Conference on Neural Networks. IV. pp. 1942--1948, 1995.
Attributes:
Name (List[str]): List of strings representing algorithm names
c1 (float): Cognitive component.
c2 (float): Social component.
w (Union[float, numpy.ndarray[float]]): Inertial weight.
min_velocity (Union[float, numpy.ndarray[float]]): Minimal velocity.
max_velocity (Union[float, numpy.ndarray[float]]): Maximal velocity.
repair (Callable[[numpy.ndarray, numpy.ndarray, numpy.ndarray, Optional[numpy.random.Generator]], numpy.ndarray]): Repair method for velocity.
See Also:
* :class:`niapy.algorithms.Algorithm`
"""
Name = ['WeightedVelocityClampingParticleSwarmAlgorithm', 'WVCPSO']
@staticmethod
def info():
r"""Get basic information of algorithm.
Returns:
str: Basic information of algorithm.
See Also:
* :func:`niapy.algorithms.Algorithm.info`
"""
return r"""Kennedy, J. and Eberhart, R. "Particle Swarm Optimization". Proceedings of IEEE International Conference on Neural Networks. IV. pp. 1942--1948, 1995."""
def __init__(self, population_size=25, c1=2.0, c2=2.0, w=0.7, min_velocity=-1.5, max_velocity=1.5, repair=reflect,
*args, **kwargs):
"""Initialize ParticleSwarmAlgorithm.
Args:
population_size (int): Population size
c1 (float): Cognitive component.
c2 (float): Social component.
w (Union[float, numpy.ndarray]): Inertial weight.
min_velocity (Union[float, numpy.ndarray]): Minimal velocity.
max_velocity (Union[float, numpy.ndarray]): Maximal velocity.
repair (Callable[[np.ndarray, np.ndarray, np.ndarray, dict], np.ndarray]): Repair method for velocity.
See Also:
* :func:`niapy.algorithms.Algorithm.__init__`
"""
super().__init__(population_size, *args, **kwargs)
self.c1 = c1
self.c2 = c2
self.w = w
self.min_velocity = min_velocity
self.max_velocity = max_velocity
self.repair = repair
def set_parameters(self, population_size=25, c1=2.0, c2=2.0, w=0.7, min_velocity=-1.5, max_velocity=1.5,
repair=reflect, **kwargs):
r"""Set Particle Swarm Algorithm main parameters.
Args:
population_size (int): Population size
c1 (float): Cognitive component.
c2 (float): Social component.
w (Union[float, numpy.ndarray]): Inertial weight.
min_velocity (Union[float, numpy.ndarray]): Minimal velocity.
max_velocity (Union[float, numpy.ndarray]): Maximal velocity.
repair (Callable[[np.ndarray, np.ndarray, np.ndarray, dict], np.ndarray]): Repair method for velocity.
See Also:
* :func:`niapy.algorithms.Algorithm.set_parameters`
"""
super().set_parameters(population_size=population_size, **kwargs)
self.c1 = c1
self.c2 = c2
self.w = w
self.min_velocity = min_velocity
self.max_velocity = max_velocity
self.repair = repair
def get_parameters(self):
r"""Get value of parameters for this instance of algorithm.
Returns:
Dict[str, Union[int, float, numpy.ndarray]]: Dictionary which has parameters mapped to values.
See Also:
* :func:`niapy.algorithms.Algorithm.get_parameters`
"""
d = super().get_parameters()
d.update({
'c1': self.c1,
'c2': self.c2,
'w': self.w,
'min_velocity': self.min_velocity,
'max_velocity': self.max_velocity
})
return d
def init(self, task):
r"""Initialize dynamic arguments of Particle Swarm Optimization algorithm.
Args:
task (Task): Optimization task.
Returns:
Dict[str, Union[float, numpy.ndarray]]:
* w (numpy.ndarray): Inertial weight.
* min_velocity (numpy.ndarray): Minimal velocity.
* max_velocity (numpy.ndarray): Maximal velocity.
* v (numpy.ndarray): Initial velocity of particle.
"""
return {
'w': full_array(self.w, task.dimension),
'min_velocity': full_array(self.min_velocity, task.dimension),
'max_velocity': full_array(self.max_velocity, task.dimension),
'v': np.zeros((self.population_size, task.dimension))
}
def init_population(self, task):
r"""Initialize population and dynamic arguments of the Particle Swarm Optimization algorithm.
Args:
task: Optimization task.
Returns:
Tuple[numpy.ndarray, numpy.ndarray, list, dict]:
1. Initial population.
2. Initial population fitness/function values.
3. Additional arguments.
4. Additional keyword arguments:
* personal_best (numpy.ndarray): particles best population.
* personal_best_fitness (numpy.ndarray[float]): particles best positions function/fitness value.
* w (numpy.ndarray): Inertial weight.
* min_velocity (numpy.ndarray): Minimal velocity.
* max_velocity (numpy.ndarray): Maximal velocity.
* v (numpy.ndarray): Initial velocity of particle.
See Also:
* :func:`niapy.algorithms.Algorithm.init_population`
"""
pop, fpop, d = super().init_population(task)
d.update(self.init(task))
d.update({'personal_best': pop.copy(), 'personal_best_fitness': fpop.copy()})
return pop, fpop, d
def update_velocity(self, v, p, pb, gb, w, min_velocity, max_velocity, task, **kwargs):
r"""Update particle velocity.
Args:
v (numpy.ndarray): Current velocity of particle.
p (numpy.ndarray): Current position of particle.
pb (numpy.ndarray): Personal best position of particle.
gb (numpy.ndarray): Global best position of particle.
w (Union[float, numpy.ndarray]): Weights for velocity adjustment.
min_velocity (numpy.ndarray): Minimal velocity allowed.
max_velocity (numpy.ndarray): Maximal velocity allowed.
task (Task): Optimization task.
kwargs: Additional arguments.
Returns:
numpy.ndarray: Updated velocity of particle.
"""
return self.repair(
w * v + self.c1 * self.random(task.dimension) * (pb - p) + self.c2 * self.random(task.dimension) * (gb - p),
min_velocity, max_velocity)
def run_iteration(self, task, pop, fpop, xb, fxb, **params):
r"""Core function of Particle Swarm Optimization algorithm.
Args:
task (Task): Optimization task.
pop (numpy.ndarray): Current populations.
fpop (numpy.ndarray): Current population fitness/function values.
xb (numpy.ndarray): Current best particle.
fxb (float): Current best particle fitness/function value.
params (dict): Additional function keyword arguments.
Returns:
Tuple[numpy.ndarray, numpy.ndarray, numpy.ndarray, float, dict]:
1. New population.
2. New population fitness/function values.
3. New global best position.
4. New global best positions function/fitness value.
5. Additional arguments.
6. Additional keyword arguments:
* personal_best (numpy.ndarray): Particles best population.
* personal_best_fitness (numpy.ndarray[float]): Particles best positions function/fitness value.
* w (numpy.ndarray): Inertial weight.
* min_velocity (numpy.ndarray): Minimal velocity.
* max_velocity (numpy.ndarray): Maximal velocity.
* v (numpy.ndarray): Initial velocity of particle.
See Also:
* :class:`niapy.algorithms.algorithm.Algorithm.run_iteration`
"""
personal_best = params.pop('personal_best')
personal_best_fitness = params.pop('personal_best_fitness')
w = params.pop('w')
min_velocity = params.pop('min_velocity')
max_velocity = params.pop('max_velocity')
v = params.pop('v')
for i in range(len(pop)):
v[i] = self.update_velocity(v[i], pop[i], personal_best[i], xb, w, min_velocity, max_velocity, task)
pop[i] = task.repair(pop[i] + v[i], rng=self.rng)
fpop[i] = task.eval(pop[i])
if fpop[i] < personal_best_fitness[i]:
personal_best[i], personal_best_fitness[i] = pop[i].copy(), fpop[i]
if fpop[i] < fxb:
xb, fxb = pop[i].copy(), fpop[i]
return pop, fpop, xb, fxb, {'personal_best': personal_best, 'personal_best_fitness': personal_best_fitness,
'w': w, 'min_velocity': min_velocity, 'max_velocity': max_velocity, 'v': v}
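# A minimal standalone sketch of the velocity/position update implemented in
# ParticleSwarmAlgorithm above, using plain NumPy and made-up toy values
# (illustrative only, not part of the original module):
#
#     import numpy as np
#     rng = np.random.default_rng(0)
#     w, c1, c2 = 0.7, 2.0, 2.0
#     p, v = np.zeros(3), np.zeros(3)        # particle position and velocity
#     pb, gb = np.ones(3), np.full(3, 2.0)   # personal and global best
#     v = w * v + c1 * rng.random(3) * (pb - p) + c2 * rng.random(3) * (gb - p)
#     p = p + v                              # candidate position before repair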
class ParticleSwarmOptimization(ParticleSwarmAlgorithm):
r"""Implementation of Particle Swarm Optimization algorithm.
Algorithm:
Particle Swarm Optimization algorithm
Date:
2018
Authors:
Lucija Brezočnik, Grega Vrbančič, Iztok Fister Jr. and Klemen Berkovič
License:
MIT
Reference paper:
Kennedy, J. and Eberhart, R. "Particle Swarm Optimization". Proceedings of IEEE International Conference on Neural Networks. IV. pp. 1942--1948, 1995.
Attributes:
Name (List[str]): List of strings representing algorithm names
See Also:
* :class:`niapy.algorithms.basic.WeightedVelocityClampingParticleSwarmAlgorithm`
"""
Name = ['ParticleSwarmAlgorithm', 'PSO']
@staticmethod
def info():
r"""Get basic information of algorithm.
Returns:
str: Basic information of algorithm.
See Also:
* :func:`niapy.algorithms.Algorithm.info`
"""
return r"""Kennedy, J. and Eberhart, R. "Particle Swarm Optimization". Proceedings of IEEE International Conference on Neural Networks. IV. pp. 1942--1948, 1995."""
def __init__(self, *args, **kwargs):
"""Initialize ParticleSwarmOptimization."""
super().__init__(*args, **kwargs)
self.w = 1.0
self.min_velocity = -np.inf
self.max_velocity = np.inf
def set_parameters(self, **kwargs):
r"""Set core parameters of algorithm.
See Also:
* :func:`niapy.algorithms.basic.WeightedVelocityClampingParticleSwarmAlgorithm.set_parameters`
"""
kwargs.pop('w', None), kwargs.pop('vMin', None), kwargs.pop('vMax', None)
super().set_parameters(w=1, min_velocity=-np.inf, max_velocity=np.inf, **kwargs)
class OppositionVelocityClampingParticleSwarmOptimization(ParticleSwarmAlgorithm):
r"""Implementation of Opposition-Based Particle Swarm Optimization with Velocity Clamping.
Algorithm:
Opposition-Based Particle Swarm Optimization with Velocity Clamping
Date:
2019
Authors:
Klemen Berkovič
License:
MIT
Reference paper:
Shahzad, Farrukh, et al. "Opposition-based particle swarm optimization with velocity clamping (OVCPSO)." Advances in Computational Intelligence. Springer, Berlin, Heidelberg, 2009. 339-348
Attributes:
p0: Probability of opposite learning phase.
w_min: Minimum inertial weight.
w_max: Maximum inertial weight.
sigma: Velocity scaling factor.
See Also:
* :class:`niapy.algorithms.basic.ParticleSwarmAlgorithm`
"""
Name = ['OppositionVelocityClampingParticleSwarmOptimization', 'OVCPSO']
@staticmethod
def info():
r"""Get basic information of algorithm.
Returns:
str: Basic information of algorithm.
See Also:
* :func:`niapy.algorithms.Algorithm.info`
"""
return r"""Shahzad, Farrukh, et al. "Opposition-based particle swarm optimization with velocity clamping (OVCPSO)." Advances in Computational Intelligence. Springer, Berlin, Heidelberg, 2009. 339-348"""
def __init__(self, p0=.3, w_min=.4, w_max=.9, sigma=.1, c1=1.49612, c2=1.49612, *args, **kwargs):
"""Initialize OppositionVelocityClampingParticleSwarmOptimization.
Args:
p0 (float): Probability of running Opposite learning.
w_min (numpy.ndarray): Minimal value of weights.
w_max (numpy.ndarray): Maximum value of weights.
sigma (numpy.ndarray): Velocity range factor.
c1 (float): Cognitive component.
c2 (float): Social component.
See Also:
* :func:`niapy.algorithm.basic.ParticleSwarmAlgorithm.__init__`
"""
kwargs.pop('w', None)
super().__init__(w=w_max, c1=c1, c2=c2, *args, **kwargs)
self.p0 = p0
self.w_min = w_min
self.w_max = w_max
self.sigma = sigma
def set_parameters(self, p0=.3, w_min=.4, w_max=.9, sigma=.1, c1=1.49612, c2=1.49612, **kwargs):
r"""Set core algorithm parameters.
Args:
p0 (float): Probability of running Opposite learning.
w_min (numpy.ndarray): Minimal value of weights.
w_max (numpy.ndarray): Maximum value of weights.
sigma (numpy.ndarray): Velocity range factor.
c1 (float): Cognitive component.
c2 (float): Social component.
See Also:
* :func:`niapy.algorithm.basic.ParticleSwarmAlgorithm.set_parameters`
"""
kwargs.pop('w', None)
super().set_parameters(w=w_max, c1=c1, c2=c2, **kwargs)
self.p0 = p0
self.w_min = w_min
self.w_max = w_max
self.sigma = sigma
def get_parameters(self):
r"""Get value of parameters for this instance of algorithm.
Returns:
Dict[str, Union[int, float, numpy.ndarray]]: Dictionary which has parameters mapped to values.
See Also:
* :func:`niapy.algorithms.basic.ParticleSwarmAlgorithm.get_parameters`
"""
d = ParticleSwarmAlgorithm.get_parameters(self)
d.pop('min_velocity', None), d.pop('max_velocity', None)
d.update({
'p0': self.p0, 'w_min': self.w_min, 'w_max': self.w_max, 'sigma': self.sigma
})
return d
@staticmethod
def opposite_learning(s_l, s_h, pop, fpop, task):
r"""Run opposite learning phase.
Args:
s_l (numpy.ndarray): lower limit of opposite particles.
s_h (numpy.ndarray): upper limit of opposite particles.
pop (numpy.ndarray): Current populations positions.
fpop (numpy.ndarray): Current populations functions/fitness values.
task (Task): Optimization task.
Returns:
Tuple[numpy.ndarray, numpy.ndarray, numpy.ndarray, float]:
1. New particles position
2. New particles function/fitness values
3. New best position of opposite learning phase
4. new best function/fitness value of opposite learning phase
"""
s_r = s_l + s_h
s = np.asarray([s_r - e for e in pop])
s_f = np.asarray([task.eval(e) for e in s])
s, s_f = np.concatenate([pop, s]), np.concatenate([fpop, s_f])
sorted_indices = np.argsort(s_f)
return s[sorted_indices[:len(pop)]], s_f[sorted_indices[:len(pop)]], s[sorted_indices[0]], s_f[
sorted_indices[0]]
def init_population(self, task):
r"""Init starting population and dynamic parameters.
Args:
task (Task): Optimization task.
Returns:
Tuple[numpy.ndarray, numpy.ndarray, list, dict]:
1. Initialized population.
2. Initialized populations function/fitness values.
3. Additional arguments.
4. Additional keyword arguments:
* personal_best (numpy.ndarray): particles best population.
* personal_best_fitness (numpy.ndarray[float]): particles best positions function/fitness value.
* vMin (numpy.ndarray): Minimal velocity.
* vMax (numpy.ndarray): Maximal velocity.
* V (numpy.ndarray): Initial velocity of particle.
* S_u (numpy.ndarray): upper bound for opposite learning.
* S_l (numpy.ndarray): lower bound for opposite learning.
"""
pop, fpop, d = super().init_population(task)
s_l, s_h = task.lower, task.upper
pop, fpop, _, _ = self.opposite_learning(s_l, s_h, pop, fpop, task)
pb_indices = np.where(fpop < d['personal_best_fitness'])
d['personal_best'][pb_indices], d['personal_best_fitness'][pb_indices] = pop[pb_indices], fpop[pb_indices]
d['min_velocity'], d['max_velocity'] = self.sigma * (task.upper - task.lower), self.sigma * (
task.lower - task.upper)
d.update({'s_l': s_l, 's_h': s_h})
return pop, fpop, d
def run_iteration(self, task, pop, fpop, xb, fxb, **params):
r"""Core function of Opposite-based Particle Swarm Optimization with velocity clamping algorithm.
Args:
task (Task): Optimization task.
pop (numpy.ndarray): Current population.
fpop (numpy.ndarray): Current populations function/fitness values.
xb (numpy.ndarray): Current global best position.
fxb (float): Current global best positions function/fitness value.
Returns:
Tuple[numpy.ndarray, numpy.ndarray, numpy.ndarray, float, list, dict]:
1. New population.
2. New populations function/fitness values.
3. New global best position.
4. New global best positions function/fitness value.
5. Additional arguments.
6. Additional keyword arguments:
* personal_best: particles best population.
* personal_best_fitness: particles best positions function/fitness value.
* min_velocity: Minimal velocity.
* max_velocity: Maximal velocity.
* v: Initial velocity of particle.
* s_h: upper bound for opposite learning.
* s_l: lower bound for opposite learning.
"""
personal_best = params.pop('personal_best')
personal_best_fitness = params.pop('personal_best_fitness')
min_velocity = params.pop('min_velocity')
max_velocity = params.pop('max_velocity')
v = params.pop('v')
s_l = params.pop('s_l')
s_h = params.pop('s_h')
if self.random() < self.p0:
pop, fpop, nb, fnb = self.opposite_learning(s_l, s_h, pop, fpop, task)
pb_indices = np.where(fpop < personal_best_fitness)
personal_best[pb_indices], personal_best_fitness[pb_indices] = pop[pb_indices], fpop[pb_indices]
if fnb < fxb:
xb, fxb = nb.copy(), fnb
else:
w = self.w_max - ((self.w_max - self.w_min) / task.max_iters) * (task.iters + 1)
for i in range(len(pop)):
v[i] = self.update_velocity(v[i], pop[i], personal_best[i], xb, w, min_velocity, max_velocity, task)
pop[i] = task.repair(pop[i] + v[i], rng=self.rng)
fpop[i] = task.eval(pop[i])
if fpop[i] < personal_best_fitness[i]:
personal_best[i], personal_best_fitness[i] = pop[i].copy(), fpop[i]
if fpop[i] < fxb:
xb, fxb = pop[i].copy(), fpop[i]
min_velocity, max_velocity = self.sigma * np.min(pop, axis=0), self.sigma * np.max(pop, axis=0)
return pop, fpop, xb, fxb, {'personal_best': personal_best, 'personal_best_fitness': personal_best_fitness,
'min_velocity': min_velocity,
'max_velocity': max_velocity, 'v': v, 's_l': s_l, 's_h': s_h}
class CenterParticleSwarmOptimization(ParticleSwarmAlgorithm):
r"""Implementation of Center Particle Swarm Optimization.
Algorithm:
Center Particle Swarm Optimization
Date:
2019
Authors:
Klemen Berkovič
License:
MIT
Reference paper:
H.-C. Tsai, Predicting strengths of concrete-type specimens using hybrid multilayer perceptrons with center-Unified particle swarm optimization, Adv. Eng. Softw. 37 (2010) 1104–1112.
See Also:
* :class:`niapy.algorithms.basic.WeightedVelocityClampingParticleSwarmAlgorithm`
"""
Name = ['CenterParticleSwarmOptimization', 'CPSO']
@staticmethod
def info():
r"""Get basic information of algorithm.
Returns:
str: Basic information of algorithm.
See Also:
* :func:`niapy.algorithms.Algorithm.info`
"""
return r"""H.-C. Tsai, Predicting strengths of concrete-type specimens using hybrid multilayer perceptrons with center-Unified particle swarm optimization, Adv. Eng. Softw. 37 (2010) 1104–1112."""
def __init__(self, *args, **kwargs):
"""Initialize CPSO."""
kwargs.pop('min_velocity', None), kwargs.pop('max_velocity', None)
super().__init__(min_velocity=-np.inf, max_velocity=np.inf, *args, **kwargs)
def set_parameters(self, **kwargs):
r"""Set core algorithm parameters.
Args:
**kwargs: Additional arguments.
See Also:
:func:`niapy.algorithm.basic.WeightedVelocityClampingParticleSwarmAlgorithm.set_parameters`
"""
kwargs.pop('min_velocity', None), kwargs.pop('max_velocity', None)
super().set_parameters(min_velocity=-np.inf, max_velocity=np.inf, **kwargs)
def get_parameters(self):
r"""Get value of parameters for this instance of algorithm.
Returns:
Dict[str, Union[int, float, numpy.ndarray]]: Dictionary which has parameters mapped to values.
See Also:
* :func:`niapy.algorithms.basic.ParticleSwarmAlgorithm.get_parameters`
"""
d = super().get_parameters()
d.pop('min_velocity', None), d.pop('max_velocity', None)
return d
def run_iteration(self, task, pop, fpop, xb, fxb, **params):
r"""Core function of algorithm.
Args:
task (Task): Optimization task.
pop (numpy.ndarray): Current population of particles.
fpop (numpy.ndarray): Current particles function/fitness values.
xb (numpy.ndarray): Current global best particle.
fxb (numpy.float): Current global best particles function/fitness value.
Returns:
Tuple[numpy.ndarray, numpy.ndarray, numpy.ndarray, float, dict]:
1. New population of particles.
2. New populations function/fitness values.
3. New global best particle.
4. New global best particle function/fitness value.
5. Additional arguments.
6. Additional keyword arguments.
See Also:
* :func:`niapy.algorithm.basic.WeightedVelocityClampingParticleSwarmAlgorithm.run_iteration`
"""
pop, fpop, xb, fxb, d = super().run_iteration(task, pop, fpop, xb, fxb, **params)
c = np.sum(pop, axis=0) / len(pop)
fc = task.eval(c)
if fc <= fxb:
xb, fxb = c, fc
return pop, fpop, xb, fxb, d
class MutatedParticleSwarmOptimization(ParticleSwarmAlgorithm):
r"""Implementation of Mutated Particle Swarm Optimization.
Algorithm:
Mutated Particle Swarm Optimization
Date:
2019
Authors:
Klemen Berkovič
License:
MIT
Reference paper:
H. Wang, C. Li, Y. Liu, S. Zeng, a hybrid particle swarm algorithm with cauchy mutation, Proceedings of the 2007 IEEE Swarm Intelligence Symposium (2007) 356–360.
Attributes:
num_mutations (int): Number of mutations of global best particle.
See Also:
* :class:`niapy.algorithms.basic.WeightedVelocityClampingParticleSwarmAlgorithm`
"""
Name = ['MutatedParticleSwarmOptimization', 'MPSO']
@staticmethod
def info():
r"""Get basic information of algorithm.
Returns:
str: Basic information of algorithm.
See Also:
* :func:`niapy.algorithms.Algorithm.info`
"""
return r"""H. Wang, C. Li, Y. Liu, S. Zeng, a hybrid particle swarm algorithm with cauchy mutation, Proceedings of the 2007 IEEE Swarm Intelligence Symposium (2007) 356–360."""
def __init__(self, num_mutations=10, *args, **kwargs):
"""Initialize MPSO."""
kwargs.pop('min_velocity', None), kwargs.pop('max_velocity', None)
super().__init__(min_velocity=-np.inf, max_velocity=np.inf, *args, **kwargs)
self.num_mutations = num_mutations
def set_parameters(self, num_mutations=10, **kwargs):
r"""Set core algorithm parameters.
Args:
num_mutations (int): Number of mutations of global best particle.
**kwargs: Additional arguments.
See Also:
* :func:`niapy.algorithm.basic.WeightedVelocityClampingParticleSwarmAlgorithm.set_parameters`
"""
kwargs.pop('min_velocity', None), kwargs.pop('max_velocity', None)
ParticleSwarmAlgorithm.set_parameters(self, min_velocity=-np.inf, max_velocity=np.inf, **kwargs)
self.num_mutations = num_mutations
def get_parameters(self):
r"""Get value of parameters for this instance of algorithm.
Returns:
Dict[str, Union[int, float, numpy.ndarray]]: Dictionary which has parameters mapped to values.
See Also:
* :func:`niapy.algorithms.basic.ParticleSwarmAlgorithm.get_parameters`
"""
d = ParticleSwarmAlgorithm.get_parameters(self)
d.pop('min_velocity', None), d.pop('max_velocity', None)
d.update({'num_mutations': self.num_mutations})
return d
def run_iteration(self, task, pop, fpop, xb, fxb, **params):
r"""Core function of algorithm.
Args:
task (Task): Optimization task.
pop (numpy.ndarray): Current population of particles.
fpop (numpy.ndarray): Current particles function/fitness values.
xb (numpy.ndarray): Current global best particle.
fxb (float): Current global best particles function/fitness value.
Returns:
Tuple[numpy.ndarray, numpy.ndarray, numpy.ndarray, float, list, dict]:
1. New population of particles.
2. New populations function/fitness values.
3. New global best particle.
4. New global best particle function/fitness value.
5. Additional arguments.
6. Additional keyword arguments.
See Also:
* :func:`niapy.algorithm.basic.WeightedVelocityClampingParticleSwarmAlgorithm.run_iteration`
"""
pop, fpop, xb, fxb, d = ParticleSwarmAlgorithm.run_iteration(self, task, pop, fpop, xb, fxb, **params)
v = d['v']
v_a = (np.sum(v, axis=0) / len(v))
v_a = v_a / np.max(np.abs(v_a))
for _ in range(self.num_mutations):
g = task.repair(xb + v_a * self.uniform(task.lower, task.upper), self.rng)
fg = task.eval(g)
if fg <= fxb:
xb, fxb = g, fg
return pop, fpop, xb, fxb, d
class MutatedCenterParticleSwarmOptimization(CenterParticleSwarmOptimization):
r"""Implementation of Mutated Particle Swarm Optimization.
Algorithm:
Mutated Center Particle Swarm Optimization
Date:
2019
Authors:
Klemen Berkovič
License:
MIT
Reference paper:
TODO find one
Attributes:
num_mutations (int): Number of mutations of global best particle.
See Also:
* :class:`niapy.algorithms.basic.CenterParticleSwarmOptimization`
"""
Name = ['MutatedCenterParticleSwarmOptimization', 'MCPSO']
@staticmethod
def info():
r"""Get basic information of algorithm.
Returns:
str: Basic information of algorithm.
See Also:
* :func:`niapy.algorithms.Algorithm.info`
"""
return r"""TODO find one"""
def __init__(self, num_mutations=10, *args, **kwargs):
"""Initialize MCPSO."""
kwargs.pop('min_velocity', None), kwargs.pop('max_velocity', None)
super().__init__(min_velocity=-np.inf, max_velocity=np.inf, *args, **kwargs)
self.num_mutations = num_mutations
def set_parameters(self, num_mutations=10, **kwargs):
r"""Set core algorithm parameters.
Args:
num_mutations (int): Number of mutations of global best particle.
**kwargs: Additional arguments.
See Also:
* :func:`niapy.algorithm.basic.CenterParticleSwarmOptimization.set_parameters`
"""
kwargs.pop('min_velocity', None), kwargs.pop('max_velocity', None)
ParticleSwarmAlgorithm.set_parameters(self, min_velocity=-np.inf, max_velocity=np.inf, **kwargs)
self.num_mutations = num_mutations
def get_parameters(self):
r"""Get value of parameters for this instance of algorithm.
Returns:
Dict[str, Union[int, float, numpy.ndarray]]: Dictionary which has parameters mapped to values.
See Also:
* :func:`niapy.algorithms.basic.CenterParticleSwarmOptimization.get_parameters`
"""
d = CenterParticleSwarmOptimization.get_parameters(self)
d.update({'num_mutations': self.num_mutations})
return d
def run_iteration(self, task, pop, fpop, xb, fxb, **params):
r"""Core function of algorithm.
Args:
task (Task): Optimization task.
pop (numpy.ndarray): Current population of particles.
fpop (numpy.ndarray): Current particles function/fitness values.
xb (numpy.ndarray): Current global best particle.
            fxb (float): Current global best particles function/fitness value.
Returns:
Tuple[numpy.ndarray, numpy.ndarray, numpy.ndarray, float, list, dict]:
1. New population of particles.
2. New populations function/fitness values.
3. New global best particle.
4. New global best particle function/fitness value.
5. Additional arguments.
6. Additional keyword arguments.
See Also:
* :func:`niapy.algorithm.basic.WeightedVelocityClampingParticleSwarmAlgorithm.run_iteration`
"""
pop, fpop, xb, fxb, d = CenterParticleSwarmOptimization.run_iteration(self, task, pop, fpop, xb, fxb, **params)
v = d['v']
v_a = (np.sum(v, axis=0) / len(v))
v_a = v_a / np.max(np.abs(v_a))
for _ in range(self.num_mutations):
g = task.repair(xb + v_a * self.uniform(task.lower, task.upper), self.rng)
fg = task.eval(g)
if fg <= fxb:
xb, fxb = g, fg
return pop, fpop, xb, fxb, d
class MutatedCenterUnifiedParticleSwarmOptimization(MutatedCenterParticleSwarmOptimization):
r"""Implementation of Mutated Particle Swarm Optimization.
Algorithm:
Mutated Center Unified Particle Swarm Optimization
Date:
2019
Authors:
Klemen Berkovič
License:
MIT
Reference paper:
Tsai, Hsing-Chih. "Unified particle swarm delivers high efficiency to particle swarm optimization." Applied Soft Computing 55 (2017): 371-383.
Attributes:
Name (List[str]): Names of algorithm.
See Also:
* :class:`niapy.algorithms.basic.CenterParticleSwarmOptimization`
"""
Name = ['MutatedCenterUnifiedParticleSwarmOptimization', 'MCUPSO']
@staticmethod
def info():
r"""Get basic information of algorithm.
Returns:
str: Basic information of algorithm.
See Also:
* :func:`niapy.algorithms.Algorithm.info`
"""
return r"""Tsai, Hsing-Chih. "Unified particle swarm delivers high efficiency to particle swarm optimization." Applied Soft Computing 55 (2017): 371-383."""
def update_velocity(self, v, p, pb, gb, w, min_velocity, max_velocity, task, **kwargs):
r"""Update particle velocity.
Args:
v (numpy.ndarray): Current velocity of particle.
p (numpy.ndarray): Current position of particle.
pb (numpy.ndarray): Personal best position of particle.
gb (numpy.ndarray): Global best position of particle.
w (numpy.ndarray): Weights for velocity adjustment.
min_velocity (numpy.ndarray): Minimal velocity allowed.
max_velocity (numpy.ndarray): Maximal velocity allowed.
task (Task): Optimization task.
kwargs (dict): Additional arguments.
Returns:
numpy.ndarray: Updated velocity of particle.
"""
r3 = self.random(task.dimension)
return self.repair(
w * v + self.c1 * self.random(task.dimension) * (pb - p) * r3 + self.c2 * self.random(task.dimension) * (
gb - p) * (1 - r3),
min_velocity, max_velocity)
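# Note on the unified update above: a single random vector r3 splits each step
# between the two attractors, weighting the cognitive pull (pb - p) by r3 and
# the social pull (gb - p) by (1 - r3); e.g. r3 = 0.25 gives a 25/75 split,
# instead of sampling the two pulls independently as in the base algorithm.
# (Illustrative note, not part of the original module.)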
class ComprehensiveLearningParticleSwarmOptimizer(ParticleSwarmAlgorithm):
r"""Implementation of Mutated Particle Swarm Optimization.
Algorithm:
Comprehensive Learning Particle Swarm Optimizer
Date:
2019
Authors:
Klemen Berkovič
License:
MIT
Reference paper:
J. J. Liang, a. K. Qin, P. N. Suganthan and S. Baskar, "Comprehensive learning particle swarm optimizer for global optimization of multimodal functions," in IEEE Transactions on Evolutionary Computation, vol. 10, no. 3, pp. 281-295, June 2006. doi: 10.1109/TEVC.2005.857610
Reference URL:
http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=1637688&isnumber=34326
Attributes:
w0 (float): Inertia weight.
w1 (float): Inertia weight.
c (float): Velocity constant.
m (int): Refresh rate.
See Also:
* :class:`niapy.algorithms.basic.ParticleSwarmAlgorithm`
"""
Name = ['ComprehensiveLearningParticleSwarmOptimizer', 'CLPSO']
@staticmethod
def info():
r"""Get basic information of algorithm.
Returns:
str: Basic information of algorithm.
See Also:
* :func:`niapy.algorithms.Algorithm.info`
"""
return r"""J. J. Liang, a. K. Qin, P. N. Suganthan and S. Baskar, "Comprehensive learning particle swarm optimizer for global optimization of multimodal functions," in IEEE Transactions on Evolutionary Computation, vol. 10, no. 3, pp. 281-295, June 2006. doi: 10.1109/TEVC.2005.857610 """
def __init__(self, m=10, w0=.9, w1=.4, c=1.49445, *args, **kwargs):
"""Initialize CLPSO."""
super().__init__(*args, **kwargs)
self.m = m
self.w0 = w0
self.w1 = w1
self.c = c
def set_parameters(self, m=10, w0=.9, w1=.4, c=1.49445, **kwargs):
r"""Set Particle Swarm Algorithm main parameters.
Args:
            w0 (float): Inertia weight.
            w1 (float): Inertia weight.
            c (float): Velocity constant.
            m (int): Refresh rate.
kwargs (dict): Additional arguments
See Also:
* :func:`niapy.algorithms.basic.ParticleSwarmAlgorithm.set_parameters`
"""
ParticleSwarmAlgorithm.set_parameters(self, **kwargs)
self.m = m
self.w0 = w0
self.w1 = w1
self.c = c
def get_parameters(self):
r"""Get value of parameters for this instance of algorithm.
Returns:
Dict[str, Union[int, float, numpy.ndarray]]: Dictionary which has parameters mapped to values.
See Also:
* :func:`niapy.algorithms.basic.ParticleSwarmAlgorithm.get_parameters`
"""
d = ParticleSwarmAlgorithm.get_parameters(self)
d.update({
'm': self.m,
'w0': self.w0,
'w1': self.w1,
'c': self.c
})
return d
def init(self, task):
r"""Initialize dynamic arguments of Particle Swarm Optimization algorithm.
Args:
task (Task): Optimization task.
Returns:
Dict[str, numpy.ndarray]:
* vMin: Minimal velocity.
* vMax: Maximal velocity.
* V: Initial velocity of particle.
* flag: Refresh gap counter.
"""
return {'min_velocity': full_array(self.min_velocity, task.dimension),
'max_velocity': full_array(self.max_velocity, task.dimension),
'v': np.full([self.population_size, task.dimension], 0.0), 'flag': np.full(self.population_size, 0),
'pc': np.asarray(
[.05 + .45 * (np.exp(10 * (i - 1) / (self.population_size - 1)) - 1) / (np.exp(10) - 1) for i in
range(self.population_size)])}
def generate_personal_best_cl(self, i, pc, personal_best, personal_best_fitness):
r"""Generate new personal best position for learning.
Args:
i (int): Current particle.
pc (float): Learning probability.
personal_best (numpy.ndarray): Personal best positions for population.
personal_best_fitness (numpy.ndarray): Personal best positions function/fitness values for personal best position.
Returns:
numpy.ndarray: Personal best for learning.
"""
pbest = []
for j in range(len(personal_best[i])):
if self.random() > pc:
pbest.append(personal_best[i, j])
else:
r1, r2 = int(self.random() * len(personal_best)), int(self.random() * len(personal_best))
if personal_best_fitness[r1] < personal_best_fitness[r2]:
pbest.append(personal_best[r1, j])
else:
pbest.append(personal_best[r2, j])
return np.asarray(pbest)
def update_velocity_cl(self, v, p, pb, w, min_velocity, max_velocity, task, **_kwargs):
r"""Update particle velocity.
Args:
v (numpy.ndarray): Current velocity of particle.
p (numpy.ndarray): Current position of particle.
pb (numpy.ndarray): Personal best position of particle.
w (numpy.ndarray): Weights for velocity adjustment.
min_velocity (numpy.ndarray): Minimal velocity allowed.
max_velocity (numpy.ndarray): Maximal velocity allowed.
task (Task): Optimization task.
Returns:
numpy.ndarray: Updated velocity of particle.
"""
return self.repair(w * v + self.c * self.random(task.dimension) * (pb - p), min_velocity, max_velocity)
def run_iteration(self, task, pop, fpop, xb, fxb, **params):
r"""Core function of algorithm.
Args:
task (Task): Optimization task.
pop (numpy.ndarray): Current populations.
fpop (numpy.ndarray): Current population fitness/function values.
xb (numpy.ndarray): Current best particle.
fxb (float): Current best particle fitness/function value.
params (dict): Additional function keyword arguments.
Returns:
            Tuple[numpy.ndarray, numpy.ndarray, numpy.ndarray, float, dict]:
1. New population.
2. New population fitness/function values.
3. New global best position.
4. New global best positions function/fitness value.
5. Additional arguments.
6. Additional keyword arguments:
* personal_best: Particles best population.
* personal_best_fitness: Particles best positions function/fitness value.
* min_velocity: Minimal velocity.
* max_velocity: Maximal velocity.
* V: Initial velocity of particle.
* flag: Refresh gap counter.
* pc: Learning rate.
See Also:
* :class:`niapy.algorithms.basic.ParticleSwarmAlgorithm.run_iteration`
"""
personal_best = params.pop('personal_best')
personal_best_fitness = params.pop('personal_best_fitness')
min_velocity = params.pop('min_velocity')
max_velocity = params.pop('max_velocity')
v = params.pop('v')
flag = params.pop('flag')
pc = params.pop('pc')
        w = self.w0 - (self.w0 - self.w1) * (task.iters + 1) / task.max_iters  # inertia weight decreases linearly from w0 to w1
for i in range(len(pop)):
if flag[i] >= self.m:
v[i] = self.update_velocity(v[i], pop[i], personal_best[i], xb, 1, min_velocity, max_velocity, task)
pop[i] = task.repair(pop[i] + v[i], rng=self.rng)
fpop[i] = task.eval(pop[i])
if fpop[i] < personal_best_fitness[i]:
personal_best[i], personal_best_fitness[i] = pop[i].copy(), fpop[i]
if fpop[i] < fxb:
xb, fxb = pop[i].copy(), fpop[i]
flag[i] = 0
pbest = self.generate_personal_best_cl(i, pc[i], personal_best, personal_best_fitness)
v[i] = self.update_velocity_cl(v[i], pop[i], pbest, w, min_velocity, max_velocity, task)
pop[i] = pop[i] + v[i]
if task.is_feasible(pop[i]):
fpop[i] = task.eval(pop[i])
if fpop[i] < personal_best_fitness[i]:
personal_best[i], personal_best_fitness[i] = pop[i].copy(), fpop[i]
if fpop[i] < fxb:
xb, fxb = pop[i].copy(), fpop[i]
return pop, fpop, xb, fxb, {'personal_best': personal_best, 'personal_best_fitness': personal_best_fitness,
'min_velocity': min_velocity,
'max_velocity': max_velocity, 'v': v, 'flag': flag, 'pc': pc}
# vim: tabstop=3 noexpandtab shiftwidth=3 softtabstop=3
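# A hedged usage sketch for the classes above; it assumes niapy's Task and
# built-in Sphere problem with these import paths, which are not confirmed by
# this file (illustrative only, not part of the original module):
#
#     from niapy.task import Task
#     from niapy.problems import Sphere
#     task = Task(problem=Sphere(dimension=10), max_iters=100)
#     algo = ParticleSwarmAlgorithm(population_size=40, c1=2.0, c2=2.0, w=0.7)
#     best_x, best_fitness = algo.run(task)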
| 37.981755 | 296 | 0.607635 | 4,920 | 43,717 | 5.295528 | 0.07561 | 0.056652 | 0.025524 | 0.016581 | 0.828472 | 0.793391 | 0.77182 | 0.754625 | 0.718316 | 0.690796 | 0 | 0.016135 | 0.288309 | 43,717 | 1,150 | 297 | 38.014783 | 0.821136 | 0.49061 | 0 | 0.513228 | 0 | 0.018519 | 0.149802 | 0.043887 | 0 | 0 | 0 | 0.00087 | 0 | 1 | 0.113757 | false | 0 | 0.010582 | 0 | 0.243386 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
33a9925191b7fe6aac888b8349e3deacf7323443 | 222 | py | Python | confplot/__init__.py | fcakyon/confplot | 93e777aae4ace838a82c2f2420d489e7bf04b960 | ["Apache-2.0"] | 5 | 2020-05-31T01:17:53.000Z | 2022-02-09T06:17:48.000Z | confplot/__init__.py | fcakyon/confplot | 93e777aae4ace838a82c2f2420d489e7bf04b960 | ["Apache-2.0"] | 1 | 2022-01-06T21:48:30.000Z | 2022-01-09T11:16:43.000Z | confplot/__init__.py | fcakyon/confplot | 93e777aae4ace838a82c2f2420d489e7bf04b960 | ["Apache-2.0"] | 1 | 2021-11-20T00:06:33.000Z | 2021-11-20T00:06:33.000Z |
from __future__ import absolute_import
__version__ = "0.1.1"
from confplot.confplot import plot_confusion_matrix_from_data
from confplot.confplot import pretty_plot_confusion_matrix as plot_confusion_matrix_from_matrix
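# A hedged usage sketch of the re-exported helper above; only the function
# name comes from this file, the positional arguments are an assumption
# (illustrative only, not part of the original module):
#
#     from confplot import plot_confusion_matrix_from_data
#     y_true = [1, 0, 1, 1, 0, 1]
#     y_pred = [1, 0, 0, 1, 1, 1]
#     plot_confusion_matrix_from_data(y_true, y_pred)  # assumed call signature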
| 27.75 | 95 | 0.878378 | 32 | 222 | 5.46875 | 0.4375 | 0.222857 | 0.325714 | 0.297143 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.014851 | 0.09009 | 222 | 7 | 96 | 31.714286 | 0.851485 | 0 | 0 | 0 | 0 | 0 | 0.022523 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.75 | 0 | 0.75 | 0 | 0 | 0 | 0 | null | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
33b82c25751f0bb0ed5321e98a57dbb462544597 | 32,983 | py | Python | core/quality_factors.py | lvikt/ekostat_calculator | 499e3ad6c5c1ef757a854ab00b08a4a28d5866a8 | ["MIT"] | 1 | 2017-08-29T06:44:22.000Z | 2017-08-29T06:44:22.000Z | core/quality_factors.py | lvikt/ekostat_calculator | 499e3ad6c5c1ef757a854ab00b08a4a28d5866a8 | ["MIT"] | null | null | null | core/quality_factors.py | lvikt/ekostat_calculator | 499e3ad6c5c1ef757a854ab00b08a4a28d5866a8 | ["MIT"] | 4 | 2017-08-23T14:08:35.000Z | 2019-06-13T12:09:30.000Z |
# -*- coding: utf-8 -*-
"""
Created on Wed Jul 12 14:43:35 2017
@author: a001985
"""
import os
import core
import numpy as np
import pandas as pd
from functools import reduce
import re
###############################################################################
class ClassificationResult(dict):
"""
Class to hold result from a classification.
    I copied this from indicators.py.
"""
def __init__(self):
super().__init__()
self['qualityfactor'] = None
self['type_area'] = None
self['status'] = None
self['EQR'] = None
self['qf_EQR'] = None
self['all_ok'] = False
self._set_attributes()
#==========================================================================
def _set_attributes(self):
for key in self.keys():
setattr(self, key, self[key])
#==========================================================================
def add_info(self, key, value):
self[key] = value
setattr(self, key, value)
###############################################################################
class QualityElementBase(object):
"""
Class to hold general information about quality factors.
"""
def __init__(self, subset_uuid, parent_workspace_object, quality_element):
self.name = ''
self.name = quality_element.lower()
print('********')
print(self.name)
self.class_result = None
self.subset = subset_uuid
self.step = 'step_3'
# from workspace
self.parent_workspace_object = parent_workspace_object
self.mapping_objects = self.parent_workspace_object.mapping_objects
self.index_handler = self.parent_workspace_object.index_handler
self.step_object = self.parent_workspace_object.get_step_object(step = 3, subset = self.subset)
self.wb_id_header = self.parent_workspace_object.wb_id_header
#paths and saving
self.result_directory = self.step_object.paths['step_directory']+'/output/results/'
self.sld = core.SaveLoadDelete(self.result_directory)
# from SettingsFile
self.tolerance_settings = self.parent_workspace_object.get_step_object(step = 2, subset = self.subset).get_indicator_tolerance_settings(self.name)
# To be read from config-file
self.indicator_list = list(self.parent_workspace_object.mapping_objects['quality_element'].get_indicator_list_for_quality_element(self.name.split('_')[0]))
if len(self.name.split('_')[0]) > 1:
self.indicator_list = self.indicator_list + list(self.parent_workspace_object.mapping_objects['quality_element'].get_indicator_list_for_quality_element(self.name))
self._load_indicator_results()
# perform checks before continuing
self._check()
self._set_directories()
#self._load_indicators()
#self.indicator_list = []
self.class_result = None
#==========================================================================
def _check(self):
pass
#==========================================================================
def _load_indicator_results(self):
self.indicator_dict = {}
# TODO update resultfilenames and here!
for indicator in self.indicator_list:
if not os.path.exists(self.sld.directory + indicator + '-by_period.pkl') or not os.path.exists(self.sld.directory +indicator + '-by_period.txt'):
# raise core.exceptions.NoResultsForIndicator()
pass #self.indicator_dict[indicator] = False
else:
self.indicator_dict[indicator] = self.sld.load_df(file_name = indicator + '-by_period')
# print('No status results for {}. Cannot calculate status without it'.format(indicator))
#==========================================================================
def _set_directories(self):
#set paths
self.paths = {}
self.paths['output'] = self.step_object.paths['directory_paths']['output']
self.paths['results'] = self.step_object.paths['directory_paths']['results']
#==========================================================================
def calculate_quality_factor(self):
"""
Updated 20180920 by Magnus
Calculates quality element based on included indicators
        OLD REGULATION (GAMLA FÖRESKRIFTEN)
        A mean of the numerical classifications (Nklass) is calculated for
        DIN, DIP, tot-N and tot-P during winter, and a mean for tot-N and tot-P during summer.
        The mean of summer and winter is then calculated, which becomes the combined classification of nutrients.
        NEW REGULATION (NYA FÖRESKRIFTEN)
        A mean of the numerical classes (global_EQR) is calculated separately for N and P. First a mean for winter
        (N_winter = mean(din_winter, ntot_winter) and correspondingly P_winter = mean(dip_winter, ptot_winter)).
        Then the mean of N_winter and ntot_summer, respectively P_winter and ptot_summer, is calculated, and after that the mean of N and P,
        which becomes the combined classification of nutrients.
        The status classification is determined by the mean of the numerical classification according to table 2.1, a value 0-1.
        These values can then be compared with the other quality factors and enter into the overall aggregation.
"""
###### Results #####
# how keyword:
# - outer: use union of keys from both frames, similar to a SQL full outer join; sort keys lexicographically
# - inner: use intersection of keys from both frames, similar to a SQL inner join; preserve the order of the left keys
# TODO: replace merge by join?
merge_on = [self.wb_id_header, 'WATER_BODY_NAME', 'WATER_TYPE_AREA']
def mean_of_indicators(indicator_name):
parameters = self.mapping_objects['quality_element'].indicator_config.loc[indicator_name]['parameters'].split(', ')
if 'indicator_' not in parameters[0]:
# if 'qe_' not in parameters[0]:
return False
if not all([par in self.indicator_dict.keys() for par in parameters]):
return False
if len(parameters) == 2:
mean_of_indicators = self.indicator_dict[parameters[0]].\
merge(self.indicator_dict[parameters[1]], on=merge_on, how='inner', copy=True,
suffixes=['_' + par for par in parameters])
mean_of_indicators['ok_'+indicator_name] = \
mean_of_indicators['ok_' + parameters[0]] | mean_of_indicators['ok_' + parameters[1]]
mean_of_indicators['global_EQR_'+indicator_name] = \
mean_of_indicators[['global_EQR' + '_' +parameters[0], 'global_EQR' + '_' +
parameters[1]]].mean(axis=1, skipna=False)
mean_of_indicators['STATUS_'+indicator_name] = \
mean_of_indicators['global_EQR_'+indicator_name].apply(lambda x: self.get_status_from_global_EQR(x))
self.indicator_dict[indicator_name] = mean_of_indicators
if len(parameters) == 4:
mean_of_indicators1 = self.indicator_dict[parameters[0]].\
merge(self.indicator_dict[parameters[1]], on=merge_on, how='inner', copy=True,
suffixes=['_' + par for par in parameters[:2]])
mean_of_indicators2 = self.indicator_dict[parameters[2]].\
merge(self.indicator_dict[parameters[3]], on=merge_on, how='inner', copy=True,
suffixes = ['_' + par for par in parameters[2:]])
mean_of_indicators = mean_of_indicators1.merge(mean_of_indicators2, on=merge_on, how='inner', copy=True)
mean_of_indicators['ok_'+indicator_name] = \
mean_of_indicators['ok_' + parameters[0]] | \
mean_of_indicators['ok_' + parameters[1]] | \
mean_of_indicators['ok_' + parameters[2]] | mean_of_indicators['ok_' + parameters[3]]
mean_of_indicators['global_EQR_'+indicator_name] = \
mean_of_indicators[['global_EQR' + '_' + parameters[0],'global_EQR' +'_' +
parameters[1], 'global_EQR' + '_' + parameters[2],'global_EQR' + '_' + parameters[3]]].mean(axis = 1, skipna = False)
mean_of_indicators['STATUS_'+indicator_name] = \
mean_of_indicators['global_EQR_'+indicator_name].apply(lambda x: self.get_status_from_global_EQR(x))
self.indicator_dict[indicator_name] = mean_of_indicators
elif len(parameters) == 1:
col_list = list(self.indicator_dict[parameters[0]].columns)
[col_list.remove(r) for r in merge_on]
{k: k+'_'+parameters[0] for k in col_list}
self.indicator_dict[indicator_name] = self.indicator_dict[parameters[0]].\
rename(columns={k: k+'_'+indicator_name for k in col_list})
return True
def cut_results(df, indicator_name):
#pick out columns for only this indicator
these_cols = [col for col in df.columns if re.search(indicator_name + r'$', col)]
# return df[these_cols + merge_on].rename(columns = {col: col.strip(indicator_name) for col in these_cols})
return df[these_cols + merge_on].rename(columns={col: col.replace('_'+indicator_name, '') for col in these_cols})
for indicator in self.mapping_objects['quality_element'].indicator_config.index:
if self.mapping_objects['quality_element'].indicator_config.loc[indicator]['quality element'] == self.name:
# calculate mean for the included sub-indicators
if mean_of_indicators(indicator):
df = cut_results(self.indicator_dict[indicator], indicator)
self.sld.save_df(df, indicator + '-by_period')
if 'qe_'+self.name in self.indicator_dict.keys():
self.sld.save_df(self.indicator_dict['qe_'+self.name], self.name+'_all_results')
#==========================================================================
def get_status_from_global_EQR(self, global_EQR):
if global_EQR >= 0.8:
return 'HIGH'
elif global_EQR >= 0.6:
return 'GOOD'
elif global_EQR >= 0.4:
return 'MODERATE'
elif global_EQR >= 0.2:
return 'POOR'
elif global_EQR >= 0:
return 'BAD'
else:
return ''
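    # Illustrative mapping of the thresholds above (derived directly from
    # get_status_from_global_EQR, not part of the original file):
    #   0.85 -> 'HIGH'    0.65 -> 'GOOD'    0.45 -> 'MODERATE'
    #   0.25 -> 'POOR'    0.05 -> 'BAD'     -1.0 -> ''  (negative falls through)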
###############################################################################
class QualityElementNutrientsWinterSummer(QualityElementBase):
"""
Class calculate the quality factor for Nutrients.
"""
def __init__(self, subset_uuid, parent_workspace_object, quality_element):
super().__init__(subset_uuid, parent_workspace_object, quality_element)
#==========================================================================
def calculate_quality_factor(self):
"""
        5) The EQR values of the included parameters (tot-N, tot-P, DIN and DIP) are aggregated for the final status
        classification of the whole quality factor Nutrients. Performed according to the regulation, where
        - the winter numerical classes for TN, DIN and TP, DIP are aggregated into a winter numerical class
        - the summer numerical classes for TN and TP are aggregated into a summer numerical class
        - the numerical classes for summer and winter are aggregated into the numerical class for the quality factor
        """
        """
        OLD REGULATION (GAMLA FÖRESKRIFTEN)
        A mean of the numerical classifications (Nklass) is calculated for
        DIN, DIP, tot-N and tot-P during winter, and a mean for tot-N and tot-P during summer.
        The mean of summer and winter is then calculated, which becomes the combined classification of nutrients.
        NEW REGULATION (NYA FÖRESKRIFTEN)
        A mean of the numerical classes (global_EQR) is calculated separately for N and P. First a mean for winter
        (N_winter = mean(din_winter, ntot_winter) and correspondingly P_winter = mean(dip_winter, ptot_winter)).
        Then the mean of N_winter and ntot_summer, respectively P_winter and ptot_summer, is calculated, and after that the mean of N and P,
        which becomes the combined classification of nutrients.
        The status classification is determined by the mean of the numerical classification according to table 2.1, a value 0-1.
        These values can then be compared with the other quality factors and enter into the overall aggregation.
"""
###### Results #####
# how keyword:
# - outer: use union of keys from both frames, similar to a SQL full outer join; sort keys lexicographically
# - inner: use intersection of keys from both frames, similar to a SQL inner join; preserve the order of the left keys
# TODO: replace merge by join?
merge_on = ['VISS_EU_CD', 'WATER_BODY_NAME', 'WATER_TYPE_AREA']
# for indicator in self.indicator_list:
# col_list = list(self.indicator_dict[indicator].columns)
# [col_list.remove(r) for r in merge_on]
# {k: k+'_'+indicator for k in col_list}
# self.indicator_dict[indicator].rename(columns = {k: k+'_'+indicator for k in col_list}, inplace = True)
#
def mean_of_indicators(indicator_name):
# print(self.mapping_objects['quality_element'].indicator_config.loc[indicator_name]['parameters'])
parameters = self.mapping_objects['quality_element'].indicator_config.loc[indicator_name]['parameters'].split(', ')
if 'indicator_' not in parameters[0]:
if 'qe_' not in parameters[0]:
return False
# print(indicator_name, parameters)
if not all([par in self.indicator_dict.keys() for par in parameters]):
return False
if len(parameters) == 2:
# print(self.indicator_dict[parameters[0]].columns)
mean_of_indicators = self.indicator_dict[parameters[0]].merge(self.indicator_dict[parameters[1]], on = merge_on, how = 'inner', copy=True, suffixes = ['_' + par for par in parameters])
# print('columns 1 merge', mean_of_indicators.columns)
mean_of_indicators['global_EQR_'+indicator_name] = mean_of_indicators[['global_EQR' + '_' + parameters[0],'global_EQR' +'_' + parameters[1]]].mean(axis = 1, skipna = False)
mean_of_indicators['STATUS_'+indicator_name] = mean_of_indicators['global_EQR_'+indicator_name].apply(lambda x: self.get_status_from_global_EQR(x))
# print(mean_of_indicators.loc[mean_of_indicators['VISS_EU_CD'] == 'SE622500-172430'][['global_EQR_'+indicator_name, 'STATUS_'+indicator_name, 'global_EQR_indicator_dip_winter', 'global_EQR_indicator_ptot_winter']])
# print('columns 2', mean_of_indicators.columns)
self.indicator_dict[indicator_name] = mean_of_indicators
# self.sld.save_df(mean_of_indicators, indicator_name)
elif len(parameters) == 1:
col_list = list(self.indicator_dict[parameters[0]].columns)
[col_list.remove(r) for r in merge_on]
{k: k+'_'+parameters[0] for k in col_list}
self.indicator_dict[indicator_name] = self.indicator_dict[parameters[0]].rename(columns = {k: k+'_'+indicator_name for k in col_list})
# self.sld.save_df(self.indicator_dict[indicator_name], indicator_name)
return True
def cut_results(df, indicator_name):
#pick out columns for only this indicator
these_cols = [col for col in df.columns if re.search(indicator_name + r'$', col)]
# df[these_cols + merge_on].rename(columns = {col: col.strip(indicator_name) for col in these_cols})
            return df[these_cols + merge_on].rename(columns={col: col.replace('_' + indicator_name, '') for col in these_cols})  # replace(), not strip(): strip removes characters, not a suffix
for indicator in self.mapping_objects['quality_element'].indicator_config.index:
if self.mapping_objects['quality_element'].indicator_config.loc[indicator]['quality element'] == self.name:#'nutrients':
# calculate mean for the included sub-indicators
if mean_of_indicators(indicator):
df = cut_results(self.indicator_dict[indicator], indicator)
self.sld.save_df(df, indicator)
if 'qe_'+self.name in self.indicator_dict.keys():
self.sld.save_df(self.indicator_dict['qe_'+self.name], self.name+'_all_results')
# mean_of_indicators('indicator_p_winter')
# mean_of_indicators('indicator_p_summer')
# mean_of_indicators('indicator_p')
# mean_of_indicators('indicator_n_winter')
# mean_of_indicators('indicator_n_summer')
# mean_of_indicators('indicator_n')
# mean_of_indicators('qe_nutrients')
###############################################################################
class QualityElementNutrients(QualityElementBase):
"""
Class calculate the quality factor for Nutrients.
"""
def __init__(self, subset_uuid, parent_workspace_object, quality_element):
super().__init__(subset_uuid, parent_workspace_object, quality_element)
#==========================================================================
def calculate_quality_factor(self):
"""
        5) The EQR values of the included parameters (tot-N, tot-P, DIN and DIP) are aggregated for the final status
        classification of the whole quality factor Nutrients.
        """
        """
        OLD REGULATION (GAMLA FÖRESKRIFTEN)
        A mean of the numerical classifications (Nklass) is calculated for
        DIN, DIP, tot-N and tot-P during winter, and a mean for tot-N and tot-P during summer.
        The mean of summer and winter is then calculated, which becomes the combined classification of nutrients.
        NEW REGULATION (NYA FÖRESKRIFTEN)
        A mean of the numerical classes (global_EQR) is calculated separately for N and P. First a mean for winter
        (N_winter = mean(din_winter, ntot_winter) and correspondingly P_winter = mean(dip_winter, ptot_winter)).
        Then the mean of N_winter and ntot_summer, respectively P_winter and ptot_summer, is calculated, and after that the mean of N and P,
        which becomes the combined classification of nutrients.
        The status classification is determined by the mean of the numerical classification according to table 2.1, a value 0-1.
        These values can then be compared with the other quality factors and enter into the overall aggregation.
"""
###### Results #####
# how keyword:
# - outer: use union of keys from both frames, similar to a SQL full outer join; sort keys lexicographically
# - inner: use intersection of keys from both frames, similar to a SQL inner join; preserve the order of the left keys
# TODO: replace merge by join?
merge_on = ['VISS_EU_CD', 'WATER_BODY_NAME', 'WATER_TYPE_AREA']
# for indicator in self.indicator_list:
# col_list = list(self.indicator_dict[indicator].columns)
# [col_list.remove(r) for r in merge_on]
# {k: k+'_'+indicator for k in col_list}
# self.indicator_dict[indicator].rename(columns = {k: k+'_'+indicator for k in col_list}, inplace = True)
#
def mean_of_indicators(indicator_name):
# print(self.mapping_objects['quality_element'].indicator_config.loc[indicator_name]['parameters'])
parameters = self.mapping_objects['quality_element'].indicator_config.loc[indicator_name]['parameters'].split(', ')
if 'indicator_' not in parameters[0]:
if 'qe_' not in parameters[0]:
return False
# print(indicator_name, parameters)
if not all([par in self.indicator_dict.keys() for par in parameters]):
return False
if len(parameters) == 2:
# print(self.indicator_dict[parameters[0]].columns)
mean_of_indicators = self.indicator_dict[parameters[0]].merge(self.indicator_dict[parameters[1]], on = merge_on, how = 'inner', copy=True, suffixes = ['_' + par for par in parameters])
# print('columns 1 merge', mean_of_indicators.columns)
mean_of_indicators['global_EQR_'+indicator_name] = mean_of_indicators[['global_EQR' + '_' + parameters[0],'global_EQR' +'_' + parameters[1]]].mean(axis = 1, skipna = False)
mean_of_indicators['STATUS_'+indicator_name] = mean_of_indicators['global_EQR_'+indicator_name].apply(lambda x: self.get_status_from_global_EQR(x))
# print(mean_of_indicators.loc[mean_of_indicators['VISS_EU_CD'] == 'SE622500-172430'][['global_EQR_'+indicator_name, 'STATUS_'+indicator_name, 'global_EQR_indicator_dip_winter', 'global_EQR_indicator_ptot_winter']])
# print('columns 2', mean_of_indicators.columns)
self.indicator_dict[indicator_name] = mean_of_indicators
# self.sld.save_df(mean_of_indicators, indicator_name)
elif len(parameters) == 1:
col_list = list(self.indicator_dict[parameters[0]].columns)
[col_list.remove(r) for r in merge_on]
{k: k+'_'+parameters[0] for k in col_list}
self.indicator_dict[indicator_name] = self.indicator_dict[parameters[0]].rename(columns = {k: k+'_'+indicator_name for k in col_list})
# self.sld.save_df(self.indicator_dict[indicator_name], indicator_name)
return True
def cut_results(df, indicator_name):
#pick out columns for only this indicator
these_cols = [col for col in df.columns if re.search(indicator_name + r'$', col)]
# df[these_cols + merge_on].rename(columns = {col: col.strip(indicator_name) for col in these_cols})
            return df[these_cols + merge_on].rename(columns={col: col.replace('_' + indicator_name, '') for col in these_cols})  # replace(), not strip(): strip removes characters, not a suffix
for indicator in self.mapping_objects['quality_element'].indicator_config.index:
if self.mapping_objects['quality_element'].indicator_config.loc[indicator]['quality element'] == self.name:#'nutrients':
# calculate mean for the included sub-indicators
if mean_of_indicators(indicator):
df = cut_results(self.indicator_dict[indicator], indicator)
self.sld.save_df(df, indicator)
if 'qe_'+self.name in self.indicator_dict.keys():
self.sld.save_df(self.indicator_dict['qe_'+self.name], self.name+'_all_results')
# mean_of_indicators('indicator_p_winter')
# mean_of_indicators('indicator_p_summer')
# mean_of_indicators('indicator_p')
# mean_of_indicators('indicator_n_winter')
# mean_of_indicators('indicator_n_summer')
# mean_of_indicators('indicator_n')
# mean_of_indicators('qe_nutrients')
#==========================================================================
def old_calculate_quality_factor(self):
"""
        5) The EQR values of the included parameters (tot-N, tot-P, DIN and DIP) are aggregated for the final status
        classification of the whole quality factor Nutrients.
        """
        """
        OLD REGULATION (GAMLA FÖRESKRIFTEN)
        A mean of the numerical classifications (Nklass) is calculated for
        DIN, DIP, tot-N and tot-P during winter, and a mean for tot-N and tot-P during summer.
        The mean of summer and winter is then calculated, which becomes the combined classification of nutrients.
        NEW REGULATION (NYA FÖRESKRIFTEN)
        A mean of the numerical classes (global_EQR) is calculated separately for N and P. First a mean for winter
        (N_winter = mean(din_winter, ntot_winter) and correspondingly P_winter = mean(dip_winter, ptot_winter)).
        Then the mean of N_winter and ntot_summer, respectively P_winter and ptot_summer, is calculated, and after that the mean of N and P,
        which becomes the combined classification of nutrients.
        The status classification is determined by the mean of the numerical classification according to table 2.1, a value 0-1.
        These values can then be compared with the other quality factors and enter into the overall aggregation.
"""
# #==========================================================================
# def get_status_from_global_EQR(global_EQR):
#
# if global_EQR >= 0.8:
# return 'HIGH'
# elif global_EQR >= 0.6:
# return 'GOOD'
# elif global_EQR >= 0.4:
# return 'MODERATE'
# elif global_EQR >= 0.2:
# return 'POOR'
# elif global_EQR >= 0:
# return 'BAD'
# else:
# return ''
        def mean_EQR(df, winter_values, summer_values):
df['winter_EQR'] = df[winter_values].mean(axis = 1, skipna = False)
df['summer_EQR'] = df[summer_values].mean(axis = 1, skipna = False)
df['mean_EQR'] = df[['winter_EQR','summer_EQR']].mean(axis = 1, skipna = False)
        ###### Results #####
        # merge 'how' keyword:
        # - outer: union of keys from both frames (SQL full outer join); sorts keys lexicographically
        # - inner: intersection of keys from both frames (SQL inner join); preserves the order of the left keys
        # TODO: replace merge by join?
        merge_on = ['VISS_EU_CD', 'WATER_BODY_NAME', 'WATER_TYPE_AREA']
        for indicator in self.indicator_list:
            if self.indicator_dict[indicator] is None:
                continue
            # Suffix every value column with the indicator name before merging.
            col_list = [c for c in self.indicator_dict[indicator].columns if c not in merge_on]
            self.indicator_dict[indicator].rename(
                columns={k: k + '_' + indicator for k in col_list}, inplace=True)
        P_winter = self.indicator_dict['indicator_dip_winter'].merge(
            self.indicator_dict['indicator_ptot_winter'], on=merge_on, how='inner', copy=True)
        P_winter['EQR_P_winter_mean'] = P_winter[
            ['global_EQR_indicator_dip_winter', 'global_EQR_indicator_ptot_winter']].mean(axis=1, skipna=False)
        P_winter['STATUS_P_winter'] = P_winter['EQR_P_winter_mean'].apply(self.get_status_from_global_EQR)

        N_winter = self.indicator_dict['indicator_din_winter'].merge(
            self.indicator_dict['indicator_ntot_winter'], on=merge_on, how='inner', copy=True)
        N_winter['EQR_N_winter_mean'] = N_winter[
            ['global_EQR_indicator_din_winter', 'global_EQR_indicator_ntot_winter']].mean(axis=1, skipna=False)
        N_winter['STATUS_N_winter'] = N_winter['EQR_N_winter_mean'].apply(self.get_status_from_global_EQR)
        ###### QualityElement results #####
        P_results = P_winter.merge(self.indicator_dict['indicator_ptot_summer'],
                                   on=merge_on, how='inner', copy=True)
        P_results['MEAN_P_EQR'] = P_results[
            ['EQR_P_winter_mean', 'global_EQR_indicator_ptot_summer']].mean(axis=1, skipna=False)
        P_results['STATUS_P'] = P_results['MEAN_P_EQR'].apply(self.get_status_from_global_EQR)

        N_results = N_winter.merge(self.indicator_dict['indicator_ntot_summer'],
                                   on=merge_on, how='inner', copy=True)
        N_results['MEAN_N_EQR'] = N_results[
            ['EQR_N_winter_mean', 'global_EQR_indicator_ntot_summer']].mean(axis=1, skipna=False)
        N_results['STATUS_N'] = N_results['MEAN_N_EQR'].apply(self.get_status_from_global_EQR)

        results = P_results.merge(N_results, on=merge_on, how='inner', suffixes=['P', 'N'], copy=True)
        results['mean_EQR'] = results[['MEAN_P_EQR', 'MEAN_N_EQR']].mean(axis=1, skipna=False)
        results['STATUS_NUTRIENTS'] = results['mean_EQR'].apply(self.get_status_from_global_EQR)
        self.results = results
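
    #==========================================================================
    # Illustrative sketch, not part of the original module: a self-contained
    # demo (hypothetical values, hypothetical helper name) of the winter/summer
    # averaging used above, and of why skipna=False matters: a missing
    # sub-indicator propagates as NaN instead of silently biasing the mean.
    def _example_winter_summer_mean(self):
        import pandas as pd
        df = pd.DataFrame({'global_EQR_indicator_din_winter': [0.7, 0.5],
                           'global_EQR_indicator_ntot_winter': [0.9, None],
                           'global_EQR_indicator_ntot_summer': [0.6, 0.6]})
        df['EQR_N_winter_mean'] = df[['global_EQR_indicator_din_winter',
                                      'global_EQR_indicator_ntot_winter']].mean(axis=1, skipna=False)
        # Row 2 gets NaN here (0.5 averaged with a missing value), not 0.5.
        df['MEAN_N_EQR'] = df[['EQR_N_winter_mean',
                               'global_EQR_indicator_ntot_summer']].mean(axis=1, skipna=False)
        df['STATUS_N'] = df['MEAN_N_EQR'].apply(self.get_status_from_global_EQR)
        return df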
###############################################################################
class QualityElementPhytoplankton(QualityElementBase):
    """
    Class that calculates the quality element for Phytoplankton.
    """
    def __init__(self, subset_uuid, parent_workspace_object, quality_element):
        super().__init__(subset_uuid, parent_workspace_object, quality_element)

    def calculate_quality_factor(self):
        print(self.name)
        merge_on = ['VISS_EU_CD', 'WATER_BODY_NAME', 'WATER_TYPE_AREA']

        def mean_of_indicators(indicator_name):
            parameters = self.mapping_objects['quality_element'].indicator_config.loc[indicator_name]['parameters'].split(', ')
            if 'indicator_' not in parameters[0]:
                if 'qe_' not in parameters[0]:
                    return False
            if len(parameters) == 2:
                mean_of_indicators = self.indicator_dict[parameters[0]].merge(
                    self.indicator_dict[parameters[1]], on=merge_on, how='inner', copy=True,
                    suffixes=['_' + par for par in parameters])
                mean_of_indicators['global_EQR_' + indicator_name] = mean_of_indicators[
                    ['global_EQR_' + parameters[0], 'global_EQR_' + parameters[1]]].mean(axis=1, skipna=False)
                mean_of_indicators['STATUS_' + indicator_name] = mean_of_indicators[
                    'global_EQR_' + indicator_name].apply(self.get_status_from_global_EQR)
                self.indicator_dict[indicator_name] = mean_of_indicators
                self.sld.save_df(mean_of_indicators, indicator_name)
            elif len(parameters) == 1:
                # Single sub-indicator: just suffix the value columns with the indicator name.
                col_list = [c for c in self.indicator_dict[parameters[0]].columns if c not in merge_on]
                self.indicator_dict[indicator_name] = self.indicator_dict[parameters[0]].rename(
                    columns={k: k + '_' + indicator_name for k in col_list})
                self.sld.save_df(self.indicator_dict[indicator_name], indicator_name)

        for indicator in self.mapping_objects['quality_element'].indicator_config.index:
            if self.mapping_objects['quality_element'].indicator_config.loc[indicator]['quality element'] == self.name:
                mean_of_indicators(indicator)
###############################################################################
if __name__ == '__main__':
    nr_marks = 60
    print('=' * nr_marks)
    print('Running module "quality_factor.py"')
    print('-' * nr_marks)
    print('')
| avg_line_length 61.420857 | max_line_length 231 | alphanum_fraction 0.619653 | [remaining per-file quality-signal values omitted] |
| 1d129e78d679ac9132df9023228c48322cc15f6b | 2,476 | py | Python | tests/tests.py | awmath/django-bulk-signals | b365715c586b22f47d884883f73122d1a3bd855b | ["MIT"] | 1 | 2022-02-25T08:44:57.000Z | 2022-02-25T08:44:57.000Z | tests/tests.py | awmath/django-bulk-signals | b365715c586b22f47d884883f73122d1a3bd855b | ["MIT"] | 2 | 2021-12-09T10:00:31.000Z | 2021-12-09T12:40:52.000Z | tests/tests.py | awmath/django-bulk-signals | b365715c586b22f47d884883f73122d1a3bd855b | ["MIT"] | null | null | null |
# -*- coding: utf-8 -*-
import pytest
from django.db.models import Sum

from .models import BulkTestModel

pytestmark = pytest.mark.django_db


@pytest.fixture
def objects():
    BulkTestModel.objects.bulk_create([BulkTestModel() for _ in range(10)])
    return BulkTestModel.objects.all()


def test_fixture(objects):
    assert BulkTestModel.objects.count() == 10
    assert BulkTestModel.objects.aggregate(sum=Sum("num"))["sum"] == 0


def test_bulk_create(mocker):
    create_stub = mocker.patch("tests.models.create_stub")
    bulk_update_stub = mocker.patch("tests.models.update_stub")
    update_stub = mocker.patch("tests.models.query_update_stub")
    objects = BulkTestModel.objects.bulk_create([BulkTestModel() for _ in range(10)])
    assert bulk_update_stub.call_count == 0
    assert update_stub.call_count == 0
    assert create_stub.call_count == 1


def test_bulk_update(mocker, objects):
    create_stub = mocker.patch("tests.models.create_stub")
    bulk_update_stub = mocker.patch("tests.models.update_stub")
    update_stub = mocker.patch("tests.models.query_update_stub")
    for o in objects:
        o.num = 1
    BulkTestModel.objects.bulk_update(objects, ["num"])
    assert BulkTestModel.objects.aggregate(sum=Sum("num"))["sum"] == 10
    assert bulk_update_stub.call_count == 1
    assert update_stub.call_count == 0
    assert create_stub.call_count == 0


def test_update(mocker, objects):
    create_stub = mocker.patch("tests.models.create_stub")
    bulk_update_stub = mocker.patch("tests.models.update_stub")
    update_stub = mocker.patch("tests.models.query_update_stub")
    BulkTestModel.objects.update(num=1)
    assert BulkTestModel.objects.aggregate(sum=Sum("num"))["sum"] == 10
    assert bulk_update_stub.call_count == 0
    assert update_stub.call_count == 1
    assert create_stub.call_count == 0


def test_no_action(objects, mocker):
    create_stub = mocker.patch("tests.models.create_stub")
    bulk_update_stub = mocker.patch("tests.models.update_stub")
    update_stub = mocker.patch("tests.models.query_update_stub")
    for o in objects:
        o.num = 1
    BulkTestModel.objects.bulk_update(objects, ["num"], no_action=True)
    BulkTestModel.objects.update(num=2, no_action=True)
    BulkTestModel.objects.bulk_create(
        [BulkTestModel() for _ in range(10)], no_action=True
    )
    assert bulk_update_stub.call_count == 0
    assert update_stub.call_count == 0
    assert create_stub.call_count == 0
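
These tests lean on pytest-mock's mocker fixture, which wraps unittest.mock.patch and automatically undoes every patch at test teardown. A minimal, standalone sketch of the pattern (mypkg.notify and send are hypothetical names, not part of django-bulk-signals):

# mypkg/notify.py (hypothetical module)
def send(msg):
    raise RuntimeError("would hit the network")

# test_notify.py
def test_send_is_stubbed(mocker):
    stub = mocker.patch("mypkg.notify.send")  # replaced with a MagicMock for this test only
    import mypkg.notify
    mypkg.notify.send("hello")
    assert stub.call_count == 1  # the mock recorded the call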
| avg_line_length 30.95 | max_line_length 85 | alphanum_fraction 0.725767 | [remaining per-file quality-signal values omitted] |
| 1d4e327ba70d23ec63c7bc76de064966a3826e82 | 6,099 | py | Python | test/test_packet_filter.py | idresearchdev/SecureTea-Project | 6ddd47f4897c0d22ade520bcc07197dcd3a0e2a4 | ["MIT"] | 1 | 2019-03-26T11:01:03.000Z | 2019-03-26T11:01:03.000Z | test/test_packet_filter.py | idresearchdev/SecureTea-Project | 6ddd47f4897c0d22ade520bcc07197dcd3a0e2a4 | ["MIT"] | null | null | null | test/test_packet_filter.py | idresearchdev/SecureTea-Project | 6ddd47f4897c0d22ade520bcc07197dcd3a0e2a4 | ["MIT"] | null | null | null |
# -*- coding: utf-8 -*-
import unittest

import scapy.all as scapy

from securetea.lib.firewall.packet_filter import PacketFilter


class TestPacket_Filter(unittest.TestCase):
    """Test class for PacketFilter module."""

    def setUp(self):
        """
        Set-up PacketFilter object.
        """
        # A raw TCP/IP packet; adjacent bytes literals avoid the broken
        # backslash line-continuations of the original triple-quoted string,
        # which silently turned escapes like \x82 into literal characters.
        payload = (b"E\x00\x004Q\xc8@\x00@\x06Z\x87\xc0\xa8\x89\x7fh"
                   b"\x82\xdb\xca\x94\xc0\x01\xbb=L\xd3\x97\x14\t\xc9q"
                   b"\x80\x10\x00\xf5\xe7B\x00\x00\x01\x01\x08\n\xeb7\xc9"
                   b"\xa6bjc\xed")
        self.pf1 = PacketFilter()
        self.scapy_pkt = scapy.IP(payload)
    def test_inbound_IPRule(self):
        """
        Test inbound_IPRule.
        """
        self.pf1._action_inbound_IPRule = 0
        result = self.pf1.inbound_IPRule(self.scapy_pkt)
        self.assertEqual(result, 1)

        self.pf1._action_inbound_IPRule = 1
        result = self.pf1.inbound_IPRule(self.scapy_pkt)
        self.assertEqual(result, 0)

        self.pf1._IP_INBOUND = ['104.32.32.32']
        self.pf1._action_inbound_IPRule = 1
        result = self.pf1.inbound_IPRule(self.scapy_pkt)
        self.assertEqual(result, 0)

        self.pf1._IP_INBOUND = ['104.32.32.32']
        self.pf1._action_inbound_IPRule = 0
        result = self.pf1.inbound_IPRule(self.scapy_pkt)
        self.assertEqual(result, 1)

        self.pf1._IP_INBOUND = ['192.168.137.127']
        self.pf1._action_inbound_IPRule = 0
        result = self.pf1.inbound_IPRule(self.scapy_pkt)
        self.assertEqual(result, 0)

        self.pf1._IP_INBOUND = ['192.168.137.127']
        self.pf1._action_inbound_IPRule = 1
        result = self.pf1.inbound_IPRule(self.scapy_pkt)
        self.assertEqual(result, 1)

    def test_outbound_IPRule(self):
        """
        Test outbound IPRule.
        """
        self.pf1._action_outbound_IPRule = 0
        result = self.pf1.outbound_IPRule(self.scapy_pkt)
        self.assertEqual(result, 1)

        self.pf1._action_outbound_IPRule = 1
        result = self.pf1.outbound_IPRule(self.scapy_pkt)
        self.assertEqual(result, 0)

        self.pf1._IP_OUTBOUND = ['192.168.137.127']
        self.pf1._action_outbound_IPRule = 1
        result = self.pf1.outbound_IPRule(self.scapy_pkt)
        self.assertEqual(result, 0)

        self.pf1._IP_OUTBOUND = ['192.168.137.127']
        self.pf1._action_outbound_IPRule = 0
        result = self.pf1.outbound_IPRule(self.scapy_pkt)
        self.assertEqual(result, 1)

        self.pf1._IP_OUTBOUND = ['104.32.32.32']
        self.pf1._action_outbound_IPRule = 0
        result = self.pf1.outbound_IPRule(self.scapy_pkt)
        self.assertEqual(result, 0)

        self.pf1._IP_OUTBOUND = ['104.32.32.32']
        self.pf1._action_outbound_IPRule = 1
        result = self.pf1.outbound_IPRule(self.scapy_pkt)
        self.assertEqual(result, 1)

    def test_protocolRule(self):
        """
        Test protocolRule.
        """
        result = self.pf1.protocolRule(self.scapy_pkt)
        self.assertEqual(result, 1)

        self.pf1._action_protocolRule = 1
        result = self.pf1.protocolRule(self.scapy_pkt)
        self.assertEqual(result, 0)

        # Attribute spelling follows the attribute under test.
        self.pf1._PROTCOLS = ['6']
        self.pf1._action_protocolRule = 1
        result = self.pf1.protocolRule(self.scapy_pkt)
        self.assertEqual(result, 1)

        self.pf1._action_protocolRule = 0
        result = self.pf1.protocolRule(self.scapy_pkt)
        self.assertEqual(result, 0)

        self.pf1._PROTCOLS = ['1']
        self.pf1._action_protocolRule = 1
        result = self.pf1.protocolRule(self.scapy_pkt)
        self.assertEqual(result, 0)

        self.pf1._PROTCOLS = ['1']
        self.pf1._action_protocolRule = 0
        result = self.pf1.protocolRule(self.scapy_pkt)
        self.assertEqual(result, 1)

    def test_DNSRule(self):
        """
        Test DNSRule.
        """
        result = self.pf1.DNSRule(self.scapy_pkt)
        self.assertEqual(result, 1)

    def test_source_portRule(self):
        """
        Test source_portRule.
        """
        result = self.pf1.source_portRule(self.scapy_pkt)
        self.assertEqual(result, 1)

        self.pf1._action_source_portRule = 1
        result = self.pf1.source_portRule(self.scapy_pkt)
        self.assertEqual(result, 0)

        self.pf1._SPORTS = ['8224']
        result = self.pf1.source_portRule(self.scapy_pkt)
        self.assertEqual(result, 1)

        self.pf1._action_source_portRule = 0
        result = self.pf1.source_portRule(self.scapy_pkt)
        self.assertEqual(result, 0)

    def test_dest_portRule(self):
        """
        Test dest_portRule.
        """
        result = self.pf1.dest_portRule(self.scapy_pkt)
        self.assertEqual(result, 1)

        self.pf1._action_dest_portRule = 1
        result = self.pf1.dest_portRule(self.scapy_pkt)
        self.assertEqual(result, 0)

        self.pf1._DPORTS = ['8224']
        result = self.pf1.dest_portRule(self.scapy_pkt)
        self.assertEqual(result, 1)

        self.pf1._action_dest_portRule = 0
        result = self.pf1.dest_portRule(self.scapy_pkt)
        self.assertEqual(result, 0)

    def test_HTTPRequest(self):
        """
        Test HTTPRequest.
        """
        result = self.pf1.HTTPRequest(self.scapy_pkt)
        self.assertEqual(result, 1)

    def test_HTTPResponse(self):
        """
        Test HTTPResponse.
        """
        result = self.pf1.HTTPResponse(self.scapy_pkt)
        self.assertEqual(result, 1)

    def test_scanLoad(self):
        """
        Test scanLoad.
        """
        result = self.pf1.scanLoad(self.scapy_pkt)
        self.assertEqual(result, 1)

        self.pf1._action_scanLoad = 1
        result = self.pf1.scanLoad(self.scapy_pkt)
        self.assertEqual(result, 0)

        self.pf1._action_scanLoad = 0
        self.pf1._EXTENSIONS = [".exe"]
        result = self.pf1.scanLoad(self.scapy_pkt)
        self.assertEqual(result, 1)

        self.pf1._action_scanLoad = 1
        result = self.pf1.scanLoad(self.scapy_pkt)
        self.assertEqual(result, 0)
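
The fixture above builds its packet by decoding a hard-coded byte string. An equivalent scapy packet can also be assembled field by field, which is often easier to read in tests; a minimal sketch (addresses and ports are illustrative, not decoded from the payload):

import scapy.all as scapy

# Compose IP and TCP layers with named fields instead of raw bytes.
pkt = scapy.IP(src="192.168.137.127", dst="104.32.32.32") / scapy.TCP(sport=1024, dport=443)
assert pkt[scapy.IP].src == "192.168.137.127"  # the fields the filter rules inspect
assert pkt[scapy.TCP].dport == 443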
| avg_line_length 31.438144 | max_line_length 74 | alphanum_fraction 0.618954 | [remaining per-file quality-signal values omitted] |
| 1da759c67428b3e7a9d1bf07bf2a5215c41e9a8c | 341 | py | Python | Roran/modules/test.py | mpaulon/ircbots | e141d690ffb2e5ced4012baa5ca243e9d901ce53 | ["MIT"] | null | null | null | Roran/modules/test.py | mpaulon/ircbots | e141d690ffb2e5ced4012baa5ca243e9d901ce53 | ["MIT"] | null | null | null | Roran/modules/test.py | mpaulon/ircbots | e141d690ffb2e5ced4012baa5ca243e9d901ce53 | ["MIT"] | null | null | null |
def start():
    return

def stop():
    return

def apply_command(self, c, e, command, arguments):
    pass

def on_welcome(self, c, e):
    pass

def on_invite(self, c, e):
    pass

def on_join(self, c, e):
    pass

def on_namreply(self, c, e):
    pass

def on_pubmsg(self, c, e):
    pass

def on_privmsg(self, c, e):
    pass
| avg_line_length 9.742857 | max_line_length 50 | alphanum_fraction 0.595308 | [remaining per-file quality-signal values omitted] |
| d58e5b4b56bfa07661fe7440185c9471808eefa1 | 40 | py | Python | spyne/layers/__init__.py | bwhitesell/SpyNN | 52ade7c9f54fa81abc6f6d9133ecccafed69e5dc | ["BSD-3-Clause"] | 12 | 2019-08-16T15:20:47.000Z | 2021-12-08T03:18:20.000Z | spyne/layers/__init__.py | aiden27/SpyNE | 52ade7c9f54fa81abc6f6d9133ecccafed69e5dc | ["BSD-3-Clause"] | null | null | null | spyne/layers/__init__.py | aiden27/SpyNE | 52ade7c9f54fa81abc6f6d9133ecccafed69e5dc | ["BSD-3-Clause"] | 1 | 2019-08-28T14:30:07.000Z | 2019-08-28T14:30:07.000Z |
from .layers import FullyConnectedLayer
| avg_line_length 20 | max_line_length 39 | alphanum_fraction 0.875 | [remaining per-file quality-signal values omitted] |
| d58ede92cbfa53665fac8b3894391d04ea60e94c | 159 | py | Python | vmraid/patches/v6_24/set_language_as_code.py | sowrisurya/vmraid | f833e00978019dad87af80b41279c0146c063ed5 | ["MIT"] | null | null | null | vmraid/patches/v6_24/set_language_as_code.py | sowrisurya/vmraid | f833e00978019dad87af80b41279c0146c063ed5 | ["MIT"] | null | null | null | vmraid/patches/v6_24/set_language_as_code.py | sowrisurya/vmraid | f833e00978019dad87af80b41279c0146c063ed5 | ["MIT"] | null | null | null |
from __future__ import unicode_literals
import vmraid
from vmraid.translate import get_lang_dict
# migrate language from name to code
def execute():
    return
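
The patch body above has been emptied to a bare return. A hedged sketch of what the described name-to-code migration might have looked like; it assumes get_lang_dict() returns a mapping of full language names to language codes and that vmraid.db.sql behaves like its frappe counterpart (both are assumptions, and execute_sketch is a hypothetical name):

def execute_sketch():
    # Assumption: get_lang_dict() -> {'english': 'en', ...}
    for name, code in get_lang_dict().items():
        vmraid.db.sql("update `tabUser` set language = %s where language = %s",
                      (code, name))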
| avg_line_length 17.666667 | max_line_length 42 | alphanum_fraction 0.823899 | [remaining per-file quality-signal values omitted] |
| d5913bb90154ae793d9694b45e011d71a8b6512b | 165 | py | Python | zonetruck/ZoneFilter.py | pv2b/zonetruck | d1aa094c9b0988c12100c8300aae4b390bb276f8 | ["MIT"] | null | null | null | zonetruck/ZoneFilter.py | pv2b/zonetruck | d1aa094c9b0988c12100c8300aae4b390bb276f8 | ["MIT"] | null | null | null | zonetruck/ZoneFilter.py | pv2b/zonetruck | d1aa094c9b0988c12100c8300aae4b390bb276f8 | ["MIT"] | null | null | null |
class ZoneFilter:
    def __init__(self, rules):
        self.rules = rules

    def filter(self, record):
        # TODO Dummy implementation
        return [record]
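
filter() is still a placeholder. One possible shape for a real implementation, assuming each rule is a callable that takes a record and returns True to keep it; the actual zonetruck rule format is not shown in this file, so this class is purely hypothetical:

class CallableRuleZoneFilter:
    """Hypothetical variant: keep a record only if every rule accepts it."""

    def __init__(self, rules):
        self.rules = rules

    def filter(self, record):
        if all(rule(record) for rule in self.rules):
            return [record]
        return []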
| avg_line_length 27.5 | max_line_length 35 | alphanum_fraction 0.624242 | [remaining per-file quality-signal values omitted] |
| [start of next dataset row truncated] |