hexsha string | size int64 | ext string | lang string | max_stars_repo_path string | max_stars_repo_name string | max_stars_repo_head_hexsha string | max_stars_repo_licenses list | max_stars_count int64 | max_stars_repo_stars_event_min_datetime string | max_stars_repo_stars_event_max_datetime string | max_issues_repo_path string | max_issues_repo_name string | max_issues_repo_head_hexsha string | max_issues_repo_licenses list | max_issues_count int64 | max_issues_repo_issues_event_min_datetime string | max_issues_repo_issues_event_max_datetime string | max_forks_repo_path string | max_forks_repo_name string | max_forks_repo_head_hexsha string | max_forks_repo_licenses list | max_forks_count int64 | max_forks_repo_forks_event_min_datetime string | max_forks_repo_forks_event_max_datetime string | content string | avg_line_length float64 | max_line_length int64 | alphanum_fraction float64 | qsc_code_num_words_quality_signal int64 | qsc_code_num_chars_quality_signal float64 | qsc_code_mean_word_length_quality_signal float64 | qsc_code_frac_words_unique_quality_signal float64 | qsc_code_frac_chars_top_2grams_quality_signal float64 | qsc_code_frac_chars_top_3grams_quality_signal float64 | qsc_code_frac_chars_top_4grams_quality_signal float64 | qsc_code_frac_chars_dupe_5grams_quality_signal float64 | qsc_code_frac_chars_dupe_6grams_quality_signal float64 | qsc_code_frac_chars_dupe_7grams_quality_signal float64 | qsc_code_frac_chars_dupe_8grams_quality_signal float64 | qsc_code_frac_chars_dupe_9grams_quality_signal float64 | qsc_code_frac_chars_dupe_10grams_quality_signal float64 | qsc_code_frac_chars_replacement_symbols_quality_signal float64 | qsc_code_frac_chars_digital_quality_signal float64 | qsc_code_frac_chars_whitespace_quality_signal float64 | qsc_code_size_file_byte_quality_signal float64 | qsc_code_num_lines_quality_signal float64 | qsc_code_num_chars_line_max_quality_signal float64 | qsc_code_num_chars_line_mean_quality_signal float64 | 
qsc_code_frac_chars_alphabet_quality_signal float64 | qsc_code_frac_chars_comments_quality_signal float64 | qsc_code_cate_xml_start_quality_signal float64 | qsc_code_frac_lines_dupe_lines_quality_signal float64 | qsc_code_cate_autogen_quality_signal float64 | qsc_code_frac_lines_long_string_quality_signal float64 | qsc_code_frac_chars_string_length_quality_signal float64 | qsc_code_frac_chars_long_word_length_quality_signal float64 | qsc_code_frac_lines_string_concat_quality_signal float64 | qsc_code_cate_encoded_data_quality_signal float64 | qsc_code_frac_chars_hex_words_quality_signal float64 | qsc_code_frac_lines_prompt_comments_quality_signal float64 | qsc_code_frac_lines_assert_quality_signal float64 | qsc_codepython_cate_ast_quality_signal float64 | qsc_codepython_frac_lines_func_ratio_quality_signal float64 | qsc_codepython_cate_var_zero_quality_signal bool | qsc_codepython_frac_lines_pass_quality_signal float64 | qsc_codepython_frac_lines_import_quality_signal float64 | qsc_codepython_frac_lines_simplefunc_quality_signal float64 | qsc_codepython_score_lines_no_logic_quality_signal float64 | qsc_codepython_frac_lines_print_quality_signal float64 | qsc_code_num_words int64 | qsc_code_num_chars int64 | qsc_code_mean_word_length int64 | qsc_code_frac_words_unique null | qsc_code_frac_chars_top_2grams int64 | qsc_code_frac_chars_top_3grams int64 | qsc_code_frac_chars_top_4grams int64 | qsc_code_frac_chars_dupe_5grams int64 | qsc_code_frac_chars_dupe_6grams int64 | qsc_code_frac_chars_dupe_7grams int64 | qsc_code_frac_chars_dupe_8grams int64 | qsc_code_frac_chars_dupe_9grams int64 | qsc_code_frac_chars_dupe_10grams int64 | qsc_code_frac_chars_replacement_symbols int64 | qsc_code_frac_chars_digital int64 | qsc_code_frac_chars_whitespace int64 | qsc_code_size_file_byte int64 | qsc_code_num_lines int64 | qsc_code_num_chars_line_max int64 | qsc_code_num_chars_line_mean int64 | qsc_code_frac_chars_alphabet int64 | qsc_code_frac_chars_comments int64 | 
qsc_code_cate_xml_start int64 | qsc_code_frac_lines_dupe_lines int64 | qsc_code_cate_autogen int64 | qsc_code_frac_lines_long_string int64 | qsc_code_frac_chars_string_length int64 | qsc_code_frac_chars_long_word_length int64 | qsc_code_frac_lines_string_concat null | qsc_code_cate_encoded_data int64 | qsc_code_frac_chars_hex_words int64 | qsc_code_frac_lines_prompt_comments int64 | qsc_code_frac_lines_assert int64 | qsc_codepython_cate_ast int64 | qsc_codepython_frac_lines_func_ratio int64 | qsc_codepython_cate_var_zero int64 | qsc_codepython_frac_lines_pass int64 | qsc_codepython_frac_lines_import int64 | qsc_codepython_frac_lines_simplefunc int64 | qsc_codepython_score_lines_no_logic int64 | qsc_codepython_frac_lines_print int64 | effective string | hits int64 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
bddda4c040819b870403559ed00ebaab7190ea8f | 679 | py | Python | rxsci/internal/utils.py | maki-nage/rxsci | 64c9956752cbdd4c65aa9f054b6b28318a056625 | [
"MIT"
] | 3 | 2021-05-03T13:40:46.000Z | 2022-03-06T07:59:30.000Z | rxsci/internal/utils.py | maki-nage/rxsci | 64c9956752cbdd4c65aa9f054b6b28318a056625 | [
"MIT"
] | 9 | 2020-10-22T21:08:10.000Z | 2021-08-05T09:01:26.000Z | rxsci/internal/utils.py | maki-nage/rxsci | 64c9956752cbdd4c65aa9f054b6b28318a056625 | [
"MIT"
] | 2 | 2021-01-05T16:48:54.000Z | 2021-08-07T12:51:01.000Z |
class NotSet(object):
"""Sentinel value."""
def __eq__(self, other):
return self is other
def __repr__(self):
return 'NotSet'
class StateNotSet(object):
def __eq__(self, other):
return self is other
def __repr__(self):
return 'NotSet'
def value(self):
return 0
class StateSet(object):
def __eq__(self, other):
return self is other
def __repr__(self):
return 'Set'
def value(self):
return 1
class StateCleared(object):
def __eq__(self, other):
return self is other
def __repr__(self):
return 'Cleared'
def value(self):
return 2
| 15.431818 | 28 | 0.584683 | 81 | 679 | 4.506173 | 0.246914 | 0.191781 | 0.09863 | 0.153425 | 0.608219 | 0.608219 | 0.608219 | 0.608219 | 0.608219 | 0.608219 | 0 | 0.006508 | 0.32106 | 679 | 43 | 29 | 15.790698 | 0.785249 | 0.022091 | 0 | 0.653846 | 0 | 0 | 0.033537 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.423077 | false | 0 | 0 | 0.423077 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 3 |
bdde05efcf874ead71f10d44b9b94987c03fce5e | 995 | py | Python | tests/test_git.py | graycarl/hbk | d4c90807b2558a2b61fb1253d9804fbaf373443f | [
"MIT"
] | 1 | 2021-07-22T05:25:35.000Z | 2021-07-22T05:25:35.000Z | tests/test_git.py | graycarl/hbk | d4c90807b2558a2b61fb1253d9804fbaf373443f | [
"MIT"
] | 37 | 2017-07-27T06:07:25.000Z | 2020-12-11T12:57:31.000Z | tests/test_git.py | graycarl/hbk | d4c90807b2558a2b61fb1253d9804fbaf373443f | [
"MIT"
] | 1 | 2019-04-02T08:36:32.000Z | 2019-04-02T08:36:32.000Z | # -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
from builtins import * # noqa
import pytest
from hbkit import libs
@pytest.fixture
def git_config():
content = \
"""
[core]
repositoryformatversion = 0
filemode = true
bare = false
logallrefupdates = true
ignorecase = true
precomposeunicode = true
[remote "origin"]
url = https://github.com/graycarl/hbkit.git
fetch = +refs/heads/*:refs/remotes/origin/*
[remote "other"]
url = https://gitlab.com/graycarl/hbkit.git
fetch = +refs/heads/*:refs/remotes/origin/*
[branch "master"]
remote = origin
merge = refs/heads/master
[branch "Github-Check-CI"]
remote = origin
merge = refs/heads/Github-Check-CI
"""
return content
def test_iter_remote_from_git_config(git_config):
remotes = list(libs.git.iter_remotes_from_git_config(git_config))
expect = [
'https://github.com/graycarl/hbkit.git',
'https://gitlab.com/graycarl/hbkit.git'
]
assert remotes == expect
| 23.690476 | 69 | 0.706533 | 127 | 995 | 5.393701 | 0.417323 | 0.065693 | 0.093431 | 0.110949 | 0.405839 | 0.265693 | 0.145985 | 0.145985 | 0.145985 | 0.145985 | 0 | 0.002398 | 0.161809 | 995 | 41 | 70 | 24.268293 | 0.818945 | 0.026131 | 0 | 0 | 0 | 0 | 0.15914 | 0 | 0 | 0 | 0 | 0 | 0.066667 | 1 | 0.133333 | false | 0 | 0.266667 | 0 | 0.466667 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
bddf37e95b95085df635e34da4f5fe3ecddbccb0 | 1,751 | py | Python | src/richie/apps/courses/migrations/0013_migrate_data_translated_licence_fields.py | leduong/richie | bf7ed379b7e2528cd790dadcec10ac2656efd189 | [
"MIT"
] | 174 | 2018-04-14T23:36:01.000Z | 2022-03-10T09:27:01.000Z | src/richie/apps/courses/migrations/0013_migrate_data_translated_licence_fields.py | leduong/richie | bf7ed379b7e2528cd790dadcec10ac2656efd189 | [
"MIT"
] | 631 | 2018-04-04T11:28:53.000Z | 2022-03-31T11:18:31.000Z | src/richie/apps/courses/migrations/0013_migrate_data_translated_licence_fields.py | leduong/richie | bf7ed379b7e2528cd790dadcec10ac2656efd189 | [
"MIT"
] | 64 | 2018-06-27T08:35:01.000Z | 2022-03-10T09:27:43.000Z | # Generated by Django 2.2.8 on 2020-01-02 13:56
from django.conf import settings
from django.core.exceptions import ObjectDoesNotExist
from django.db import migrations
def forwards_func(apps, schema_editor):
Licence = apps.get_model("courses", "Licence")
LicenceTranslation = apps.get_model("courses", "LicenceTranslation")
for licence in Licence.objects.all():
LicenceTranslation.objects.create(
master_id=licence.pk,
language_code=settings.LANGUAGE_CODE,
name=licence.name_deprecated,
)
def backwards_func(apps, schema_editor):
Licence = apps.get_model("courses", "Licence")
LicenceTranslation = apps.get_model("courses", "LicenceTranslation")
for licence in Licence.objects.all():
translation = _get_translation(licence, LicenceTranslation)
licence.name_deprecated = translation.name
licence.save() # Note this only calls Model.save()
def _get_translation(licence, LicenceTranslation):
translations = LicenceTranslation.objects.filter(master_id=licence.pk)
try:
# Try default translation
return translations.get(language_code=settings.LANGUAGE_CODE)
except ObjectDoesNotExist:
try:
# Try default language
return translations.get(language_code=settings.PARLER_DEFAULT_LANGUAGE_CODE)
except ObjectDoesNotExist:
# Maybe the object was translated only in a specific language?
# Take the first existing translation
return translations.first()
class Migration(migrations.Migration):
dependencies = [("courses", "0012_add_translation_model_for_licence_fields")]
operations = [migrations.RunPython(forwards_func, backwards_func)]
| 35.02 | 88 | 0.718447 | 191 | 1,751 | 6.418848 | 0.39267 | 0.058728 | 0.039152 | 0.06199 | 0.326264 | 0.290375 | 0.223491 | 0.223491 | 0.223491 | 0.223491 | 0 | 0.013542 | 0.198744 | 1,751 | 49 | 89 | 35.734694 | 0.860299 | 0.126214 | 0 | 0.322581 | 1 | 0 | 0.085358 | 0.029547 | 0 | 0 | 0 | 0 | 0 | 1 | 0.096774 | false | 0 | 0.096774 | 0 | 0.387097 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
bddf54c37693012c2ebee8e890c2bc5f10dfd58d | 5,510 | py | Python | responsible_ai/gan_data_debiased/main.py | AaratiAkkapeddi/nnabla-examples | db9e5ad850303c158773aeb275e5c3821b4a3935 | [
"Apache-2.0"
] | null | null | null | responsible_ai/gan_data_debiased/main.py | AaratiAkkapeddi/nnabla-examples | db9e5ad850303c158773aeb275e5c3821b4a3935 | [
"Apache-2.0"
] | null | null | null | responsible_ai/gan_data_debiased/main.py | AaratiAkkapeddi/nnabla-examples | db9e5ad850303c158773aeb275e5c3821b4a3935 | [
"Apache-2.0"
] | null | null | null | # Copyright 2022 Sony Group Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
from nnabla.ext_utils import get_extension_context
import nnabla as nn
import args
import data_loader as dl
import classifier as clf
from utils import utils
def model_train_setting(opt):
"""
Get the model train settings
Args:
opt : variables that containing values for all of your options
Returns:
variables which you need to train
"""
attr_list = utils.get_all_attr()
if opt['model_train'] == 'baseline':
data_params = {
"train_beg": opt['train_beg'],
"valid_beg": opt['valid_beg'],
"test_beg": opt['test_beg'],
}
data_setting = {
'path': opt['base_img_path'],
'protected_attribute': opt['protected_attribute'],
'attribute': opt['attribute'],
'data_params': data_params,
'batch_size': opt['batch_size'],
'learning_rate': opt['learning_rate'],
'max_iter': opt['max_iter_base']
}
opt['data_setting'] = data_setting
if opt['model_train'] == 'gan_debiased':
data_params = {
"train_beg": opt['train_beg'],
"valid_beg": opt['valid_beg'],
"test_beg": opt['test_beg'],
}
real_params = {
'path': opt['base_img_path'],
'attribute': opt['attribute'],
'protected_attribute': opt['protected_attribute'],
'data_params': data_params
}
generated_images = "{}/AllGenImages".format(opt["fake_data_dir"])
flipped_images = "{}/{}/".format(opt["fake_data_dir"],
attr_list[opt['attribute']])
label_score = "{}/all_{}_scores.pkl".format(opt['fake_data_dir'],
attr_list[opt['attribute']])
domain_score = "{}/all_{}_scores.pkl".format(opt['fake_data_dir'],
attr_list[opt['protected_attribute']])
generated_params = {
'generated_image_path': generated_images,
'flipped_images_path': flipped_images,
'label_path': label_score,
'domain_path': domain_score,
# flipped the images from 15000 to 175000
'flipped_image_range': (15000, 175000),
'orig_label_range': (160000, 320000), # original label range
'new_range': (0, 160000), # new images
}
data_setting = {
'real_params': real_params,
'gen_params': generated_params,
'batch_size': opt['batch_size'],
'learning_rate': opt['learning_rate'],
'max_iter': opt['max_iter_gan_debiased']
}
opt['data_setting'] = data_setting
return opt
def main():
"""
main method
"""
opt = args.get_args()
opt = model_train_setting(opt)
ctx = get_extension_context(
opt['context'], device_id=opt['device_id'], type_config=opt['type_config'])
nn.set_default_context(ctx)
# model configurations
batch_size = opt['data_setting']['batch_size']
learning_rate = opt['data_setting']['learning_rate']
max_iter = opt['data_setting']['max_iter']
if (opt["model_train"] == 'baseline'):
train = dl.actual_celeba_dataset(opt['data_setting'], batch_size,
augment=True, split='train', shuffle=True)
val = dl.actual_celeba_dataset(opt['data_setting'], batch_size,
augment=False, split='valid', shuffle=False)
val_weight = None
elif (opt["model_train"] == 'gan_debiased'):
train = dl.debiased_celeba_dataset(opt['data_setting'], batch_size,
augment=True, split='train', shuffle=True)
val = dl.actual_celeba_dataset(opt['data_setting']['real_params'], batch_size,
augment=False, split='valid', shuffle=False)
val_weight = utils.compute_class_weight(val)
else:
print("please provide proper argument")
sys.exit(0)
attr_list = utils.get_all_attr()
if not os.path.exists(opt['model_save_path']):
os.makedirs(opt['model_save_path'])
monitor_path = os.path.join(
opt['model_save_path'], attr_list[opt['attribute']])
if not os.path.exists(monitor_path):
os.makedirs(monitor_path)
attribute_classifier_model = clf.attribute_classifier(batch_size=batch_size,
learning_rate=learning_rate,
max_iter=max_iter,
monitor_path=monitor_path,
val_weight=val_weight)
attribute_classifier_model.train(train, val)
if __name__ == '__main__':
main()
| 38.531469 | 91 | 0.580581 | 622 | 5,510 | 4.853698 | 0.287781 | 0.047367 | 0.041736 | 0.027824 | 0.398145 | 0.262007 | 0.262007 | 0.245446 | 0.245446 | 0.229215 | 0 | 0.013089 | 0.306715 | 5,510 | 142 | 92 | 38.802817 | 0.777225 | 0.147913 | 0 | 0.294118 | 0 | 0 | 0.229275 | 0.004534 | 0 | 0 | 0 | 0 | 0 | 1 | 0.019608 | false | 0 | 0.078431 | 0 | 0.107843 | 0.009804 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
bde0ad9f17012d7ebc6ee66313fe41b54189ab35 | 5,109 | py | Python | hstools/utilities.py | saisiddu/pub_bandaragoda_etal_ems | d06e23c7c5dfa772d5dfe55c33bcf7abbd5e2060 | [
"MIT"
] | 1 | 2019-09-24T15:22:05.000Z | 2019-09-24T15:22:05.000Z | hstools/utilities.py | saisiddu/pub_bandaragoda_etal_ems | d06e23c7c5dfa772d5dfe55c33bcf7abbd5e2060 | [
"MIT"
] | null | null | null | hstools/utilities.py | saisiddu/pub_bandaragoda_etal_ems | d06e23c7c5dfa772d5dfe55c33bcf7abbd5e2060 | [
"MIT"
] | null | null | null | from __future__ import print_function
import os
from IPython.core.display import display, HTML
import glob
from .compat import *
def sizeof_fmt(num, suffix='B'):
for unit in ['', 'Ki', 'Mi', 'Gi', 'Ti', 'Pi', 'Ei', 'Zi']:
if abs(num) < 1024.0:
return "%3.1f%s%s" % (num, unit, suffix)
num /= 1024.0
return "%.1f%s%s" % (num, 'Yi', suffix)
def get_hs_content(resid):
resdir = find_resource_directory(resid)
content = {}
for f in glob.glob('%s/*/data/contents/*' % resdir):
fname = os.path.basename(f)
content[fname] = f
return content
def find_resource_directory(resid):
download_dir = os.environ.get('JUPYTER_DOWNLOADS', 'hs_downloads')
# loop over all the files in userspace
for dirpath, dirnames, filenames in os.walk(download_dir):
for dirname in [d for d in dirnames]:
if dirname == resid:
return os.path.join(dirpath, dirname)
return None
def check_for_ipynb(content_files):
links = {}
for f, p in content_files.items():
if f[-5:] == 'ipynb':
fname = os.path.basename(p)
url = urlencode(p)
links[fname] = url
return links
def display_tree(resid):
# todo: display a tree view of the resource bagit, based on id
pass
def display_resource_content_files(content_file_dictionary,
text='Found the following content when parsing the HydroShare resource:'):
# get ipynb files
nbs = check_for_ipynb(content_file_dictionary)
if len(nbs.keys()) > 0:
display(HTML('<b>Found the following notebook(s) associated with this HydroShare resource.</b><br>Click the link(s) below to launch the notebook.'))
for name, url in nbs.items():
display(HTML('<a href=%s target="_blank">%s<a>' % (url, name)))
# print the remaining files
if len(content_file_dictionary.keys()) > 0:
display(HTML('<b>Found the following file(s) associated with this HydroShare resource.</b>'))
text = '<br>'.join(content_file_dictionary.keys())
display(HTML(text))
if (len(content_file_dictionary.keys()) + len(nbs.keys())) > 0:
display(HTML('These files are stored in a dictionary called <b>hs.content</b> for your convenience. To access a file, simply issue the following command where MY_FILE is one of the files listed above: <pre>hs.content["MY_FILE"] </pre> '))
def load_environment(env_path=None):
# load the environment path (if it exists)
if env_path is None:
env_path = os.path.join(os.environ.get('NOTEBOOK_HOME', './'), '.env' )
if not os.path.exists(env_path):
return
with open(env_path, 'r') as f:
lines = f.readlines()
print('Adding the following system variables:')
for line in lines:
k, v = line.strip().split('=')
os.environ[k] = v
print(' %s = %s' % (k, v))
print('\nThese can be accessed using the following command: ')
print(' os.environ[key]')
print('\n (e.g.)\n os.environ["HS_USR_NAME"] => %s' % os.environ['HS_USR_NAME'])
def get_env_var(varname):
if varname in os.environ.keys():
return os.environ[varname]
else:
return input('Could not find %s, please specify a value: ' % varname).strip()
def get_server_url_for_path(p):
"""
gets the url corresponding to a given file or directory path
p : path to convert into a url
returns the url path for the filepath p
"""
load_environment()
rel_path = os.path.relpath(p, os.environ['NOTEBOOK_HOME'])
url = urlencode(rel_path)
return url
def get_relative_path(p):
"""
gets the path relative to the jupyter home directory
p: path to convert into relative path
returns the path relative to the default jupyter home directory
"""
return os.path.relpath(p, os.environ['NOTEBOOK_HOME'])
def _realname(path, root=None):
if root is not None:
path = os.path.join(root, path)
result = os.path.basename(path)
if os.path.islink(path):
realpath = os.readlink(path)
result = '%s -> %s' % (os.path.basename(path), realpath)
return result
def tree(startpath, depth=-1):
prefix = 0
if startpath != '/':
if startpath.endswith('/'):
startpath = startpath[:-1]
prefix = len(startpath)
for root, dirs, files in os.walk(startpath):
level = root[prefix:].count(os.sep)
if depth > -1 and level > depth:
continue
indent = subindent = ''
if level > 0:
indent = '| ' * (level-1) + '|-- '
subindent = '| ' * (level) + '|-- '
print('{}{}/'.format(indent, _realname(root)))
# print dir only if symbolic link; otherwise, will be printed as root
for d in dirs:
if os.path.islink(os.path.join(root, d)):
print('{}{}'.format(subindent, _realname(d, root=root)))
for f in files:
print('{}{}'.format(subindent, _realname(f, root=root)))
| 30.963636 | 247 | 0.607164 | 702 | 5,109 | 4.323362 | 0.294872 | 0.0257 | 0.034596 | 0.015815 | 0.135091 | 0.098188 | 0.070511 | 0.04547 | 0 | 0 | 0 | 0.006094 | 0.261304 | 5,109 | 164 | 248 | 31.152439 | 0.798092 | 0.106087 | 0 | 0 | 0 | 0.019417 | 0.204873 | 0.020819 | 0 | 0 | 0 | 0.006098 | 0 | 1 | 0.116505 | false | 0.009709 | 0.048544 | 0 | 0.281553 | 0.087379 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
bde1d05ede2b3e57e640969726cd7f09fb8e5559 | 155 | py | Python | assignment2/earth_electrons.py | guozhonghao1994/ec602 | e8f6b61e5cdad64e9fe943fc4f61d1fc9ad85f74 | [
"Unlicense"
] | 3 | 2018-11-14T16:07:31.000Z | 2018-11-15T16:44:51.000Z | assignment2/earth_electrons.py | guozhonghao1994/ec602 | e8f6b61e5cdad64e9fe943fc4f61d1fc9ad85f74 | [
"Unlicense"
] | null | null | null | assignment2/earth_electrons.py | guozhonghao1994/ec602 | e8f6b61e5cdad64e9fe943fc4f61d1fc9ad85f74 | [
"Unlicense"
] | null | null | null | #Copyright 2017 Zhonghao Guo gzh1994@bu.edu
import sys
estimate=3.91*10**38
lower=3.72*10**38
upper=4.11*10**38
print(estimate)
print(lower)
print(upper)
| 17.222222 | 43 | 0.754839 | 30 | 155 | 3.9 | 0.666667 | 0.102564 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.205674 | 0.090323 | 155 | 8 | 44 | 19.375 | 0.624113 | 0.270968 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.142857 | 0 | 0.142857 | 0.428571 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 3 |
bde5bd2cb3f7fdf8cc6f96a4c93e07d27f29156e | 16,286 | py | Python | activity/activity_IngestDigestToEndpoint.py | elifesciences/elife-bot | d3a102c8030e4b7ec83cbd45e5f839dba4f9ffd9 | [
"MIT"
] | 17 | 2015-02-10T07:10:29.000Z | 2021-05-14T22:24:45.000Z | activity/activity_IngestDigestToEndpoint.py | elifesciences/elife-bot | d3a102c8030e4b7ec83cbd45e5f839dba4f9ffd9 | [
"MIT"
] | 459 | 2015-03-31T18:24:23.000Z | 2022-03-30T19:44:40.000Z | activity/activity_IngestDigestToEndpoint.py | elifesciences/elife-bot | d3a102c8030e4b7ec83cbd45e5f839dba4f9ffd9 | [
"MIT"
] | 9 | 2015-04-18T16:57:31.000Z | 2020-10-30T11:49:13.000Z | import os
import time
import json
from collections import OrderedDict
from digestparser import json_output
from provider.execution_context import get_session
from provider.article_processing import download_jats
from provider import digest_provider, email_provider, lax_provider, utils
from activity.objects import Activity
class activity_IngestDigestToEndpoint(Activity):
def __init__(self, settings, logger, conn=None, token=None, activity_task=None):
super(activity_IngestDigestToEndpoint, self).__init__(
settings, logger, conn, token, activity_task
)
self.name = "IngestDigestToEndpoint"
self.pretty_name = "Ingest Digest to API endpoint"
self.version = "1"
self.default_task_heartbeat_timeout = 30
self.default_task_schedule_to_close_timeout = 60 * 5
self.default_task_schedule_to_start_timeout = 30
self.default_task_start_to_close_timeout = 60 * 5
self.description = (
"Send Digest JSON to an API endpoint,"
+ " to be run when a research article is ingested"
)
# Local directory settings
self.directories = {
"TEMP_DIR": os.path.join(self.get_tmp_dir(), "tmp_dir"),
"INPUT_DIR": os.path.join(self.get_tmp_dir(), "input_dir"),
}
# Track the success of some steps
self.statuses = OrderedDict(
[
("approve", None),
("download", None),
("generate", None),
("ingest", None),
]
)
# Digest JSON content
self.digest_content = None
# Load the config
self.digest_config = digest_provider.digest_config(
self.settings.digest_config_section, self.settings.digest_config_file
)
def do_activity(self, data=None):
self.logger.info("data: %s" % json.dumps(data, sort_keys=True, indent=4))
success, run, session, article_id, version = self.session_data(data)
self.make_activity_directories()
# get session data
if success is not True:
self.logger.error("Failed to parse session data in %s" % self.pretty_name)
return self.ACTIVITY_PERMANENT_FAILURE
# emit start message
success = self.emit_start_message(article_id, version, run)
if success is not True:
self.logger.error("Failed to emit a start message in %s" % self.pretty_name)
return self.ACTIVITY_PERMANENT_FAILURE
# Approve for ingestion
self.statuses["approve"] = self.approve(
article_id,
session.get_value("status"),
version,
session.get_value("run_type"),
)
if self.statuses.get("approve") is not True:
self.logger.info(
"Digest for article %s was not approved for ingestion" % article_id
)
self.emit_end_message(article_id, version, run)
return self.ACTIVITY_SUCCESS
try:
digest_details = self.gather_digest_details(
article_id, version, session.get_value("expanded_folder")
)
except Exception as exception:
# send email error if any error message is returned
message = "Error in gathering digest details: %s" % str(exception)
self.logger.exception(message)
return self.email_error_return(article_id, message)
# generate the digest content
try:
self.digest_content = self.generate_digest_content(
article_id, digest_details
)
except Exception as exception:
# send email error if unable to generate digest content
message = "Error in generating digest content for article: %s" % str(
exception
)
self.logger.exception(message)
return self.email_error_return(article_id, message)
# issue put to the endpoint
digest_id = self.digest_content.get("id")
# set the stage attribute depending on silent correction or not
if (
session.get_value("run_type")
and session.get_value("run_type") == "silent-correction"
):
digest_provider.set_stage(self.digest_content, "published")
else:
digest_provider.set_stage(self.digest_content, "preview")
self.logger.info(
"Digest stage value %s" % str(self.digest_content.get("stage"))
)
try:
put_response = digest_provider.put_digest_to_endpoint(
self.logger, digest_id, self.digest_content, self.settings
)
if put_response:
self.statuses["ingest"] = True
except Exception as exception:
# email error message and return self.ACTIVITY_SUCCESS
message = "Failed to ingest digest json to endpoint %s in %s: %s" % (
article_id,
self.pretty_name,
str(exception),
)
self.logger.exception(message)
return self.email_error_return(article_id, message)
self.logger.info(
"%s for article_id %s statuses: %s"
% (self.name, str(article_id), self.statuses)
)
self.emit_end_message(article_id, version, run)
return self.ACTIVITY_SUCCESS
def session_data(self, data):
"get session data and return basic values"
run = None
session = None
version = None
article_id = None
success = None
try:
run = data["run"]
session = get_session(self.settings, data, run)
version = session.get_value("version")
article_id = session.get_value("article_id")
success = True
except (TypeError, KeyError) as exception:
self.logger.exception(
"Exception when getting the session for Starting ingest digest "
+ " to endpoint. Details: %s" % str(exception)
)
return success, run, session, article_id, version
def email_error_return(self, article_id, message):
"""log exception, email error message and return activity result"""
send_error_email(article_id, message, self.settings, self.logger)
return self.ACTIVITY_SUCCESS
def emit_message(self, article_id, version, run, status, message):
"emit message to the queue"
try:
self.emit_monitor_event(
self.settings,
article_id,
version,
run,
self.pretty_name,
status,
message,
)
return True
except Exception as exception:
self.logger.exception(
"Exception emitting %s message. Details: %s"
% (str(status), str(exception))
)
def emit_start_message(self, article_id, version, run):
"emit the start message to the queue"
return self.emit_message(
article_id,
version,
run,
"start",
"Starting ingest digest to endpoint for " + str(article_id),
)
def digest_preview_link(self, article_id):
"preview link for the digest using the preview base url"
return "%s/digests/%s" % (
self.settings.journal_preview_base_url,
utils.pad_msid(article_id),
)
def activity_end_message(self, article_id, statuses):
"different end message to emit based on the ingest status"
if statuses.get("ingest") is True:
return (
"Finished ingest digest to endpoint for %s. Statuses %s Preview link %s"
% (article_id, statuses, self.digest_preview_link(article_id))
)
return "No digest ingested for %s. Statuses %s" % (article_id, statuses)
def emit_end_message(self, article_id, version, run):
"emit the end message to the queue"
return self.emit_message(
article_id,
version,
run,
"end",
self.activity_end_message(article_id, self.statuses),
)
def emit_error_message(self, article_id, version, run, message):
"emit an error message to the queue"
return self.emit_message(article_id, version, run, "error", message)
def approve(self, article_id, status, version, run_type):
"should we ingest based on some basic attributes"
approve_status = True
# check by status
return_status = digest_provider.approve_by_status(
self.logger, article_id, status
)
if return_status is False:
approve_status = return_status
# check silent corrections and consider the first vor version
run_type_status = digest_provider.approve_by_run_type(
self.settings, self.logger, article_id, run_type, version
)
first_vor_status = digest_provider.approve_by_first_vor(
self.settings, self.logger, article_id, version, status
)
if first_vor_status is False and run_type != "silent-correction":
# not the first vor and not a silent correction, do not approve
approve_status = False
elif run_type_status is False:
# otherwise depend on the silent correction run_type logic
approve_status = False
# check if there is a digest docx in the bucket for this article
if approve_status:
if not digest_provider.docx_exists_in_s3(
self.settings, article_id, self.settings.bot_bucket, self.logger
):
self.logger.info(
"Digest docx file does not exist in S3 for article %s" % article_id
)
approve_status = False
return approve_status
def gather_digest_details(self, article_id, version, expanded_folder):
    "collect the digest docx, image name, JATS file and related article data"
    details = OrderedDict()
    # Download digest from the S3 outbox
    details["docx_file"] = digest_download_docx_from_s3(
        article_id,
        self.settings.bot_bucket,
        self.directories.get("INPUT_DIR"),
        self.settings,
        self.logger,
    )
    self.statuses["download"] = True
    # find the image file name
    details["image_file"] = digest_image_file_name_from_s3(
        article_id, self.settings.bot_bucket, self.settings
    )
    # download jats file
    details["jats_file"] = download_jats_for_digest(
        expanded_folder,
        self.settings,
        self.directories.get("TEMP_DIR"),
        self.logger,
    )
    # related article data
    details["related"] = get_related_from_lax(
        article_id, version, self.settings, self.pretty_name, self.logger
    )
    return details
def generate_digest_content(self, article_id, digest_details):
    """Generate digest JSON content from the gathered digest details.

    digest_details is the OrderedDict produced by gather_digest_details
    (keys: docx_file, jats_file, image_file, related).  Raises Exception
    with a descriptive message when generation fails or yields nothing;
    the caller is expected to email the message and return success.
    """
    try:
        digest_content = self.digest_json(
            digest_details.get("docx_file"),
            digest_details.get("jats_file"),
            digest_details.get("image_file"),
            digest_details.get("related"),
        )
    except Exception as exception:
        # email error message and return self.ACTIVITY_SUCCESS
        message = "Failed to generate digest json for %s in %s: %s" % (
            article_id,
            self.pretty_name,
            str(exception),
        )
        # chain the original exception so the root-cause traceback is kept
        raise Exception(message) from exception
    if not digest_content:
        # email error message and return self.ACTIVITY_SUCCESS
        message = (
            "Unable to generate Digest content for docx_file %s, "
            "jats_file %s, image_file %s"
        ) % (
            digest_details.get("docx_file"),
            digest_details.get("jats_file"),
            digest_details.get("image_file"),
        )
        raise Exception(message)
    self.statuses["generate"] = True
    return digest_content
def digest_json(self, docx_file, jats_file=None, image_file=None, related=None):
    "generate the digest json content from the docx file and other data"
    try:
        return json_output.build_json(
            docx_file,
            self.directories.get("TEMP_DIR"),
            self.digest_config,
            jats_file,
            image_file,
            related,
        )
    except Exception as exception:
        self.logger.exception(
            "Exception generating digest json for docx_file %s. Details: %s"
            % (str(docx_file), str(exception))
        )
        return None
def digest_download_docx_from_s3(article_id, bucket_name, input_dir, settings, logger):
    """Download the digest docx for an article from the S3 bucket.

    Thin wrapper over digest_provider.download_docx_from_s3 that re-raises
    any failure as an Exception with a readable message, chaining the cause
    so the original traceback is preserved.
    """
    try:
        return digest_provider.download_docx_from_s3(
            settings, article_id, bucket_name, input_dir, logger
        )
    except Exception as exception:
        message = "Unable to download digest docx file for article %s: %s" % (
            article_id,
            str(exception),
        )
        raise Exception(message) from exception
def digest_image_file_name_from_s3(article_id, bucket_name, settings):
    """Look up the digest image file name in the S3 bucket.

    Wraps digest_provider.image_file_name_from_s3, re-raising failures with
    a readable message and the original exception chained as the cause.
    """
    try:
        return digest_provider.image_file_name_from_s3(
            settings, article_id, bucket_name
        )
    except Exception as exception:
        message = "Failed to get image file name from S3 for digest %s: %s" % (
            article_id,
            str(exception),
        )
        raise Exception(message) from exception
def download_jats_for_digest(expanded_folder, settings, temp_dir, logger):
    """Download the JATS XML from the expanded folder into temp_dir.

    Wraps download_jats, re-raising failures with a readable message and
    the original exception chained as the cause.
    """
    try:
        return download_jats(settings, expanded_folder, temp_dir, logger)
    except Exception as exception:
        message = "Failed to download JATS from expanded folder %s: %s" % (
            expanded_folder,
            str(exception),
        )
        raise Exception(message) from exception
def get_related_from_lax(article_id, version, settings, pretty_name, logger):
    """Get related article data from Lax for the digest.

    Wraps related_from_lax, re-raising failures with a readable message and
    the original exception chained as the cause.
    """
    try:
        return related_from_lax(article_id, version, settings, logger)
    except Exception as exception:
        message = "Failed to get related from lax for digest %s in %s: %s" % (
            article_id,
            pretty_name,
            str(exception),
        )
        raise Exception(message) from exception
def related_from_lax(article_id, version, settings, logger=None, auth=True):
    """Get article json from Lax and return it as a one-element list.

    Returns None when Lax returns no snippet.  Re-raises any exception from
    the Lax lookup after logging it (when a logger was supplied).
    """
    try:
        related_json = lax_provider.article_snippet(article_id, version, settings, auth)
    except Exception as exception:
        # BUGFIX: logger defaults to None; logging unconditionally would
        # raise AttributeError and mask the real Lax failure
        if logger is not None:
            logger.exception(
                (
                    "Exception in getting article snippet from Lax for article_id"
                    " %s, version %s. Details: %s"
                )
                % (str(article_id), str(version), str(exception))
            )
        raise
    if related_json:
        return [related_json]
    return None
def error_email_subject(article_id):
    "email subject for an error email"
    subject_template = u"Error ingesting digest to endpoint: {article_id}"
    return subject_template.format(article_id=article_id)
def send_error_email(article_id, message, settings, logger):
    "email error message to the recipients"
    datetime_string = time.strftime(utils.DATE_TIME_FORMAT, time.gmtime())
    subject = error_email_subject(article_id)
    body = email_provider.simple_email_body(datetime_string, message)
    recipients = email_provider.list_email_recipients(
        settings.digest_validate_error_recipient_email
    )
    messages = email_provider.simple_messages(
        settings.digest_sender_email, recipients, subject, body, logger=logger
    )
    logger.info("Formatted %d email error messages" % len(messages))
    details = email_provider.smtp_send_messages(settings, messages, logger)
    logger.info("Email sending details: %s" % str(details))
| 36.191111 | 88 | 0.608805 | 1,860 | 16,286 | 5.109677 | 0.12043 | 0.063447 | 0.035354 | 0.021991 | 0.384785 | 0.306923 | 0.246107 | 0.202757 | 0.140257 | 0.126368 | 0 | 0.001973 | 0.315179 | 16,286 | 449 | 89 | 36.271715 | 0.850175 | 0.09106 | 0 | 0.277473 | 0 | 0 | 0.148821 | 0.001437 | 0 | 0 | 0 | 0 | 0 | 1 | 0.057692 | false | 0 | 0.024725 | 0 | 0.156593 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
bde6c2d4e221af5daf9ceb3a165e32e65089ccfe | 249 | py | Python | utils/forgiveness_of_the_offender.py | bbt-t/simple-bot_discord | 46fa629e8278e8e453b3c272b2e838d0762aaaf8 | [
"MIT"
] | null | null | null | utils/forgiveness_of_the_offender.py | bbt-t/simple-bot_discord | 46fa629e8278e8e453b3c272b2e838d0762aaaf8 | [
"MIT"
] | null | null | null | utils/forgiveness_of_the_offender.py | bbt-t/simple-bot_discord | 46fa629e8278e8e453b3c272b2e838d0762aaaf8 | [
"MIT"
] | null | null | null | from discord import Member, utils
async def unmute_user(member: Member):
    """Unmute a guild member: clear all roles, re-add the base role, DM them.

    NOTE(review): utils.get returns None when no role with this id exists in
    the guild -- confirm the hard-coded role id is valid for the target guild.
    """
    base_role = utils.get(member.guild.roles, id=809817869914341396)
    await member.edit(roles=())
    await member.add_roles(base_role)
    await member.send('Ты размучен! :)')
| 19.153846 | 63 | 0.706827 | 33 | 249 | 5.272727 | 0.636364 | 0.189655 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.087379 | 0.172691 | 249 | 12 | 64 | 20.75 | 0.757282 | 0 | 0 | 0 | 0 | 0 | 0.060484 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.166667 | 0 | 0.166667 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
bde798fb51621c003debde76678c82dcde2604d3 | 443 | py | Python | mailing/urls.py | Aladom/django-mailing | aa18963b1902e4b7f066b0064a832e26e725f643 | [
"MIT"
] | null | null | null | mailing/urls.py | Aladom/django-mailing | aa18963b1902e4b7f066b0064a832e26e725f643 | [
"MIT"
] | 13 | 2016-02-04T14:56:11.000Z | 2021-06-10T20:39:51.000Z | mailing/urls.py | Aladom/django-mailing | aa18963b1902e4b7f066b0064a832e26e725f643 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from django.conf.urls import url
from .views import MirrorView, SubscriptionsManagementView
__all__ = [
    'app_name', 'urlpatterns',
]

# URL namespace for reversing (e.g. "mailing:mirror").
app_name = 'mailing'

urlpatterns = [
    # Browser ("mirror") view of an email; the URL captures a signed_pk
    # token of the form "<digits>:<signature>".
    url(r'^mirror/(?P<signed_pk>[0-9]+:[a-zA-Z0-9_-]+)/$',
        MirrorView.as_view(), name='mirror'),
    # Subscription management page, addressed by a signed_email token of
    # the form "<email>:<signature>".
    url(r'^subscriptions/(?P<signed_email>.+:[a-zA-Z0-9_-]+)/$',
        SubscriptionsManagementView.as_view(), name='subscriptions'),
]
| 26.058824 | 69 | 0.643341 | 54 | 443 | 5.055556 | 0.574074 | 0.051282 | 0.03663 | 0.043956 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.01847 | 0.14447 | 443 | 16 | 70 | 27.6875 | 0.701847 | 0.047404 | 0 | 0 | 0 | 0 | 0.340476 | 0.233333 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.166667 | 0 | 0.166667 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
bdea2dca87fdb221f4d7b5d7f20709164e7c3a60 | 1,029 | py | Python | code2/day07/demo03.py | picktsh/python | 0f758dcdf9eee3580d8f6e2241ef557b6320ef54 | [
"MIT"
] | 1 | 2019-12-31T16:44:06.000Z | 2019-12-31T16:44:06.000Z | code2/day07/demo03.py | picktsh/python | 0f758dcdf9eee3580d8f6e2241ef557b6320ef54 | [
"MIT"
] | null | null | null | code2/day07/demo03.py | picktsh/python | 0f758dcdf9eee3580d8f6e2241ef557b6320ef54 | [
"MIT"
] | 1 | 2022-01-13T10:32:22.000Z | 2022-01-13T10:32:22.000Z | # 引入requests
import requests

# Request headers: pretend to be a desktop Chrome browser
headers = {
    'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3578.98 Safari/537.36'}
# API endpoint for the author's article list
url = 'https://www.zhihu.com/api/v4/members/zhang-jia-wei/articles?'
# Query-string parameters for the API call
params = {
    'include': 'data[*].comment_count,suggest_edit,is_normal,thumbnail_extra_info,thumbnail,can_comment,comment_permission,admin_closed_comment,content,voteup_count,created,updated,upvoted_followees,voting,review_info,is_labeled,label_info;data[*].author.badge[?(type=best_answerer)].topics',
    'offset': '10',
    'limit': '10',
    'sort_by': 'voteups',
}
# Send the request and keep the response object in res
res = requests.get(url, headers=headers, params=params)
# Confirm the request succeeded, i.e. the response status code is OK
print(res.status_code)
# Parse the response body as JSON and store the result in articles
articles = res.json()
# Print the parsed JSON document
print(articles)
# Take the value stored under the "data" key (the list of articles)
data = articles['data']
# Iterate the list; each element is a dict, so pull the values out by key
for i in data:
    print(i['title'])
    print(i['url'])
    print(i['excerpt'])
| 32.15625 | 292 | 0.739553 | 140 | 1,029 | 5.3 | 0.742857 | 0.024259 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.033659 | 0.104956 | 1,029 | 31 | 293 | 33.193548 | 0.771987 | 0.169096 | 0 | 0 | 0 | 0.157895 | 0.614929 | 0.324645 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.052632 | 0 | 0.052632 | 0.263158 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
bdeb63bd228672aa0d61f1e5f7d0335e8f073585 | 12,597 | py | Python | pykit/codegen/llvm/llvm_codegen.py | ContinuumIO/pyk | 1730d7b831e0cf12a641ac23b5cf03e17e0dc550 | [
"BSD-3-Clause"
] | 9 | 2015-06-23T00:13:49.000Z | 2022-02-23T02:46:43.000Z | pykit/codegen/llvm/llvm_codegen.py | ContinuumIO/pyk | 1730d7b831e0cf12a641ac23b5cf03e17e0dc550 | [
"BSD-3-Clause"
] | 1 | 2017-08-30T08:13:12.000Z | 2017-08-31T06:36:32.000Z | pykit/codegen/llvm/llvm_codegen.py | ContinuumIO/pyk | 1730d7b831e0cf12a641ac23b5cf03e17e0dc550 | [
"BSD-3-Clause"
] | 7 | 2015-05-08T10:17:47.000Z | 2021-04-01T15:00:57.000Z | from functools import partial
from pykit.ir import vvisit, ArgLoader, verify_lowlevel
from pykit.ir import defs, opgrouper
from pykit.types import Boolean, Integral, Real, Pointer, Function, Int64
from pykit.codegen.llvm.llvm_types import llvm_type
import llvm.core as lc
from llvm.core import Type, Constant
#===------------------------------------------------------------------===
# Definitions
#===------------------------------------------------------------------===
# Map pykit comparison operator strings to LLVM *ordered* float predicates.
compare_float = {
    '>': lc.FCMP_OGT,
    '<': lc.FCMP_OLT,
    '==': lc.FCMP_OEQ,
    '>=': lc.FCMP_OGE,
    '<=': lc.FCMP_OLE,
    '!=': lc.FCMP_ONE,
}

# Signed integer comparison predicates.
compare_signed_int = {
    '>': lc.ICMP_SGT,
    '<': lc.ICMP_SLT,
    '==': lc.ICMP_EQ,
    '>=': lc.ICMP_SGE,
    '<=': lc.ICMP_SLE,
    '!=': lc.ICMP_NE,
}

# Unsigned integer comparison predicates.
# NOTE(review): "unsiged" is a typo for "unsigned", but the name is
# referenced by op_compare below, so it is left unchanged here.
compare_unsiged_int = {
    '>': lc.ICMP_UGT,
    '<': lc.ICMP_ULT,
    '==': lc.ICMP_EQ,
    '>=': lc.ICMP_UGE,
    '<=': lc.ICMP_ULE,
    '!=': lc.ICMP_NE,
}

# Booleans only support (in)equality.
compare_bool = {
    '==' : lc.ICMP_EQ,
    '!=' : lc.ICMP_NE
}
# below based on from npm/codegen
def integer_invert(builder, val):
    "bitwise NOT: xor the value with an all-ones constant of the same width"
    all_ones = Constant.int_signextend(val.type, -1)
    return builder.xor(val, all_ones)
def integer_usub(builder, val):
    "integer unary minus: subtract the value from zero"
    zero_const = Constant.int(val.type, 0)
    return builder.sub(zero_const, val)
def integer_not(builder, value):
    "logical NOT for integers: equality comparison against zero"
    zero_const = Constant.int(value.type, 0)
    return builder.icmp(lc.ICMP_EQ, value, zero_const)
def float_usub(builder, val):
    "float unary minus: subtract the value from 0.0"
    zero_const = Constant.real(val.type, 0)
    return builder.fsub(zero_const, val)
def float_not(builder, val):
    "logical NOT for floats: ordered-equal comparison against 0.0"
    zero_const = Constant.real(val.type, 0)
    return builder.fcmp(lc.FCMP_OEQ, val, zero_const)
# Integer binary ops: each entry is a (signed, unsigned) pair of
# llvm.core.Builder methods; op_binary indexes by op.type.unsigned.
binop_int = {
    '+': (lc.Builder.add, lc.Builder.add),
    '-': (lc.Builder.sub, lc.Builder.sub),
    '*': (lc.Builder.mul, lc.Builder.mul),
    '/': (lc.Builder.sdiv, lc.Builder.udiv),
    '//': (lc.Builder.sdiv, lc.Builder.udiv),
    '%': (lc.Builder.srem, lc.Builder.urem),
    '&': (lc.Builder.and_, lc.Builder.and_),
    '|': (lc.Builder.or_, lc.Builder.or_),
    '^': (lc.Builder.xor, lc.Builder.xor),
    '<<': (lc.Builder.shl, lc.Builder.shl),
    '>>': (lc.Builder.ashr, lc.Builder.lshr),  # arithmetic vs logical shift
}

# Float binary ops (no signedness distinction).
binop_float = {
    '+': lc.Builder.fadd,
    '-': lc.Builder.fsub,
    '*': lc.Builder.fmul,
    '/': lc.Builder.fdiv,
    '//': lc.Builder.fdiv,
    '%': lc.Builder.frem,
}

# Unary ops per operand category; values are the helper functions above.
unary_bool = {
    '!': integer_not,
}

unary_int = {
    '~': integer_invert,
    '!': integer_not,
    "+": lambda builder, arg: arg,  # unary plus is the identity
    "-": integer_usub,
}

unary_float = {
    '!': float_not,
    "+": lambda builder, arg: arg,  # unary plus is the identity
    "-": float_usub,
}
#===------------------------------------------------------------------===
# Utils
#===------------------------------------------------------------------===
# Commonly used LLVM integer types.
i1, i16, i32, i64 = map(Type.int, [1, 16, 32, 64])

def const_int(type, value):
    "build an LLVM integer constant of the given type"
    return Constant.int(type, value)

# Convenience constructors for specific widths / values.
const_i32 = partial(const_int, i32)
const_i64 = partial(const_int, i64)
zero = partial(const_int, value=0)
one = partial(const_int, value=1)
def sizeof(builder, ty, intp, name=''):
    """Emit IR computing sizeof(ty) as an integer of type intp.

    Uses the GEP-from-null trick: index one element past a null pointer of
    type ty* and convert the resulting address to an integer.

    The optional name is applied to the resulting ptrtoint instruction; it
    was added because Translator.op_sizeof in this module calls sizeof with
    a fourth result-name argument, which previously raised TypeError.
    """
    ptr = Type.pointer(ty)
    null = Constant.null(ptr)
    offset = builder.gep(null, [Constant.int(Type.int(), 1)])
    return builder.ptrtoint(offset, intp, name)
#===------------------------------------------------------------------===
# Translator
#===------------------------------------------------------------------===
class Translator(object):
    """
    Translate a function in low-level form.

    This means it can only use values of type Bool, Int, Float, Struct or
    Pointer. Values of type Function may be called.
    """

    def __init__(self, func, env, lfunc, llvm_typer, llvm_module):
        self.func = func             # pykit function being translated
        self.env = env               # pipeline environment dict
        self.lfunc = lfunc           # LLVM function being populated
        self.llvm_type = llvm_typer  # maps pykit types to LLVM types
        self.lmod = llvm_module
        self.builder = None          # created lazily on first block switch
        self.phis = []               # [pykit_phi] patched up by update_phis

    def blockswitch(self, newblock):
        "position the instruction builder at the end of newblock"
        if not self.builder:
            self.builder = lc.Builder.new(newblock)
        self.builder.position_at_end(newblock)

    # __________________________________________________________________

    def op_arg(self, arg):
        "map a pykit function argument to the matching LLVM argument"
        return self.lfunc.args[self.func.args.index(arg)]

    # __________________________________________________________________

    def op_unary(self, op, arg):
        "emit a unary operation, dispatching on the result type category"
        opmap = { Boolean: unary_bool,
                  Integral: unary_int,
                  Real: unary_float }[type(op.type)]
        unop = defs.unary_opcodes[op.opcode]
        return opmap[unop](self.builder, arg)

    def op_binary(self, op, left, right):
        "emit a binary arithmetic/bitwise operation"
        binop = defs.binary_opcodes[op.opcode]
        if op.type.is_int:
            # the int table holds (signed, unsigned) builder methods
            genop = binop_int[binop][op.type.unsigned]
        else:
            genop = binop_float[binop]
        return genop(self.builder, left, right, op.result)

    def op_compare(self, op, left, right):
        "emit an integer or float comparison"
        cmpop = defs.compare_opcodes[op.opcode]
        type = op.args[0].type
        if type.is_int and type.unsigned:
            cmp, lop = self.builder.icmp, compare_unsiged_int[cmpop]
        elif type.is_int or type.is_bool:
            cmp, lop = self.builder.icmp, compare_signed_int[cmpop]
        else:
            cmp, lop = self.builder.fcmp, compare_float[cmpop]
        return cmp(lop, left, right, op.result)

    # __________________________________________________________________

    def op_convert(self, op, arg):
        "cast arg to the LLVM equivalent of op.type"
        from llpython.byte_translator import LLVMCaster
        unsigned = op.type.is_int and op.type.unsigned
        # The float cast doesn't accept this keyword argument
        kwds = {'unsigned': unsigned} if unsigned else {}
        return LLVMCaster.build_cast(self.builder, arg,
                                     self.llvm_type(op.type), **kwds)

    # __________________________________________________________________

    def op_call(self, op, function, args):
        # Get the callee LLVM function from the cache. This is put there by
        # pykit.codegen.codegen
        cache = self.env["codegen.cache"]
        lfunc = cache[function]
        return self.builder.call(lfunc, args)

    def op_call_math(self, op, name, args):
        # Math is resolved by an LLVM postpass; declare a placeholder symbol
        argtypes = [arg.type for arg in args]
        lfunc_type = self.llvm_type(Function(op.type, argtypes))
        lfunc = self.lmod.get_or_insert_function(
            lfunc_type, 'pykit.math.%s.%s' % (map(str, argtypes), name.lower()))
        return self.builder.call(lfunc, args, op.result)

    # __________________________________________________________________

    def op_getfield(self, op, struct, attr):
        # NOTE(review): llvm.core extract_value expects a plain int index;
        # confirm passing a Constant is accepted here.
        index = const_i32(op.type.names.index(attr))
        return self.builder.extract_value(struct, index, op.result)

    def op_setfield(self, op, struct, attr, value):
        # NOTE(review): insert_element is the *vector* operation; a struct
        # update would normally use insert_value -- confirm.
        index = const_i32(op.type.names.index(attr))
        return self.builder.insert_element(struct, value, index, op.result)

    # __________________________________________________________________

    def op_getindex(self, op, array, indices):
        # (this method was previously defined twice with identical bodies;
        # the redundant duplicate definition has been removed)
        return self.builder.gep(array, indices, op.result)

    def op_setindex(self, op, array, indices, value):
        ptr = self.builder.gep(array, indices)
        # BUGFIX: Builder.store takes (value, ptr) -- the arguments were
        # previously passed in the reverse order (cf. op_store/op_ptrstore)
        self.builder.store(value, ptr)

    # __________________________________________________________________

    def op_alloca(self, op):
        "stack-allocate one element of the pointee type"
        llvm_pointer_type = self.llvm_type(op.type)
        return self.builder.alloca(llvm_pointer_type.pointee, op.result)

    def op_load(self, op, stackvar):
        return self.builder.load(stackvar, op.result)

    def op_store(self, op, value, stackvar):
        self.builder.store(value, stackvar)

    # __________________________________________________________________

    def op_jump(self, op, block):
        self.builder.branch(block)

    def op_cbranch(self, op, test, true_block, false_block):
        self.builder.cbranch(test, true_block, false_block)

    def op_phi(self, op):
        # incoming (value, block) pairs are filled in later by update_phis
        phi = self.builder.phi(self.llvm_type(op.type), op.result)
        self.phis.append(op)
        return phi

    def op_ret(self, op, value):
        if value is None:
            assert self.func.type.restype.is_void
            self.builder.ret_void()
        else:
            self.builder.ret(value)

    # __________________________________________________________________

    def op_sizeof(self, op, type):
        # NOTE(review): sizeof() is defined above with three parameters but
        # this call passes op.result as a fourth -- confirm the signatures
        # agree.
        int_type = self.llvm_type(op.type)
        item_type = self.llvm_type(type)
        return sizeof(self.builder, item_type, int_type, op.result)

    def op_addressof(self, op, func):
        "materialize a known function address as a function pointer"
        assert func.address
        addr = const_int(i64, func.address)
        return self.builder.inttoptr(addr, self.llvm_type(Pointer(func.type)))

    # __________________________________________________________________

    def op_ptradd(self, op, ptr, val):
        return self.builder.gep(ptr, [val], op.result)

    def op_ptrload(self, op, ptr):
        return self.builder.load(ptr, op.result)

    def op_ptrstore(self, op, ptr, val):
        # NOTE(review): Builder.store may not accept a name argument --
        # confirm op.result is valid here.
        return self.builder.store(val, ptr, op.result)

    def op_ptrcast(self, op, val):
        return self.builder.bitcast(val, self.llvm_type(op.type), op.result)

    def op_ptr_isnull(self, op, val):
        "test a pointer for null by comparing its integer value against 0"
        intval = self.builder.ptrtoint(val, self.llvm_type(Int64))
        return self.builder.icmp(lc.ICMP_EQ, intval, zero(intval.type), op.result)
# __________________________________________________________________
def allocate_blocks(llvm_func, pykit_func):
    """Return a dict mapping pykit blocks to llvm blocks"""
    return {
        block: llvm_func.append_basic_block(pykit_func.name)
        for block in pykit_func.blocks
    }
def update_phis(phis, valuemap, argloader):
    """
    Update LLVM phi values given a list of pykit phi values and block and
    value dicts mapping pykit values to LLVM values
    """
    for pykit_phi in phis:
        llvm_phi = valuemap[pykit_phi.result]
        incoming_blocks = [argloader.load_op(b) for b in pykit_phi.args[0]]
        incoming_values = [argloader.load_op(v) for v in pykit_phi.args[1]]
        for blk, val in zip(incoming_blocks, incoming_values):
            llvm_phi.add_incoming(val, blk)
#===------------------------------------------------------------------===
# Pass to group operations such as add/mul
#===------------------------------------------------------------------===
class LLVMArgLoader(ArgLoader):
    """
    Load Operation arguments as LLVM values passed and extra *args to the
    Translator.
    """

    def __init__(self, store, engine, llvm_module, lfunc, blockmap):
        super(LLVMArgLoader, self).__init__(store)
        self.engine = engine            # LLVM execution engine
        self.llvm_module = llvm_module  # LLVM module owning lfunc
        self.lfunc = lfunc              # LLVM function being generated
        self.blockmap = blockmap        # pykit block -> LLVM basic block

    def load_GlobalValue(self, arg):
        """Resolve a pykit GlobalValue to an LLVM value."""
        if arg.external:
            # BUGFIX: the attribute is self.llvm_module; "self.lmod" was
            # never set on this class and raised AttributeError.
            # get_or_insert_function also requires the symbol name.
            # NOTE(review): assumes the GlobalValue exposes .name -- confirm.
            value = self.llvm_module.get_or_insert_function(
                llvm_type(arg.type), arg.name)
            if arg.address:
                self.engine.add_global_mapping(value, arg.address)
        else:
            assert arg.value
            value = arg.value.const
        return value

    def load_Block(self, arg):
        "map a pykit basic block to its LLVM basic block"
        return self.blockmap[arg]

    def load_Constant(self, arg):
        """Build an LLVM constant matching the pykit constant's type."""
        ty = type(arg.type)
        lty = llvm_type(arg.type)
        if ty == Pointer:
            if arg.const == 0:
                return lc.Constant.null(lty)
            else:
                # BUGFIX: inttoptr must target the pointer type lty, not
                # the integer type i64
                return const_i64(arg.const).inttoptr(lty)
        elif ty == Integral:
            if arg.type.unsigned:
                return lc.Constant.int(lty, arg.const)
            else:
                return lc.Constant.int_signextend(lty, arg.const)
        elif ty == Real:
            return lc.Constant.real(lty, arg.const)
        else:
            raise NotImplementedError("Constants for", ty)

    def load_Undef(self, arg):
        "LLVM undef value of the corresponding type"
        return lc.Constant.undef(llvm_type(arg.type))
def initialize(func, env):
    "verify the function is in low-level form and declare its LLVM function"
    verify_lowlevel(func)
    lmod = env["codegen.llvm.module"]
    lfunc_type = llvm_type(func.type)
    return lmod.add_function(lfunc_type, func.name)
def translate(func, env, lfunc):
    "generate LLVM IR for func into lfunc and return lfunc"
    engine = env["codegen.llvm.engine"]
    llvm_module = env["codegen.llvm.module"]
    blockmap = allocate_blocks(lfunc, func)

    # Create the visitor that emits LLVM instructions per pykit operation
    translator = Translator(func, env, lfunc, llvm_type, llvm_module)
    visitor = opgrouper(translator)

    # Generate code, then patch up the phi incoming values
    argloader = LLVMArgLoader(None, engine, llvm_module, lfunc, blockmap)
    valuemap = vvisit(visitor, func, argloader)
    update_phis(translator.phis, valuemap, argloader)

    return lfunc
bdebe8ab095dd6dc14338c32ad38db4e4ac43ada | 1,056 | py | Python | ads_dashboard/ads/models.py | vintage/ads_dashboard | 85a8540411a9af0a2e41dd3730b52e3c6b3805d4 | [
"MIT"
] | null | null | null | ads_dashboard/ads/models.py | vintage/ads_dashboard | 85a8540411a9af0a2e41dd3730b52e3c6b3805d4 | [
"MIT"
] | 5 | 2020-02-12T09:18:05.000Z | 2021-09-22T18:05:21.000Z | ads_dashboard/ads/models.py | vintage/ads_dashboard | 85a8540411a9af0a2e41dd3730b52e3c6b3805d4 | [
"MIT"
] | null | null | null | from django.db import models
class Campaign(models.Model):
    """An advertising campaign, identified by a unique name."""

    name = models.CharField("name", unique=True, max_length=255)

    class Meta:
        verbose_name = "campaign"
        verbose_name_plural = "campaigns"

    def __str__(self):
        return self.name
class DataSource(models.Model):
    """A source of campaign statistics, identified by a unique name."""

    name = models.CharField("name", unique=True, max_length=255)

    class Meta:
        verbose_name = "data source"
        verbose_name_plural = "data sources"

    def __str__(self):
        return self.name
class CampaignStats(models.Model):
    """Daily click/impression metrics for a campaign from one data source.

    The (date, data_source, campaign) triple is unique, so there is at most
    one stats row per campaign per source per day.
    """

    date = models.DateField("date")
    data_source = models.ForeignKey(DataSource, on_delete=models.CASCADE)
    campaign = models.ForeignKey(Campaign, on_delete=models.CASCADE)
    clicks = models.IntegerField("clicks")
    impressions = models.IntegerField("impressions")

    class Meta:
        verbose_name = "campaign stats"
        verbose_name_plural = "campaign stats"
        unique_together = (("date", "data_source", "campaign",),)

    def __str__(self):
        return f"Stats #{self.pk}"
| 26.4 | 73 | 0.673295 | 121 | 1,056 | 5.644628 | 0.338843 | 0.096633 | 0.070278 | 0.087848 | 0.360176 | 0.307467 | 0.307467 | 0.222548 | 0.222548 | 0.222548 | 0 | 0.007238 | 0.214962 | 1,056 | 39 | 74 | 27.076923 | 0.816647 | 0 | 0 | 0.37037 | 0 | 0 | 0.128788 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.111111 | false | 0 | 0.037037 | 0.111111 | 0.740741 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 1 | 0 | 0 | 2 |
bdeef5ecb135e522f7c40abc5e24bd958b8ff052 | 1,859 | py | Python | DatabaseHandler/sqlite_operations.py | utkarsh7236/SCILLA | e11e4d753823ad522a1b3168283b6e6ffe3ea393 | [
"Apache-2.0"
] | 17 | 2019-12-09T19:09:07.000Z | 2021-08-29T01:11:13.000Z | DatabaseHandler/sqlite_operations.py | utkarsh7236/SCILLA | e11e4d753823ad522a1b3168283b6e6ffe3ea393 | [
"Apache-2.0"
] | 1 | 2021-04-14T15:08:18.000Z | 2021-04-14T15:08:18.000Z | DatabaseHandler/sqlite_operations.py | utkarsh7236/SCILLA | e11e4d753823ad522a1b3168283b6e6ffe3ea393 | [
"Apache-2.0"
] | 2 | 2020-06-05T03:01:06.000Z | 2020-07-09T07:13:12.000Z | #!/usr/bin/env python
__author__ = 'Florian Hase'
#========================================================================
import time
import sqlalchemy as sql
#========================================================================
class AddEntry(object):
    """Insert an entry (or list of entries) into a database table."""

    def __init__(self, database, table, entry):
        self.db = database      # engine-like object exposing .connect()
        self.table = table      # table-like object exposing .insert()
        self.entry = entry      # dict (or list of dicts) of column values

    def execute(self):
        """Run the INSERT inside a fresh connection.

        The unused start/end time.time() locals of the original (dead
        timing scaffolding) have been removed.
        """
        with self.db.connect() as conn:
            conn.execute(self.table.insert(), self.entry)
            conn.close()
#========================================================================
class FetchEntries(object):
    """Run a SELECT and expose the fetched rows via get_entries()."""

    def __init__(self, database, table, selection, name='test'):
        self.db = database            # engine-like object exposing .connect()
        self.table = table            # table the selection refers to (unused here)
        self.selection = selection    # selectable statement to execute
        self.entries = None           # rows, populated by execute()
        self.executed = False         # True once execute() has completed
        self.entries_fetched = False  # True once get_entries() has returned
        self.name = name              # label for this fetch operation

    def execute(self):
        """Execute the selection and store all fetched rows in self.entries.

        The unused start/end timing locals of the original were removed.
        """
        with self.db.connect() as conn:
            selected = conn.execute(self.selection)
            entries = selected.fetchall()
            conn.close()
        self.entries = entries
        self.executed = True

    def get_entries(self):
        """Block until execute() has finished, then return the rows.

        NOTE(review): this is a busy-wait spin loop; it only terminates if
        execute() completes (e.g. on another thread).  A threading.Event
        would be cheaper, but the polling contract is kept unchanged.
        """
        while not self.executed:
            pass
        self.entries_fetched = True
        return self.entries
#========================================================================
class UpdateEntries(object):
    """Execute one or many UPDATE statements against the database."""

    def __init__(self, database, table, updates):
        self.db = database      # engine-like object exposing .connect()
        self.table = table      # table the updates refer to (unused here)
        self.updates = updates  # a single statement or a list of statements

    def execute(self):
        """Run the update statement(s) inside a fresh connection.

        The unused start/end timing locals and the unused `updated` result
        binding of the original were removed.
        """
        if isinstance(self.updates, list):
            with self.db.connect() as conn:
                for update in self.updates:
                    conn.execute(update)
                conn.close()
        else:
            with self.db.connect() as conn:
                conn.execute(self.updates)
                conn.close()
| 23.833333 | 73 | 0.550834 | 201 | 1,859 | 4.995025 | 0.293532 | 0.041833 | 0.039841 | 0.067729 | 0.39741 | 0.35757 | 0.195219 | 0.099602 | 0.099602 | 0.099602 | 0 | 0.000669 | 0.196342 | 1,859 | 77 | 74 | 24.142857 | 0.671352 | 0.166218 | 0 | 0.418182 | 0 | 0 | 0.010356 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.127273 | false | 0.018182 | 0.036364 | 0 | 0.236364 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
bdf105752f21bbc068ce977d28dde3f6db125f50 | 8,818 | py | Python | main.py | omegaBionic/pysparkPower | 1354247e4ec085a65f288a1f31a05875f003da72 | [
"Apache-2.0"
] | null | null | null | main.py | omegaBionic/pysparkPower | 1354247e4ec085a65f288a1f31a05875f003da72 | [
"Apache-2.0"
] | null | null | null | main.py | omegaBionic/pysparkPower | 1354247e4ec085a65f288a1f31a05875f003da72 | [
"Apache-2.0"
] | null | null | null | import matplotlib.pyplot as plt
import numpy as np
import pylab as pl
import pandas as pd
from pyspark import SQLContext
from pyspark.ml.clustering import KMeans
from pyspark.ml.feature import VectorAssembler
from pyspark.sql import SparkSession
from pyspark.sql.types import StructType, StructField, IntegerType
from data.process_initial_file import dict_education, list_education, list_race
def elbow_method_evaluation(df, max_k=10):
    """Fit K-means for k = 2 .. max_k-1 and plot the elbow curve.

    For each candidate k, a KMeans model is fit on the "features" column of
    df and its training cost recorded; the cost-vs-k curve is then plotted
    so the "elbow" (best cluster count) can be read off visually.

    df: Spark DataFrame with a "features" vector column.
    max_k: exclusive upper bound on the cluster count (default 10, which
        matches the previously hard-coded range and cluster-label list).
    """
    # Calculate cost for each candidate cluster count
    cost = np.zeros(max_k)
    for k in range(2, max_k):
        kmeans = KMeans().setK(k).setSeed(1).setFeaturesCol("features")
        model = kmeans.fit(df)
        cost[k] = model.summary.trainingCost

    # Plot the cost against the cluster count
    df_cost = pd.DataFrame(cost[2:])
    df_cost.columns = ["cost"]
    # generalized from the literal list [2, 3, ..., 9]
    df_cost.insert(0, 'cluster', list(range(2, max_k)))
    pl.plot(df_cost.cluster, df_cost.cost)
    pl.xlabel('Number of Clusters')
    pl.ylabel('Score')
    pl.title('Elbow Curve')
    pl.show()
# Create (or reuse) the Spark session for this script
spark = SparkSession \
    .builder \
    .appName("Python Spark SQL basic example") \
    .config("spark.some.config.option", "some-value") \
    .getOrCreate()

# Explicit CSV schema: every column is declared as a nullable integer
# (the census columns were presumably pre-encoded to integer codes by
# data/process_initial_file -- see the lookup lists imported above)
nullable = True
schema = StructType([
    StructField("age", IntegerType(), nullable),
    StructField("workclass", IntegerType(), nullable),
    StructField("fnlwgt", IntegerType(), nullable),
    StructField("education", IntegerType(), nullable),
    StructField("marital-status", IntegerType(), nullable),
    StructField("occupation", IntegerType(), nullable),
    StructField("relationship", IntegerType(), nullable),
    StructField("race", IntegerType(), nullable),
    StructField("sex", IntegerType(), nullable),
    StructField("capital-gain", IntegerType(), nullable),
    StructField("capital-loss", IntegerType(), nullable),
    StructField("hours-per-week", IntegerType(), nullable),
    StructField("native-country", IntegerType(), nullable),
    StructField("is-upper-than-50k", IntegerType(), nullable)
])
# Create the SQL context on top of the Spark session
sqlContext = SQLContext(sparkContext=spark.sparkContext, sparkSession=spark)

# Read the preprocessed census file using the explicit schema
df = sqlContext.read.csv("data/adult_processed_data.data", header=True, sep=",", schema=schema)

# Display all columns
# print(df.collect())

# Display the column names and a sample of the data
print(df.columns)
# df.select("is-upper-than-50k").show()
df.select("*").show()

# Create the "features" column by assembling the selected numeric columns
col1_name = 'education'
col2_name = 'capital-gain'
col3_name = 'race'
col4_name = 'hours-per-week'
inputCols = [col1_name, col2_name, col3_name]
vecAssembler = VectorAssembler(
    inputCols=inputCols,
    outputCol="features")
adults_with_features = vecAssembler.transform(df)
# Figure 1: education vs capital-gain, colored by K-means cluster
# Do K-means
# Evaluate number of clusters with the elbow method
elbow_method_evaluation(adults_with_features)
k = 3
kmeans_algo = KMeans().setK(k).setSeed(1).setFeaturesCol("features")
model = kmeans_algo.fit(adults_with_features)
centers = model.clusterCenters()
# Assign clusters to adults
# Cluster prediction, named prediction and used after for color
adults_with_clusters = model.transform(adults_with_features)
# Display Centers
print("Centers: '{}'".format(centers))
# Convert Spark Data Frame to Pandas Data Frame
adults_for_viz = adults_with_clusters.toPandas()
print("STARTING PRINTING ADULTS_for")
print("adults_for_viz.prediction.value_counts(): '{}'".format(adults_for_viz.prediction.value_counts()))
# Split rows by the income label: A earns <=50k, B earns >50k
A = adults_for_viz[adults_for_viz["is-upper-than-50k"] == 0]
B = adults_for_viz[adults_for_viz["is-upper-than-50k"] == 1]
# Colors code k-means results, cluster numbers
colors = {0: 'red', 1: 'blue', 2: 'orange'}
# Draw dots: '.' marker for <=50k rows, 'x' marker for >50k rows
fig = plt.figure().add_subplot()
fig.scatter(A[col1_name],
            A[col2_name],
            c=A.prediction.map(colors),
            marker='.')
fig.scatter(B[col1_name],
            B[col2_name],
            c=B.prediction.map(colors),
            marker='x')
# Draw grid
plt.grid()
# Set text
plt.title("Combined Statistics 1")
plt.xlabel(col1_name)
plt.ylabel(col2_name)
# TODO To change in case col1_name is changed
plt.xticks(range(0, len(list_education)), list_education, rotation='vertical')
plt.legend(['is-upper-than-50k: False', 'is-upper-than-50k: True'])
# Save figure
plt.savefig("picture1.png", bbox_inches='tight')
# Show fig
plt.show()
# Figure 2: same education vs capital-gain scatter as figure 1
# Draw dots
fig = plt.figure().add_subplot()
fig.scatter(A[col1_name],
            A[col2_name],
            c=A.prediction.map(colors),
            marker='.')
fig.scatter(B[col1_name],
            B[col2_name],
            c=B.prediction.map(colors),
            marker='x')
# fig.set_yscale('log', base=2)
# Draw grid
plt.grid()
# Set text
plt.title("Combined Statistics 2")
plt.xlabel(col1_name)
plt.ylabel(col2_name)
plt.xticks(range(0, len(list_education)), list_education, rotation='vertical')
plt.legend(['is-upper-than-50k: False', 'is-upper-than-50k: True'])
# Save figure
plt.savefig("picture2.png", bbox_inches='tight')
# Show fig
plt.show()
# Figure 3: race vs capital-gain, re-clustered on [capital-gain, race] only
inputCols = [col2_name, col3_name]
vecAssembler = VectorAssembler(
    inputCols=inputCols,
    outputCol="features")
adults_with_features = vecAssembler.transform(df)
# Do K-means
k = 3
kmeans_algo = KMeans().setK(k).setSeed(1).setFeaturesCol("features")
model = kmeans_algo.fit(adults_with_features)
centers = model.clusterCenters()
# Assign clusters to adults (original comment said "flowers": copy residue)
# Cluster prediction, named prediction and used after for color
adults_with_clusters = model.transform(adults_with_features)
# Display Centers
print("Centers: '{}'".format(centers))
# Convert Spark Data Frame to Pandas Data Frame
adults_for_viz = adults_with_clusters.toPandas()
print("STARTING PRINTING ADULTS_for")
print("adults_for_viz.prediction.value_counts(): '{}'".format(adults_for_viz.prediction.value_counts()))
# Split rows by the income label: A earns <=50k, B earns >50k
A = adults_for_viz[adults_for_viz["is-upper-than-50k"] == 0]
B = adults_for_viz[adults_for_viz["is-upper-than-50k"] == 1]
# Colors code k-means results, cluster numbers
colors = {0: 'red', 1: 'blue', 2: 'orange'}
# Draw dots
fig = plt.figure().add_subplot()
fig.scatter(A[col3_name],
            A[col2_name],
            c=A.prediction.map(colors),
            marker='.')
fig.scatter(B[col3_name],
            B[col2_name],
            c=B.prediction.map(colors),
            marker='x')
# fig.set_yscale('log', base=2)
# Draw grid
plt.grid()
# Set text
plt.title("Combined Statistics 3")
plt.xlabel(col3_name)
plt.ylabel(col2_name)
plt.xticks(range(0, len(list_race)), list_race, rotation='vertical')
plt.legend(['is-upper-than-50k: False', 'is-upper-than-50k: True'])
# Save figure
plt.savefig("picture3.png", bbox_inches='tight')
# Show fig
plt.show()
# TODO PUT HERE
# Figure 4
inputCols = [col1_name, col3_name, col4_name]
vecAssembler = VectorAssembler(
inputCols=inputCols,
outputCol="features")
adults_with_features = vecAssembler.transform(df)
elbow_method_evaluation(adults_with_features)
# Do K-means
k = 3
kmeans_algo = KMeans().setK(k).setSeed(1).setFeaturesCol("features")
model = kmeans_algo.fit(adults_with_features)
centers = model.clusterCenters()
# Assign clusters to flowers
# Cluster prediction, named prediction and used after for color
adults_with_clusters = model.transform(adults_with_features)
# Display Centers
print("Centers: '{}'".format(centers))
# Convert Spark Data Frame to Pandas Data Frame
adults_for_viz = adults_with_clusters.toPandas()
print("STARTING PRINTING ADULTS_for")
print("adults_for_viz.prediction.value_counts(): '{}'".format(adults_for_viz.prediction.value_counts()))
# Vizualize
A = adults_for_viz[adults_for_viz["is-upper-than-50k"] == 0]
B = adults_for_viz[adults_for_viz["is-upper-than-50k"] == 1]
# Colors code k-means results, cluster numbers
colors = {0: 'red', 1: 'blue', 2: 'orange'}
# Draw dots
fig_3d = plt.figure()
ax = plt.axes(projection='3d')
ax.set_xlabel(col1_name)
ax.set_ylabel(col3_name)
ax.set_zlabel(col4_name)
ax.set_xticks(range(0, len(list_education)))
ax.set_xticklabels(list_education, rotation=90,
verticalalignment='baseline',
horizontalalignment='left')
ax.set_yticks(range(0, len(list_race)))
ax.set_yticklabels(list_race, rotation=-15,
verticalalignment='baseline',
horizontalalignment='left')
# Data for three-dimensional scattered points
ax.scatter3D(A[col1_name], A[col3_name], A[col4_name], c=A.prediction.map(colors), cmap='Greens', marker='.')
ax.scatter3D(B[col1_name], B[col3_name], B[col4_name], c=B.prediction.map(colors), cmap='Greens', marker='x')
# Save figure
plt.savefig("picture4.png", bbox_inches='tight')
plt.show()
# DEBUG: Display stats
print("k: '{}'".format(k))
print("A.prediction.value_counts(): '{}'".format(A.prediction.value_counts()))
print("B.prediction.value_counts(): '{}'".format(B.prediction.value_counts()))
| 29.790541 | 109 | 0.713087 | 1,203 | 8,818 | 5.075644 | 0.202826 | 0.035375 | 0.041271 | 0.0321 | 0.590894 | 0.587455 | 0.556666 | 0.556666 | 0.543727 | 0.522764 | 0 | 0.017317 | 0.142096 | 8,818 | 295 | 110 | 29.891525 | 0.789822 | 0.150488 | 0 | 0.536313 | 0 | 0 | 0.160409 | 0.031355 | 0 | 0 | 0 | 0.00339 | 0 | 1 | 0.005587 | false | 0 | 0.055866 | 0 | 0.061453 | 0.072626 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
bdf19052bf08ce92ba043372f3b11061a349f71e | 408 | py | Python | app/pages/urls.py | julianpetrich/jpdotcom | ba9dac9c86f6d15374da9a37aac68963e56bcd93 | [
"MIT"
] | null | null | null | app/pages/urls.py | julianpetrich/jpdotcom | ba9dac9c86f6d15374da9a37aac68963e56bcd93 | [
"MIT"
] | null | null | null | app/pages/urls.py | julianpetrich/jpdotcom | ba9dac9c86f6d15374da9a37aac68963e56bcd93 | [
"MIT"
] | null | null | null | from django.urls import path
from .views import AboutView, ContactView, HomePageView, PrivacyView, TermsView
# Route table for the static pages: (URL pattern, view class, route name).
# Kept in the original order; patterns are distinct so ordering is cosmetic.
_pages = (
    ("", HomePageView, "home"),
    ("about", AboutView, "about"),
    ("contact", ContactView, "contact"),
    ("terms", TermsView, "terms"),
    ("privacy", PrivacyView, "privacy"),
)
urlpatterns = [path(route, view.as_view(), name=name) for route, view, name in _pages]
| 34 | 79 | 0.688725 | 48 | 408 | 5.75 | 0.416667 | 0.108696 | 0.181159 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.129902 | 408 | 11 | 80 | 37.090909 | 0.777465 | 0 | 0 | 0 | 0 | 0 | 0.127451 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.222222 | 0 | 0.222222 | 0 | 0 | 0 | 0 | null | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
bdf4b884e53e55033540d679eaf6e95f48c085d7 | 118 | py | Python | tests/test_crawler.py | Yotamho/nba-analytics | 13174040198d44aab035de58cf785bce6926958a | [
"MIT"
] | null | null | null | tests/test_crawler.py | Yotamho/nba-analytics | 13174040198d44aab035de58cf785bce6926958a | [
"MIT"
] | null | null | null | tests/test_crawler.py | Yotamho/nba-analytics | 13174040198d44aab035de58cf785bce6926958a | [
"MIT"
] | null | null | null | from nba_analytics.crawler import pbp_for_range
def test_crawler():
    """Smoke test: play-by-play data can be fetched for a season range."""
    # PEP 8: identity comparison with None uses `is not None`; `!= None`
    # can be fooled by objects that override __eq__.
    assert pbp_for_range(3, 2008, 2009) is not None
| 19.666667 | 47 | 0.762712 | 19 | 118 | 4.421053 | 0.789474 | 0.142857 | 0.261905 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.09 | 0.152542 | 118 | 5 | 48 | 23.6 | 0.75 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.333333 | 1 | 0.333333 | true | 0 | 0.333333 | 0 | 0.666667 | 0 | 1 | 0 | 0 | null | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 7 |
bdf5bfe6b045a2bc243a77cfba2030c81bcde42d | 3,781 | py | Python | src/open3DTool/visualizer.py | MobileRoboticsSkoltech/plane-segmentation-research | 0627512c4cb53326de1aabf815e755d9e4484c9c | [
"Apache-2.0"
] | 1 | 2021-10-15T08:18:55.000Z | 2021-10-15T08:18:55.000Z | src/open3DTool/visualizer.py | MobileRoboticsSkoltech/plane-segmentation-research | 0627512c4cb53326de1aabf815e755d9e4484c9c | [
"Apache-2.0"
] | 1 | 2021-11-18T16:37:28.000Z | 2021-11-18T16:37:28.000Z | src/open3DTool/visualizer.py | MobileRoboticsSkoltech/plane-segmentation-research | 0627512c4cb53326de1aabf815e755d9e4484c9c | [
"Apache-2.0"
] | null | null | null | from src.open3DTool.planeUtils import (
segment_points_on_plane_by_picked_points,
pick_points_utils,
)
from src.algorithmsForPointCloud.fileUtils import (
get_point_cloud_from_bin_file,
generate_labels_and_object_files,
)
from src.open3DTool.fileUtils import update_label_files
import numpy as np
import open3d as o3d
class Visualizer:
    """Interactive Open3D tool for labeling planes in a point cloud.

    The user picks ``pick_points_count`` points defining a plane (Space key);
    points within ``distance`` of that plane are segmented out and the label
    files on disk are updated.  Backspace undoes the last segmentation.
    """

    def __init__(
        self,
        path_to_bin_file: str,
        path_to_save_file_label: str,
        path_to_save_file_object: str,
        path_to_pcd_file: str,
        distance: np.intc,
        pick_points_count: np.intc,
    ):
        """Load the cloud from ``path_to_bin_file`` and create empty label files."""
        # Bug fix: these used to be mutable *class* attributes, so state such
        # as `picked_indexes` (a list mutated with .append) was shared between
        # all Visualizer instances.  All state now lives on the instance.
        self.point_cloud = get_point_cloud_from_bin_file(path_to_bin_file)
        self.point_cloud.paint_uniform_color([0.51, 0.51, 0.51])
        self.path_to_pcd_file = path_to_pcd_file
        self.path_to_label_file = path_to_save_file_label
        self.path_to_object_file = path_to_save_file_object
        # Maximum point-to-plane distance for a point to be segmented.
        self.distance = distance
        # Number of points the user must pick to define a plane.
        self.pick_points_count = pick_points_count
        self.main_visualizer = o3d.visualization.VisualizerWithKeyCallback()
        # Sizes (point counts) of segmented groups, newest last; used by undo.
        self.picked_indexes = []
        self.generate_label_files([])

    def generate_label_files(self, indexes: list):
        """(Re)create the label and object files for the current cloud."""
        generate_labels_and_object_files(
            len(self.point_cloud.points),
            indexes,
            self.path_to_label_file,
            self.path_to_object_file,
        )

    def update_pcd_and_label_files(self, count_of_points: int, is_append_right: bool):
        """Persist the current cloud and labels after a (de)segmentation."""
        update_label_files(
            self.point_cloud,
            count_of_points,
            self.path_to_pcd_file,
            self.path_to_label_file,
            self.path_to_object_file,
            is_append_right,
        )

    def run(self):
        """Open the main window and block until the user closes it."""
        self.main_visualizer = o3d.visualization.VisualizerWithKeyCallback()
        self.main_visualizer.create_window()
        self.main_visualizer.add_geometry(self.point_cloud)
        self.set_hotkeys()
        self.main_visualizer.run()
        self.main_visualizer.destroy_window()

    def set_hotkeys(self):
        """Register the keyboard callbacks on the main window."""
        self.main_visualizer.register_key_callback(32, self.pick_points)  # Space
        self.main_visualizer.register_key_callback(
            259, self.get_previous_snapshot
        )  # Backspace

    def pick_points(self, visualizer):
        """Let the user pick exactly ``pick_points_count`` points, then
        segment the plane they define."""
        indexes_of_points = pick_points_utils(self.point_cloud)
        assert len(indexes_of_points) == self.pick_points_count
        self.update_main_window_by_plane(indexes_of_points)

    def get_previous_snapshot(self, visualizer):
        """Undo the last plane segmentation (no-op if there is none)."""
        if len(self.picked_indexes) == 0:
            return
        number_of_last_indexes = self.picked_indexes[-1]
        self.picked_indexes = self.picked_indexes[:-1]
        point_cloud_len = len(self.point_cloud.points)
        # The most recently segmented points sit at the end of the cloud.
        last_indexes = [
            i for i in range(point_cloud_len - number_of_last_indexes, point_cloud_len)
        ]
        picked_cloud = self.point_cloud.select_by_index(last_indexes)
        # Restore the neutral gray of unsegmented points.
        picked_cloud.paint_uniform_color([0.51, 0.51, 0.51])
        self.point_cloud = picked_cloud + self.point_cloud.select_by_index(
            last_indexes, invert=True
        )
        self.update_pcd_and_label_files(number_of_last_indexes, False)
        visualizer.clear_geometries()
        visualizer.add_geometry(self.point_cloud)

    def update_main_window_by_plane(self, picked_points: list):
        """Segment all points on the plane through ``picked_points`` and
        refresh the window."""
        self.point_cloud, indexes = segment_points_on_plane_by_picked_points(
            self.point_cloud, picked_points, self.distance
        )
        self.picked_indexes.append(len(indexes))
        self.update_pcd_and_label_files(len(indexes), True)
        self.run()
| 34.372727 | 87 | 0.691351 | 497 | 3,781 | 4.802817 | 0.197183 | 0.079598 | 0.076246 | 0.027231 | 0.455802 | 0.274822 | 0.150398 | 0.12191 | 0.12191 | 0.103058 | 0 | 0.012081 | 0.233801 | 3,781 | 109 | 88 | 34.688073 | 0.811874 | 0.003967 | 0 | 0.043011 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.010753 | 1 | 0.086022 | false | 0 | 0.053763 | 0 | 0.247312 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
bdfa7f51aa6bca9797c581b745c48d3a51fc0b8d | 8,868 | py | Python | submission_utils.py | ameyagodbole/multihop_inference_explanation_regeneration | ab742433034b251a819b6eb898686530bd055cd0 | [
"MIT"
] | 7 | 2019-08-31T22:58:41.000Z | 2021-02-06T17:41:38.000Z | submission_utils.py | ameyagodbole/multihop_inference_explanation_regeneration | ab742433034b251a819b6eb898686530bd055cd0 | [
"MIT"
] | 2 | 2020-02-19T13:32:03.000Z | 2020-07-29T09:24:53.000Z | submission_utils.py | ameyagodbole/multihop_inference_explanation_regeneration | ab742433034b251a819b6eb898686530bd055cd0 | [
"MIT"
] | 1 | 2020-10-01T09:48:07.000Z | 2020-10-01T09:48:07.000Z | import argparse
import logging
import numpy as np
import pandas as pd
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.pairwise import cosine_distances
import torch
def create_predictions_file(questions_file, facts_file, examples_file, logits_file, pred_output_file,
                            mcq_choices="correct", write_debug_file=False):
    """
    Utility to generate submission file from predictions (logits scores).

    Args:
        questions_file: TSV of questions (questionID, Question, AnswerKey,
            explanation columns).
        facts_file: TSV of the fact bank (uid, text columns); duplicate uids
            are dropped.
        examples_file: torch-saved list of examples; each guid is
            "<questionID>###<factID>[###<factID>...]" and text_a is the
            processed question text.
        logits_file: .npy array of shape (n_examples, 2) with class logits.
        pred_output_file: output path; one "<qid>\\t<fid>" pair per line,
            facts ranked per question.
        mcq_choices: which answer choices to keep in the question text:
            "none", "correct" or "all".
        write_debug_file: also write a human-readable dump next to the
            predictions file.
    """
    df_questions = pd.read_csv(questions_file, sep='\t')
    df_facts = pd.read_csv(facts_file, sep='\t').drop_duplicates(subset=["uid"], keep="first").reset_index()
    examples = torch.load(examples_file)
    logits = np.load(logits_file)
    # Relevance score: logit of the positive class minus the negative class.
    logit_1 = logits[:, 1] - logits[:, 0]
    if write_debug_file:
        f_tmp = open(pred_output_file + "-as-text", "w")

    def remove_wrong_answer_choices(row, choices):
        """Strip answer options from the question text, keeping only the
        correct one (choices == "correct") or none (choices == "none")."""
        correct_choice = row["AnswerKey"]
        option_start_loc = row["Question"].rfind("(A)")
        split0, split1 = row["Question"][:option_start_loc], row["Question"][option_start_loc:]
        if choices == "none":
            return split0
        if correct_choice == "A" and "(B)" in split1:
            split0 += (split1[3:split1.rfind("(B)")])
        elif correct_choice == "A":
            split0 += (split1[3:])
        elif correct_choice == "B" and "(C)" in split1:
            split0 += (split1[split1.rfind("(B)") + 3:split1.rfind("(C)")])
        elif correct_choice == "B":
            split0 += (split1[split1.rfind("(B)") + 3:])
        elif correct_choice == "C" and "(D)" in split1:
            split0 += (split1[split1.rfind("(C)") + 3:split1.rfind("(D)")])
        elif correct_choice == "C":
            split0 += (split1[split1.rfind("(C)") + 3:])
        elif correct_choice == "D" and "(E)" in split1:
            # Bug fix: previously searched for "D)" (no opening paren), which
            # skipped one extra character after "(D)" and could match a stray
            # "D)" elsewhere in the text.
            split0 += (split1[split1.rfind("(D)") + 3:split1.rfind("(E)")])
        elif correct_choice == "D":
            split0 += (split1[split1.rfind("(D)") + 3:])
        elif correct_choice == "E" and "(F)" in split1:
            split0 += (split1[split1.rfind("(E)") + 3:split1.rfind("(F)")])
        elif correct_choice == "E":
            split0 += (split1[split1.rfind("(E)") + 3:])
        else:
            raise ValueError("Unhandled option type: {}".format(correct_choice))
        return split0

    if mcq_choices != "all":
        df_questions["ProcessedQuestion"] = df_questions.apply(remove_wrong_answer_choices, 1,
                                                               choices=mcq_choices)
    else:
        df_questions["ProcessedQuestion"] = df_questions["Question"]
    # NOTE(review): the second .fit() overwrites the first, so the tf-idf
    # vocabulary comes from the facts only — confirm this is intended.
    vectorizer = TfidfVectorizer().fit(df_questions['Question']).fit(df_facts['text'])
    X_q = vectorizer.transform(df_questions['ProcessedQuestion'])
    X_e = vectorizer.transform(df_facts['text'])
    # Question-to-fact distances used as a tf-idf fallback ranking for facts
    # the model did not score.
    X_dist = cosine_distances(X_q, X_e)

    predictions = []

    def flush_group(start, stop):
        """Rank the facts of one question (examples[start:stop]) and append
        the ranked "<qid>\\t<fid>" pairs to `predictions`.

        Previously this logic was duplicated verbatim for the final group.
        """
        qid = examples[start].guid.split('###')[0]
        q = df_questions.loc[df_questions["questionID"] == qid]
        assert q["ProcessedQuestion"].item() == examples[start].text_a
        relevant_logits = logit_1[start:stop]
        relevant_examples = examples[start:stop]
        # Sort this question's examples by descending model score.
        sorted_preds, sorted_examples = zip(*sorted(zip(relevant_logits, relevant_examples),
                                                    key=lambda e: e[0], reverse=True))
        added_uids = set()
        example_preds = []
        for se in sorted_examples:
            for fid in se.guid.split('###')[1:]:
                if fid not in added_uids:
                    added_uids.add(fid)
                    example_preds.append('\t'.join([qid, fid]))
        # Append every fact not covered by the model, ranked by tf-idf distance.
        for dist_idx in np.argsort(X_dist[q.index.to_numpy()[0]]):
            fid = df_facts.loc[dist_idx, "uid"]
            if fid not in added_uids:
                added_uids.add(fid)
                example_preds.append('\t'.join([qid, fid]))
        predictions.extend(example_preds)
        if write_debug_file:
            f_tmp.write(q["questionID"].item())
            f_tmp.write('\n')
            f_tmp.write(q["Question"].item())
            f_tmp.write('\n')
            f_tmp.write(q["ProcessedQuestion"].item())
            f_tmp.write("\n*************\n")
            for i_tmp in range(40):
                f_tmp.write(sorted_examples[i_tmp].guid.split('###')[1:].__str__())
                f_tmp.write(' Score:{:.3f}\n'.format(sorted_preds[i_tmp]))
                f_tmp.write(sorted_examples[i_tmp].text_b.__str__())
                f_tmp.write('\n')
            f_tmp.write("*************\n")
            for i_tmp in range(40):
                f_tmp.write(df_facts.loc[df_facts["uid"] == example_preds[i_tmp].split('\t')[1], "text"].item())
                f_tmp.write('\n')
            f_tmp.write("*************\n")
            for expl in q["explanation"].item().split(' '):
                f_tmp.write(df_facts.loc[df_facts["uid"] == expl.split('|')[0], "text"].item())
                f_tmp.write('\n')
            f_tmp.write("*************\n")

    # Examples are grouped by question (identical text_a runs); flush a group
    # when the next question starts, then flush the final group.
    idx_start = 0
    prev_query = examples[0].text_a
    for i, example in enumerate(examples):
        if example.text_a == prev_query:
            continue
        flush_group(idx_start, i)
        prev_query = example.text_a
        idx_start = i
    flush_group(idx_start, None)
    if write_debug_file:
        f_tmp.close()
    logging.info("Writing to file")
    with open(pred_output_file, "w") as f:
        f.write('\n'.join(predictions))
        f.write('\n')
    logging.info("len(df_questions)={}".format(len(df_questions)))
    logging.info("len(predictions)={}".format(len(predictions)))
if __name__ == '__main__':
    # Command-line entry point: declare all flags in one table, then forward
    # the parsed namespace straight into create_predictions_file (argparse
    # dest names match the function's parameter names).
    _arg_specs = [
        ("--questions_file", dict(type=str, required=True,
                                  help="The tsv file containing the evaluation")),
        ("--facts_file", dict(type=str, required=True,
                              help="The tsv file containing the common sense facts")),
        ("--examples_file", dict(type=str, help="Examples file that is being evaluated")),
        ("--logits_file", dict(type=str,
                               help="Model predictions (liekly some file of the form *_preds.npy)")),
        ("--pred_output_file", dict(type=str, required=True,
                                    help="Name of the file where predictions will be written")),
        ("--mcq_choices", dict(type=str, choices=['none', 'correct', 'all'], default="correct",
                               help="The choices to keep in the questions")),
        ("--write_debug_file", dict(action='store_true')),
    ]
    parser = argparse.ArgumentParser()
    for flag, options in _arg_specs:
        parser.add_argument(flag, **options)
    args = parser.parse_args()
    create_predictions_file(**vars(args))
bdfb7f7d975d38d147cc79c67eb8466db9daf8e8 | 1,884 | py | Python | pysm/semantic_modeling/assembling/autolabel/auto_label.py | binh-vu/semantic-modeling | b387584502ba1daa6abd6b7573828416f6426b49 | [
"MIT"
] | 3 | 2019-10-31T15:26:20.000Z | 2022-03-03T06:04:03.000Z | pysm/semantic_modeling/assembling/autolabel/auto_label.py | binh-vu/semantic-modeling | b387584502ba1daa6abd6b7573828416f6426b49 | [
"MIT"
] | 1 | 2021-10-05T14:57:29.000Z | 2022-03-27T01:58:41.000Z | pysm/semantic_modeling/assembling/autolabel/auto_label.py | binh-vu/semantic-modeling | b387584502ba1daa6abd6b7573828416f6426b49 | [
"MIT"
] | null | null | null | #!/usr/bin/python
# -*- coding: utf-8 -*-
from typing import Dict, Tuple, List, Set, Union, Optional
from data_structure import Graph
from semantic_modeling.assembling.autolabel.heuristic import preserved_structure_with_heuristic, get_gold_semantic_types
from semantic_modeling.assembling.autolabel.maxf1 import get_gold_triples, max_f1, max_f1_no_ambiguous
from semantic_modeling.assembling.autolabel.preserved_structure import preserved_structure
class AutoLabel:
    """Automatic labeling strategies for scoring a predicted semantic model
    against a gold semantic model."""

    @staticmethod
    def auto_label_max_f1(gold_sm: Graph, pred_sm: Graph,
                          is_blurring_label: bool) -> Tuple[Dict[int, bool], Dict[int, Optional[int]], float]:
        """Label via the gold/pred alignment that maximizes F1."""
        return max_f1(gold_sm, pred_sm, is_blurring_label,
                      get_gold_triples(gold_sm, is_blurring_label))

    @staticmethod
    def auto_label_max_f1_no_ambiguous(gold_sm: Graph, pred_sm: Graph, is_blurring_label: bool
                                       ) -> Tuple[Dict[int, bool], Dict[int, Optional[int]], float]:
        """Like auto_label_max_f1, but rejects ambiguous alignments."""
        return max_f1_no_ambiguous(gold_sm, pred_sm, is_blurring_label,
                                   get_gold_triples(gold_sm, is_blurring_label))

    @staticmethod
    def auto_label_preserved_structure(gold_sm: Graph,
                                       pred_sm: Graph) -> Tuple[Dict[int, bool], Dict[int, Optional[int]]]:
        """Label by exact structural preservation (labels are not blurred)."""
        return preserved_structure(gold_sm, pred_sm,
                                   get_gold_triples(gold_sm, is_blurring_label=False))

    @staticmethod
    def auto_label_preserved_structure_heuristic_fix(
            gold_sm: Graph, pred_sm: Graph) -> Tuple[Dict[int, bool], Dict[int, Optional[int]]]:
        """Structural preservation plus heuristic fixes driven by the gold
        semantic types."""
        triples = get_gold_triples(gold_sm, is_blurring_label=False)
        stypes = get_gold_semantic_types(gold_sm)
        return preserved_structure_with_heuristic(gold_sm, pred_sm, triples, stypes)
| 49.578947 | 120 | 0.728769 | 255 | 1,884 | 4.992157 | 0.196078 | 0.112333 | 0.094266 | 0.080126 | 0.699921 | 0.608013 | 0.536528 | 0.536528 | 0.480754 | 0.480754 | 0 | 0.005225 | 0.187367 | 1,884 | 37 | 121 | 50.918919 | 0.826257 | 0.02017 | 0 | 0.296296 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.148148 | false | 0 | 0.185185 | 0 | 0.518519 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
bdfb84bba555606efcd2d3ca97385378284beca7 | 8,203 | py | Python | tests/test_levdistresult.py | ZenulAbidin/bip39validator | b78f2db6f46b56b408eef3a51e921e96247a9b46 | [
"MIT"
] | 3 | 2021-02-11T20:37:56.000Z | 2021-06-11T03:29:15.000Z | tests/test_levdistresult.py | ZenulAbidin/bip39validator | b78f2db6f46b56b408eef3a51e921e96247a9b46 | [
"MIT"
] | 4 | 2020-10-04T23:11:08.000Z | 2020-12-23T00:32:52.000Z | tests/test_levdistresult.py | ZenulAbidin/bip39validator | b78f2db6f46b56b408eef3a51e921e96247a9b46 | [
"MIT"
] | null | null | null | from unittest import TestCase
from bip39validator import ValidationFailed
from bip39validator.BIP39WordList import BIP39WordList
# Word pair whose Levenshtein distance is greater than 2 (passes a
# minimum-distance-2 check).
levdist_gt2 = """brown
brpyt"""

# Word pair whose Levenshtein distance is 1 (fails a minimum-distance-2 check).
levdist_le2 = """brow
brol"""

# Expected results *must* be in word alphabetical order.
class TestLevDistResult(TestCase):
    """Tests for the result object of BIP39WordList.test_lev_distance().

    Bug fix over the original tests: the pattern
    ``try: call(); self.fail() except AssertionError: pass`` swallowed the
    AssertionError raised by self.fail() itself, so a missing validation
    error could never be detected.  assertRaises is used instead.
    """

    def _lev_result(self, name, string, dist=2):
        """Run the Levenshtein-distance check on a wordlist and return the
        result object, whether or not validation failed."""
        bip39 = BIP39WordList(name, string=string)
        try:
            return bip39.test_lev_distance(dist)
        except ValidationFailed as e:
            return e.status_obj

    def _concat_result(self):
        """Result for levdist_le2 plus one extra, far-away word."""
        concat = "\n".join([levdist_le2] + ["zzyzx"])
        return self._lev_result("levdist_concat", concat)

    def test_getwordpairs_eq(self):
        res = self._lev_result("levdist_le2", levdist_le2)
        self.assertEqual([("brol", "brow")], res.getwordpairs_eq(1))
        # Distances above the tested threshold must be rejected.
        with self.assertRaises(AssertionError):
            res.getwordpairs_eq(2)

    def test_getlinepairs_eq(self):
        res = self._lev_result("levdist_le2", levdist_le2)
        self.assertEqual([(2, 1)], res.getlinepairs_eq(1))
        # Bug fix: the original called getwordpairs_eq here (copy-paste).
        with self.assertRaises(AssertionError):
            res.getlinepairs_eq(0)

    def test_getwordpairs_lt(self):
        res = self._lev_result("levdist_le2", levdist_le2)
        self.assertEqual([("brol", "brow")], res.getwordpairs_lt(2))
        with self.assertRaises(AssertionError):
            res.getwordpairs_lt(0)

    def test_getlinepairs_lt(self):
        res = self._lev_result("levdist_le2", levdist_le2)
        self.assertEqual([(2, 1)], res.getlinepairs_lt(2))
        with self.assertRaises(AssertionError):
            res.getlinepairs_lt(0)

    def test_getwordpairs_gt(self):
        res = self._lev_result("levdist_gt2", levdist_gt2)
        self.assertEqual([("brown", "brpyt")], res.getwordpairs_gt(2))
        with self.assertRaises(AssertionError):
            res.getwordpairs_gt(0)

    def test_getlinepairs_gt(self):
        res = self._lev_result("levdist_gt2", levdist_gt2)
        self.assertEqual([(1, 2)], res.getlinepairs_gt(2))
        with self.assertRaises(AssertionError):
            res.getlinepairs_gt(0)

    def test_getwordpairs_list(self):
        res = self._concat_result()
        self.assertEqual([("brol", "brow")], res.getwordpairs_list([1, 2]))
        for bad in ["abc", [], ["a"], 0]:
            with self.assertRaises(AssertionError):
                res.getwordpairs_list(bad)

    def test_getlinepairs_list(self):
        res = self._concat_result()
        self.assertEqual([(2, 1)], res.getlinepairs_list([1, 2]))
        for bad in ["abc", [], ["a"], 0]:
            with self.assertRaises(AssertionError):
                res.getlinepairs_list(bad)

    def test_getdist(self):
        res = self._lev_result("levdist_le2", levdist_le2)
        self.assertEqual(1, res.getdist("brow", "brol"))
        # Non-string, empty and upper-case inputs are all invalid.
        for bad in [(1, "abc"), ("", "abc"), ("ABC", "abc"),
                    ("abc", 1), ("abc", ""), ("abc", "ABC")]:
            with self.assertRaises(AssertionError):
                res.getdist(*bad)

    def test_getdist_all(self):
        res = self._lev_result("levdist_le2", levdist_le2)
        self.assertEqual([(("brol", "brow"), (2, 1), 1)], res.getdist_all("brow"))
        for bad in [1, "", "ABC"]:
            with self.assertRaises(AssertionError):
                res.getdist_all(bad)

    def test_getdist_all_eq(self):
        res = self._lev_result("levdist_le2", levdist_le2)
        self.assertEqual([(("brol", "brow"), (2, 1), 1)], res.getdist_all_eq("brow", 1))
        for bad in [1, "", "ABC"]:
            with self.assertRaises((AssertionError, KeyError)):
                res.getdist_all_eq(bad, 1)
        with self.assertRaises((AssertionError, KeyError)):
            res.getdist_all_eq("abc", 0)

    def test_getdist_all_lt(self):
        res = self._lev_result("levdist_le2", levdist_le2)
        self.assertEqual([(("brol", "brow"), (2, 1), 1)], res.getdist_all_lt("brow", 2))
        for bad in [1, "", "ABC"]:
            with self.assertRaises((AssertionError, KeyError)):
                res.getdist_all_lt(bad, 1)
        with self.assertRaises((AssertionError, KeyError)):
            res.getdist_all_lt("abc", 0)

    def test_getdist_all_gt(self):
        res = self._lev_result("levdist_gt2", levdist_gt2)
        self.assertEqual([(("brpyt", "brown"), (2, 1), 3)], res.getdist_all_gt("brown", 2))
        for bad in [1, "", "ABC"]:
            with self.assertRaises(AssertionError):
                res.getdist_all_gt(bad, 1)
        with self.assertRaises((AssertionError, KeyError)):
            res.getdist_all_gt("abc", 0)

    def test_getdist_all_list(self):
        res = self._concat_result()
        self.assertEqual([(("brol", "brow"), (2, 1), 1)], res.getdist_all_list("brow", [1]))
        for bad in [1, "", "ABC"]:
            for bad_list in ["abc", [], ["a"], 0]:
                with self.assertRaises((AssertionError, KeyError)):
                    res.getdist_all_list(bad, bad_list)
| 33.076613 | 73 | 0.536633 | 922 | 8,203 | 4.590022 | 0.069414 | 0.026229 | 0.038043 | 0.112476 | 0.889887 | 0.884688 | 0.851371 | 0.825142 | 0.825142 | 0.804112 | 0 | 0.036539 | 0.356089 | 8,203 | 247 | 74 | 33.210526 | 0.764672 | 0.006583 | 0 | 0.734783 | 0 | 0 | 0.044311 | 0 | 0 | 0 | 0 | 0 | 0.134783 | 1 | 0.06087 | false | 0.1 | 0.013043 | 0 | 0.078261 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 8 |
bdfb8afb236fa2a59d4614b476d34a5d38aae988 | 694 | py | Python | landscapes/scripts/convert_fitness_to_s.py | Peyara/Evolution-Counterdiabatic-Driving | e695fad703b2d339bed0013e5b4254ba2365c105 | [
"MIT"
] | 3 | 2020-08-24T20:24:41.000Z | 2020-08-26T02:16:16.000Z | landscapes/scripts/convert_fitness_to_s.py | hincz-lab/Evolution-Counterdiabatic-Driving | e695fad703b2d339bed0013e5b4254ba2365c105 | [
"MIT"
] | null | null | null | landscapes/scripts/convert_fitness_to_s.py | hincz-lab/Evolution-Counterdiabatic-Driving | e695fad703b2d339bed0013e5b4254ba2365c105 | [
"MIT"
] | null | null | null | import sys
import numpy as np
# This script takes in a file with fitness values separated by commas
# and converts the values to be s values (relative fitness as used in
# the model) instead.
# WARNING: Overwrites given file!
if len(sys.argv) < 2:
print("Usage: python convert_fitness_to_s.py [name of file to convert]")
data = []
# Read in fitness values
with open(sys.argv[1]) as infile:
data = [float(i.strip()) for i in infile.readline().split(",")]
# Do conversion
data = [np.format_float_positional(data[-1]/i - 1) if i != 0 else 10000000000000 for i in data]
# Write out s values
with open(sys.argv[1], "w") as outfile:
outfile.write(",".join([str(i) for i in data])) | 30.173913 | 95 | 0.695965 | 119 | 694 | 4.016807 | 0.546218 | 0.043933 | 0.037657 | 0.07113 | 0.09205 | 0.09205 | 0 | 0 | 0 | 0 | 0 | 0.035149 | 0.180115 | 694 | 23 | 96 | 30.173913 | 0.804921 | 0.353026 | 0 | 0 | 0 | 0 | 0.149321 | 0.052036 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.2 | 0 | 0.2 | 0.1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
bdfc1be607446f5c58bd0a13c161e7d2e69d78e2 | 4,629 | py | Python | test/test_ui/test_render.py | hrnciar/poezio | 12b8af11df35dda535412b0c02ba792890095a7d | [
"Zlib"
] | 50 | 2015-02-11T12:00:25.000Z | 2022-01-18T05:26:40.000Z | test/test_ui/test_render.py | hrnciar/poezio | 12b8af11df35dda535412b0c02ba792890095a7d | [
"Zlib"
] | 3 | 2017-11-27T20:55:42.000Z | 2020-03-20T18:05:53.000Z | test/test_ui/test_render.py | hrnciar/poezio | 12b8af11df35dda535412b0c02ba792890095a7d | [
"Zlib"
] | 15 | 2015-04-22T14:33:36.000Z | 2021-09-29T21:33:50.000Z | import pytest
from contextlib import contextmanager
from datetime import datetime
from poezio.theming import get_theme
from poezio.ui.render import build_lines, Line, write_pre
from poezio.ui.types import BaseMessage, Message, StatusMessage, XMLLog
def test_simple_build_basemsg():
    """A plain 6-char BaseMessage renders as one line spanning [0, 6)."""
    first_line = build_lines(BaseMessage(txt='coucou'), 100, True, 10)[0]
    assert first_line.start_pos == 0
    assert first_line.end_pos == 6
def test_simple_render_message():
    """A Message with a nickname still spans only its 6-char text."""
    first_line = build_lines(Message(txt='coucou', nickname='toto'), 100, True, 10)[0]
    assert first_line.start_pos == 0
    assert first_line.end_pos == 6
def test_simple_render_xmllog():
    """An incoming XMLLog message spans its 6-char payload."""
    first_line = build_lines(XMLLog(txt='coucou', incoming=True), 100, True, 10)[0]
    assert first_line.start_pos == 0
    assert first_line.end_pos == 6
def test_simple_render_separator():
    """A None message is rendered as a separator, i.e. a None line."""
    assert build_lines(None, 100, True, 10)[0] is None
def test_simple_render_status():
    """StatusMessage re-evaluates its format callables when lines are built."""
    class Obj:
        name = 'toto'
    msg = StatusMessage("Coucou {name}", {'name': lambda: Obj.name})
    assert msg.txt == "Coucou toto"
    Obj.name = 'titi'
    # Building the lines refreshes msg.txt from the lazy callables.
    build_lines(msg, 100, True, 10)[0]
    assert msg.txt == "Coucou titi"
class FakeBuffer:
    """Minimal stand-in for a curses window: records everything written."""
    def __init__(self):
        # Accumulates all text passed to addstr().
        self.text = ''
    @contextmanager
    def colored_text(self, *args, **kwargs):
        # Color attributes are irrelevant for these tests; yield a dummy handle.
        yield None
    def addstr(self, txt):
        self.text += txt
@pytest.fixture(scope='function')
def buffer():
return FakeBuffer()
@pytest.fixture
def time():
return datetime.strptime('2019-09-27 10:11:12', '%Y-%m-%d %H:%M:%S')
def test_write_pre_basemsg(buffer):
str_time = '10:11:12'
time = datetime.strptime(str_time, '%H:%M:%S')
msg = BaseMessage(txt='coucou', time=time)
size = write_pre(msg, buffer, True, 10)
assert buffer.text == '10:11:12 '
assert size == len(buffer.text)
def test_write_pre_message_simple(buffer, time):
msg = Message(txt='coucou', nickname='toto', time=time)
size = write_pre(msg, buffer, True, 10)
assert buffer.text == '10:11:12 toto> '
assert size == len(buffer.text)
def test_write_pre_message_simple_history(buffer, time):
msg = Message(txt='coucou', nickname='toto', time=time, history=True)
size = write_pre(msg, buffer, True, 10)
assert buffer.text == '2019-09-27 10:11:12 toto> '
assert size == len(buffer.text)
def test_write_pre_message_highlight(buffer, time):
msg = Message(txt='coucou', nickname='toto', time=time, highlight=True)
size = write_pre(msg, buffer, True, 10)
assert buffer.text == '10:11:12 toto> '
assert size == len(buffer.text)
def test_write_pre_message_no_timestamp(buffer):
msg = Message(txt='coucou', nickname='toto')
size = write_pre(msg, buffer, False, 10)
assert buffer.text == 'toto> '
assert size == len(buffer.text)
def test_write_pre_message_me(buffer, time):
msg = Message(txt='/me coucou', nickname='toto', time=time)
size = write_pre(msg, buffer, True, 10)
assert buffer.text == '10:11:12 * toto '
assert size == len(buffer.text)
def test_write_pre_message_revisions(buffer, time):
msg = Message(txt='coucou', nickname='toto', time=time, revisions=5)
size = write_pre(msg, buffer, True, 10)
assert buffer.text == '10:11:12 toto5> '
assert size == len(buffer.text)
def test_write_pre_message_revisions_me(buffer, time):
msg = Message(txt='/me coucou', nickname='toto', time=time, revisions=5)
size = write_pre(msg, buffer, True, 10)
assert buffer.text == '10:11:12 * toto5 '
assert size == len(buffer.text)
def test_write_pre_message_ack(buffer, time):
ack = get_theme().CHAR_ACK_RECEIVED
expected = '10:11:12 %s toto> ' % ack
msg = Message(txt='coucou', nickname='toto', time=time, ack=1)
size = write_pre(msg, buffer, True, 10)
assert buffer.text == expected
assert size == len(buffer.text)
def test_write_pre_message_nack(buffer, time):
nack = get_theme().CHAR_NACK
expected = '10:11:12 %s toto> ' % nack
msg = Message(txt='coucou', nickname='toto', time=time, ack=-1)
size = write_pre(msg, buffer, True, 10)
assert buffer.text == expected
assert size == len(buffer.text)
def test_write_pre_xmllog_in(buffer):
msg = XMLLog(txt="coucou", incoming=True)
size = write_pre(msg, buffer, True, 10)
assert buffer.text == '%s IN ' % msg.time.strftime('%H:%M:%S')
assert size == len(buffer.text)
def test_write_pre_xmllog_out(buffer):
msg = XMLLog(txt="coucou", incoming=False)
size = write_pre(msg, buffer, True, 10)
assert buffer.text == '%s OUT ' % msg.time.strftime('%H:%M:%S')
assert size == len(buffer.text)
| 31.924138 | 76 | 0.669259 | 684 | 4,629 | 4.378655 | 0.141813 | 0.066778 | 0.04808 | 0.0601 | 0.696828 | 0.689816 | 0.611018 | 0.611018 | 0.601336 | 0.601336 | 0 | 0.039164 | 0.183625 | 4,629 | 144 | 77 | 32.145833 | 0.753374 | 0 | 0 | 0.321101 | 0 | 0 | 0.093109 | 0 | 0 | 0 | 0 | 0 | 0.275229 | 1 | 0.201835 | false | 0 | 0.055046 | 0.018349 | 0.302752 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2 |
bdfcac80e4077fb1f2378b55cba1401431e2ffec | 1,099 | py | Python | eats/behave/driver_steps.py | Etiqa/eats | 8c8e2da93d0014f6fbb208185712c5526dba1174 | [
"BSD-2-Clause"
] | null | null | null | eats/behave/driver_steps.py | Etiqa/eats | 8c8e2da93d0014f6fbb208185712c5526dba1174 | [
"BSD-2-Clause"
] | 5 | 2021-03-18T21:34:44.000Z | 2022-03-11T23:35:23.000Z | eats/behave/driver_steps.py | Etiqa/eats | 8c8e2da93d0014f6fbb208185712c5526dba1174 | [
"BSD-2-Clause"
] | null | null | null | from behave import *
from hamcrest import *
from selenium.common.exceptions import RemoteDriverServerException
from eats.pyhamcrest import array_equal_to_by_key
from eats.utils.mapping import table_mapping
from ..users import Users
@when(u'I press "{key}" key')
@when(u'{user_name:Username} presses "{key}" key')
def step_impl(context, key, user_name=Users.DEFAULT_USERNAME):
user = context.users.get(user_name)
application = user.current_application
assert_that(
calling(application.driver.send_special_key).with_args(key),
not(raises(RemoteDriverServerException)),
"{unsupported} key is not supported".format(unsupported=key)
)
@then(u'{user_name:Username} should have "{name}" meta contents element')
def step_impl(context, user_name, name):
user = context.users.get(user_name)
application = user.current_application
contents = application.driver.get_metadata_elements_content_by_name(name)
keys = context.table.headings
assert_that(table_mapping(contents, keys=keys), array_equal_to_by_key(table_mapping(context.table), "content")) | 40.703704 | 115 | 0.767971 | 147 | 1,099 | 5.52381 | 0.408163 | 0.059113 | 0.029557 | 0.034483 | 0.189655 | 0.147783 | 0.147783 | 0.147783 | 0.147783 | 0.147783 | 0 | 0 | 0.128298 | 1,099 | 27 | 115 | 40.703704 | 0.847599 | 0 | 0 | 0.173913 | 0 | 0 | 0.148182 | 0 | 0 | 0 | 0 | 0 | 0.086957 | 1 | 0.086957 | false | 0 | 0.26087 | 0 | 0.347826 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
bdfcb80abeeba8ef801afb6b8c9b9a48834e2016 | 5,526 | py | Python | homebytwo/routes/utils.py | drixselecta/homebytwo | 29d26ce9f5586943e3b64c95aa4ce9ea7263bd10 | [
"MIT"
] | 7 | 2018-03-10T20:58:59.000Z | 2021-08-22T17:18:09.000Z | homebytwo/routes/utils.py | HomebyTwo/homebytwo | 29d26ce9f5586943e3b64c95aa4ce9ea7263bd10 | [
"MIT"
] | 69 | 2017-02-01T21:15:43.000Z | 2022-02-26T09:33:27.000Z | homebytwo/routes/utils.py | drixselecta/homebytwo | 29d26ce9f5586943e3b64c95aa4ce9ea7263bd10 | [
"MIT"
] | null | null | null | from collections import namedtuple
from itertools import accumulate, chain, islice, tee
from pathlib import Path
from django.contrib.gis.db.models.functions import Distance, LineLocatePoint
from django.contrib.gis.measure import D
from .fields import LineSubstring
from .models import ActivityType, Place
# named tuple to handle Urls
Link = namedtuple("Link", ["url", "text"])
GARMIN_ACTIVITY_TYPE_MAP = {
ActivityType.ALPINESKI: "resort_skiing_snowboarding",
ActivityType.BACKCOUNTRYSKI: "backcountry_skiing_snowboarding",
ActivityType.ELLIPTICAL: "elliptical",
ActivityType.HANDCYCLE: "cycling",
ActivityType.HIKE: "hiking",
ActivityType.ICESKATE: "skating",
ActivityType.INLINESKATE: "skating",
ActivityType.NORDICSKI: "cross_country_skiing",
ActivityType.RIDE: "cycling",
ActivityType.ROCKCLIMBING: "rock_climbing",
ActivityType.ROWING: "rowing",
ActivityType.RUN: "running",
ActivityType.SNOWBOARD: "resort_skiing_snowboarding",
ActivityType.SNOWSHOE: "hiking",
ActivityType.STAIRSTEPPER: "fitness_equipment",
ActivityType.STANDUPPADDLING: "stand_up_paddleboarding",
ActivityType.SWIM: "swimming",
ActivityType.VIRTUALRIDE: "cycling",
ActivityType.VIRTUALRUN: "running",
ActivityType.WALK: "walk",
ActivityType.WEIGHTTRAINING: "fitness_equipment",
ActivityType.WORKOUT: "strength_training",
}
def get_image_path(instance, filename):
"""
callable to define the image upload path according
to the type of object: segment, route, etc.. as well as
the id of the object.
"""
return Path("images", instance.__class__.__name__, str(instance.id), filename)
def current_and_next(some_iterable):
"""
use itertools to make current and next item of an iterable available:
http://stackoverflow.com/questions/1011938/python-previous-and-next-values-inside-a-loop
used to 'create_segments_from_checkpoints'.
"""
items, nexts = tee(some_iterable, 2)
nexts = chain(islice(nexts, 1, None), [None])
return zip(items, nexts)
def create_segments_from_checkpoints(checkpoints, start=0, end=1):
"""
returns a list of segments as tuples with start and end locations
along the original line.
"""
# sorted list of line_locations from the list of places as
# well as the start and the end location of the segment where
# the places were found.
line_locations = chain(
[start], [checkpoint.line_location for checkpoint in checkpoints], [end]
)
# use the custom iterator, exclude segments where start and end
# locations are the same. Also exclude segment where 'nxt == None`.
segments = [
(crt, nxt)
for crt, nxt in current_and_next(line_locations)
if crt != nxt and nxt
]
return segments
def get_places_from_segment(segment, line, max_distance):
"""
find places within the segment of a line and annotate them with
the line location on the original line.
"""
start, end = segment
# create the Linestring geometry
subline = LineSubstring(line, start, end)
# find places within max_distance of the linestring
places = get_places_from_line(subline, max_distance)
# iterate over found places to change the line_location
# from the location on the segment to the location on
# the original linestring.
for place in places:
# relative line location to the start point of the subline
length = place.line_location * (end - start)
# update attribute with line location on the original line
place.line_location = start + length
return places
def get_places_from_line(line, max_distance):
"""
returns places within a max_distance of a Linestring Geometry
ordered by, and annotated with the `line_location` and the
`distance_from_line`:
* `line_location` is the location on the line expressed as a
float between 0.0 and 1.0.
* `distance_from_line` is a geodjango Distance object.
"""
# convert max_distance to Distance object
max_d = D(m=max_distance)
# find all places within max distance from line
places = Place.objects.filter(geom__dwithin=(line, max_d))
# annotate with distance to line
places = places.annotate(distance_from_line=Distance("geom", line))
# annotate with location along the line between 0 and 1
places = places.annotate(line_location=LineLocatePoint(line, "geom"))
# remove start and end places within 1% of start and end location
places = places.filter(
line_location__gt=0.01,
line_location__lt=0.99,
)
# sort in order of appearance along the line
places = places.order_by("line_location")
return places
def get_places_within(point, max_distance=100):
# make range a distance object
max_d = D(m=max_distance)
# get places within range
places = Place.objects.filter(geom__distance_lte=(point, max_d))
# annotate with distance
places = places.annotate(distance_from_line=Distance("geom", point))
# sort by distance
places = places.order_by(
"distance_from_line",
)
return places
def get_distances(points):
"""
Return a list with the distance of each point relative to the previous one in the list.
"""
def get_relative_distances():
if points:
yield 0
yield from (p2.distance(p1) for p1, p2 in zip(points[1:], points))
return list(accumulate(get_relative_distances()))
| 31.758621 | 92 | 0.707926 | 711 | 5,526 | 5.35865 | 0.298172 | 0.040945 | 0.025197 | 0.016535 | 0.096588 | 0.056693 | 0.04147 | 0.04147 | 0 | 0 | 0 | 0.00755 | 0.209012 | 5,526 | 173 | 93 | 31.942197 | 0.864104 | 0.344734 | 0 | 0.061728 | 0 | 0 | 0.097864 | 0.0306 | 0 | 0 | 0 | 0 | 0 | 1 | 0.098765 | false | 0 | 0.08642 | 0 | 0.271605 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
bdfe275d909128740904498e8c3a21dcaa2bafb4 | 263 | py | Python | aureaSym.py | osmartormena/introMATLAB | 6e505a17d6666d92b4502eff746f4b4dcdcd3c1c | [
"CC0-1.0"
] | null | null | null | aureaSym.py | osmartormena/introMATLAB | 6e505a17d6666d92b4502eff746f4b4dcdcd3c1c | [
"CC0-1.0"
] | null | null | null | aureaSym.py | osmartormena/introMATLAB | 6e505a17d6666d92b4502eff746f4b4dcdcd3c1c | [
"CC0-1.0"
] | null | null | null | # Cálculo da razão áurea (phi)
import sympy
d = 20
phi = sympy.symbols('phi', nonnegative=True)
eqn = sympy.Eq(1/phi, phi - 1)
sol = sympy.solve(eqn)
sympy.pprint(sol)
phiAprox = sympy.N(sol[0], d)
print('Para ', d, ' dígitos significativos, ϕ = ', phiAprox)
| 18.785714 | 60 | 0.669202 | 42 | 263 | 4.190476 | 0.619048 | 0.090909 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.022727 | 0.163498 | 263 | 13 | 61 | 20.230769 | 0.777273 | 0.106464 | 0 | 0 | 0 | 0 | 0.158798 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.125 | 0 | 0.125 | 0.25 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
bdfe32d084754eda156889373513889dc3a4c1f0 | 15,732 | py | Python | core/src/structs_classes/extract_structs.py | azurlane-doujin/AzurLanePaintingExtract-v1.0 | ef4f25e70b3ca1b9df4304132cc7612c8f5efebb | [
"MIT"
] | 144 | 2019-06-13T06:43:43.000Z | 2022-03-29T15:07:57.000Z | core/src/structs_classes/extract_structs.py | Shabi1213/AzurLanePaintingExtract-v1.0 | ef4f25e70b3ca1b9df4304132cc7612c8f5efebb | [
"MIT"
] | 2 | 2020-08-02T15:08:58.000Z | 2021-11-29T02:34:18.000Z | core/src/structs_classes/extract_structs.py | Shabi1213/AzurLanePaintingExtract-v1.0 | ef4f25e70b3ca1b9df4304132cc7612c8f5efebb | [
"MIT"
] | 19 | 2020-03-01T10:06:52.000Z | 2022-02-06T13:49:26.000Z | import collections
import os
import re
import time
from itertools import filterfalse
import wx
from core.src.static_classes.static_data import GlobalData
from core.src.structs_classes.basic_class import BasicInfo, BasicInfoList
class PerInfo(BasicInfo):
def __init__(self, name, val, has_cn):
super(PerInfo, self).__init__(name, val)
self.sub_data = 1
self.tex_step = 2
self.mesh_step=2
self.data = GlobalData()
# tree储存结构组
self._tex_path = "Empty"
self.more_tex = ["Empty"]
self._mesh_path = "Empty"
self.more_mesh = ["Empty"]
# 目标文件位置
self.lay_in = ""
# 是否可以使用还原
self._is_able_work = False
# 导出目标位置
self._save_path: str = ""
# 中文名称
self.cn_name = val
self.has_cn = has_cn
# 父组件
self.parent = None
self.must_able = False
# tree ID
self.key = ...
self.tree_ID = ...
self.tex_id = ...
self.more_tex_per_id = []
self.mesh_id = ...
self.more_mesh_per_id = []
self.action_group = [
"independent",
"face_match",
"atlas_split",
"set_able",
"split_only",
"remove_item",
"sprite_spilt"
]
# 是否以中文保存
self._is_save_as_cn = True
def __contains__(self, item):
if self.name in item or self.cn_name in item:
return True
else:
return False
@property
def is_able_work(self):
if self.must_able:
return True
else:
return self._is_able_work
@property
def tex_path(self):
return self._tex_path
@tex_path.setter
def tex_path(self, value):
self._tex_path = value
self._is_able_work = self.is_able()
@property
def mesh_path(self):
return self._mesh_path
@mesh_path.setter
def mesh_path(self, value):
self._mesh_path = value
self._is_able_work = self.is_able()
@property
def save_path(self):
return self._save_path
@save_path.setter
def save_path(self, value):
if self._is_save_as_cn:
self._save_path = os.path.join(value, self.cn_name + ".png")
else:
self._save_path = os.path.join(value, self.name + ".png")
@property
def is_save_as_cn(self):
return self._is_save_as_cn
@is_save_as_cn.setter
def is_save_as_cn(self, value):
if isinstance(value, bool):
self._is_save_as_cn = value
@staticmethod
def is_def(val):
return bool(val)
def get_is_able_work(self):
return self._is_able_work
def is_able(self):
if os.path.isfile(self.tex_path) and os.path.isfile(self.mesh_path):
return True
else:
return False
def transform_able(self):
self.must_able = not self.must_able
def set_single_path(self, path):
self._save_path = path
def append_item_tree(self, tree: wx.TreeCtrl):
# 名称
self.key = key = tree.AppendItem(self.tree_ID, f"名称:{self.cn_name}")
if self.is_able_work:
tree.SetItemTextColour(key, wx.Colour(253, 86, 255))
tree.AppendItem(self.tree_ID, f"索引名称:{self.name}")
# texture
self.tex_id = tree.AppendItem(self.tree_ID, f"Texture文件路径:{self.tex_path}")
more_tex_id = tree.AppendItem(self.tree_ID, f"其他Texture路径({len(self.more_tex)})")
for each_path in self.more_tex:
val = tree.AppendItem(more_tex_id, each_path)
self.more_tex_per_id.append(val)
# mesh
self.mesh_id = tree.AppendItem(self.tree_ID, f"Mesh文件路径:{self.mesh_path}")
more_mesh_id = tree.AppendItem(self.tree_ID, f"其他Mesh路径({len(self.more_mesh)})")
for each_path in self.more_mesh:
val = tree.AppendItem(more_mesh_id, each_path)
self.more_mesh_per_id.append(val)
action_root = tree.AppendItem(self.tree_ID, "功能按键")
# 功能键
independent = self.action_group[self.data.at_independent] = tree.AppendItem(action_root, "将当前的组合独立")
tree.SetItemTextColour(independent, wx.Colour(255, 0, 166))
face_match = self.action_group[self.data.at_face_match] = tree.AppendItem(action_root, "为当前立绘添加附加表情")
tree.SetItemTextColour(face_match, wx.Colour(0, 16, 166))
atlas_spilt = self.action_group[self.data.at_atlas_split] = tree.AppendItem(action_root, "进行Q版小人切割")
tree.SetItemTextColour(atlas_spilt, wx.Colour(140, 0, 166))
sprite_spilt = self.action_group[self.data.at_sprite_split] = tree.AppendItem(action_root, "进行Sprite切割 ")
tree.SetItemTextColour(sprite_spilt, wx.Colour(248, 40, 255))
set_able = self.action_group[self.data.at_set_able] = tree.AppendItem(action_root,
f"强制转换为可还原状态【当前{self.must_able}】")
tree.SetItemTextColour(set_able, wx.Colour(255, 177, 166))
split_only = self.action_group[self.data.at_split_only] = tree.AppendItem(action_root, "仅进行立绘还原切割 ")
tree.SetItemTextColour(split_only, wx.Colour(248, 66, 255))
remove_item = self.action_group[self.data.at_remove_item] = tree.AppendItem(action_root, "删除该元素 ")
tree.SetItemTextColour(remove_item, wx.Colour(248, 0, 255))
def append_to_tree(self, tree: wx.TreeCtrl, tree_root: wx.TreeItemId):
"""
添加到树,构建tree列表
:param tree: tree 对象
:param tree_root: 根id
:return:
"""
self.more_mesh_per_id.clear()
self.more_tex_per_id.clear()
self.tree_ID = tree.AppendItem(tree_root, self.cn_name)
self.append_item_tree(tree)
def get_select(self, type_is: bool):
"""
获取选中的列表
:param type_is: true :texture,false:mesh
:return: list,选中的列表
"""
if type_is:
return self.more_tex
else:
return self.more_mesh
# 路径设置相关
def set_tex(self, index):
self.tex_path = self.more_tex[index]
return self.tex_id, f"Texture文件路径:{self.tex_path}"
def set_mesh(self, index):
self.mesh_path = self.more_mesh[index]
return self.mesh_id, f"Mesh文件路径:{self.mesh_path}"
def add_save(self, path):
self.save_path = path
def clear_tex(self):
self.tex_id, self.more_tex, self.tex_path, self.more_tex_per_id = None, [], "Empty", []
def clear_mesh(self):
self.mesh_id, self.more_mesh, self.mesh_path, self.more_mesh_per_id = None, [], "Empty", []
def build_sub(self, value_type, file_type, index):
"""
从自身的treeid中寻找目标
:param value_type:
:param file_type:
:param index:
:return:
"""
val = PerInfo(self.name, self.val, self.has_cn)
if value_type == self.data.td_single:
if file_type == self.data.td_texture_type:
val.tex_path = self.tex_path
elif file_type == self.data.td_mesh_type:
val.mesh_path = self.mesh_path
elif value_type == self.data.td_list_item:
if file_type == self.data.td_texture_type:
val.tex_path = self.more_tex[index]
elif file_type == self.data.td_mesh_type:
val.mesh_path = self.more_mesh[index]
return os.path.isfile(val.tex_path), val
def independent(self, name, tree, tree_root):
# 独立
target = PerInfo(name, f"{self.val}-# {self.sub_data}", self.has_cn)
target.tex_path = self.tex_path
target.mesh_path = self.mesh_path
target.append_to_tree(tree, tree_root)
self.parent[target.name] = target
self.sub_data += 1
class PerWorkList(BasicInfoList):
def __init__(self, item: collections.abc.Iterable = None, mesh_match=None, texture_match=None,
is_ignore_case=False):
super(PerWorkList, self).__init__(item)
self.is_ignore_case = is_ignore_case
self.texture_match = texture_match
self.mesh_match = mesh_match
self.data = GlobalData()
# 显示部分
def show_in_tree(self, tree, tree_root):
list(map(lambda x: self._info_dict[x].append_to_tree(tree, tree_root), self._key_list))
def append(self, name, cn_name, has_cn):
value = PerInfo(name, cn_name, has_cn)
self[value.name] = value
return value
def remove(self, item: collections.abc.Iterable):
return PerWorkList(super(PerWorkList, self).remove(item))
# 查找部分
def find_by_id(self, id):
values = list(filter(lambda x: self._info_dict[x].tree_ID == id, self._key_list))
if values.__len__() == 0:
return False, None
return True, self[values[0]]
def find_in_each(self, id) -> (bool, bool, bool, int, PerInfo):
"""
从每一个中寻找指定id
:param id:
:return: (是否成功,类型【单个True,列表False】,类型[tex(True),mesh(False)],索引,对象本身)
"""
target = None
for value in self:
# 如果id为以下的部分,进入
if id == value.tex_id == id or id in value.more_tex_per_id or value.mesh_id == id or \
id in value.more_mesh_per_id:
target = value
if target is None:
return False, False, False, -1, None
if id == target.tex_id:
return True, self.data.td_single, self.data.td_texture_type, 0, target
elif id == target.mesh_id:
return True, self.data.td_single, self.data.td_mesh_type, 0, target
elif id in target.more_tex_per_id:
return True, self.data.td_list_item, self.data.td_texture_type, target.more_tex_per_id.index(id), target
elif id in target.more_mesh_per_id:
return True, self.data.td_list_item, self.data.td_mesh_type, target.more_mesh_per_id.index(id), target
def find_action(self, id) -> (bool, int, PerInfo):
"""
查找是否为特殊动作按键
:param id:
:return: 是否成功【true/false】,动作类型,作用目标
"""
target = None
for value in self:
# 如果id为以下的部分,进入
if id in value.action_group:
target = value
break
if target is None:
return False, -1, target
else:
index = target.action_group.index(id)
return True, index, target
# 添加部分
def set_tex(self, value, name=None):
"""
添加贴图
:param name: [可选]新添加的texture地址的指向项目名称,为None会根据value获取
:param value: 新添加的texture地址
:return:
"""
has_ = False
if isinstance(value, str) and os.path.isfile(value):
if name is not None:
key = name
else:
key = os.path.splitext(os.path.basename(value))[0]
if re.match(r'.+\s#\d+\.png', value, re.IGNORECASE):
has_ = True
key = re.split(r'\s#\d+(\[alpha\])?$', key)[0]
# 赋值过程
val: PerInfo = self._info_dict[key]
if value not in val.more_tex:
val.more_tex.append(value)
lower_path = os.path.split(value)[0].lower()
# 如果非空考虑优先级
if 0 < val.tex_step and lower_path.endswith(self.texture_match[0]):
val.tex_path = value
val.tex_step = 0
elif 1 < val.tex_step and lower_path.endswith(self.texture_match[1]):
val.tex_path = value
val.tex_step = 1
else:
val.tex_path = value
val.tex_step = 2
if not has_:
val.tex_path = value
def set_mesh(self, value, name=None):
"""
添加mesh网格
:param name: [可选]新添加的mesh地址的指向项目名称,为None会根据value获取
:param value: 新添加的mesh地址
:return:
"""
has_ = False
if isinstance(value, str) and os.path.isfile(value):
if name is not None:
key = name
else:
key = os.path.splitext(os.path.basename(value))[0]
if re.match(r'.+\s#\d+\.obj', value, re.IGNORECASE):
has_ = True
key = re.split(r'\s#\d+(\[alpha\])?$', key)[0]
val: PerInfo = self._info_dict[key]
if value not in val.more_mesh:
val.more_mesh.append(value)
lower_path = os.path.split(value)[0].lower()
# 如果非空考虑优先级
if 0 < val.mesh_step and lower_path.endswith(self.mesh_match[0]):
val.mesh_path = value
val.mesh_step = 0
elif 1 < val.mesh_step and lower_path.endswith(self.mesh_match[1]):
val.mesh_path = value
val.mesh_step = 1
else:
val.mesh_path = value
val.mesh_step = 2
if not has_:
val.mesh_path = value
def append_name(self, name, names: dict, *, has_cn=False):
"""
添加新对象
:param names: 预设键-值对应组
:param name: 对象索引key
:param has_cn: 对象是否有中文名
:return:
"""
# if name == "unknown4":
# print(name)
if self.is_ignore_case:
name=name.lower()
if name not in self._key_list:
if name not in names.keys():
has_cn = False
target_cn = name
else:
has_cn = True
target_cn = names[name]
# 如果中文名为空,也认为没有中文名
if target_cn == "":
target_cn = name
has_cn = False
value = PerInfo(name, target_cn, has_cn)
value.parent = self
self[name] = value
return name
else:
return name
# 清空部分
def clear_mesh(self):
list(map(lambda x: x.clear_mesh(), self))
def clear_tex(self):
list(map(lambda x: x.clear_tex(), self))
# 生成部分
def build_able(self):
val = filter(lambda x: x.get_is_able_work(), self)
value = PerWorkList(val)
return value
def build_unable(self):
val = filterfalse(lambda x: x.get_is_able_work(), self)
value = PerWorkList(val)
return value
def build_search(self):
val = map(lambda x: f"{x.name}{x.cn_name}", self)
return list(val)
def build_filter(self):
val = map(lambda x: f"{x.name}", self)
val = list(enumerate(list(val), 0))
return val
def build_skip(self, filename):
filename = list(map(lambda x: os.path.splitext(os.path.basename(x))[0], filename))
val = filter(lambda x: x in filename, self)
return PerWorkList(val)
def build_from_indexes(self, indexes):
val = map(lambda x: self[x], indexes)
value = PerWorkList(val)
return value
def build_from_pattern(self, pattern):
val = list(filter(lambda x: re.match(pattern, list(x)[1]), self.build_filter()))
val = list(zip(*val))
if len(val) == 2:
return self.build_from_indexes(val[0])
else:
return PerWorkList()
| 33.189873 | 117 | 0.557335 | 1,992 | 15,732 | 4.165161 | 0.11496 | 0.022177 | 0.016874 | 0.010124 | 0.42883 | 0.343136 | 0.252139 | 0.191877 | 0.164156 | 0.164156 | 0 | 0.009229 | 0.3388 | 15,732 | 473 | 118 | 33.260042 | 0.788406 | 0.054475 | 0 | 0.267913 | 0 | 0 | 0.037146 | 0.014171 | 0 | 0 | 0 | 0 | 0 | 1 | 0.140187 | false | 0 | 0.024922 | 0.021807 | 0.28972 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
bdff65087e9d7a27500aa847fc385ea3b6c07441 | 3,950 | py | Python | scbw_mq/tournament/benchmark/storage.py | Games-and-Simulations/sc-mq | f9ae798948add7fd84b77d75ca26ade94620f84e | [
"MIT"
] | 2 | 2018-05-10T18:10:28.000Z | 2018-05-13T18:14:33.000Z | scbw_mq/tournament/benchmark/storage.py | Games-and-Simulations/sc-mq | f9ae798948add7fd84b77d75ca26ade94620f84e | [
"MIT"
] | 1 | 2019-09-20T14:14:49.000Z | 2019-09-20T14:14:49.000Z | scbw_mq/tournament/benchmark/storage.py | Games-and-Simulations/sc-mq | f9ae798948add7fd84b77d75ca26ade94620f84e | [
"MIT"
] | null | null | null | import logging
import os
import shutil
from os.path import exists
from typing import Optional
from scbw.map import check_map_exists
from scbw.player import check_bot_exists
from scbw.utils import download_extract_zip
from ...utils import read_lines
logger = logging.getLogger(__name__)
class BenchmarkException(Exception):
pass
class RerunningBenchmarkException(BenchmarkException):
pass
class Benchmark:
bot_file: str
map_file: str
elo_file: str
repeat_games: int
bot_dir: str
map_dir: str
result_dir: str
def check_structure(self):
if not exists(f"{self.bot_file}"):
raise BenchmarkException(f"Bot file cannot be found in {self.bot_file}")
if not exists(self.map_file):
raise BenchmarkException(f"Map file cannot be found in {self.map_file}")
if not exists(self.elo_file):
raise BenchmarkException(f"Elo file cannot be found in {self.elo_file}")
if not exists(self.bot_dir):
raise BenchmarkException(f"Bot dir cannot be found in {self.bot_dir}")
if not exists(f"{self.map_dir}"):
raise BenchmarkException(f"Map dir cannot be found in {self.map_dir}")
if not exists(f"{self.result_dir}"):
raise BenchmarkException(f"Result dir cannot be found in {self.result_dir}")
bots = read_lines(self.bot_file)
for bot in bots:
check_bot_exists(bot, self.bot_dir)
maps = read_lines(self.map_file)
for map_file in maps:
check_map_exists(f"{self.map_dir}/{map_file}")
def has_results(self):
return len(os.listdir(self.result_dir)) > 0
class BenchmarkStorage:
def find_benchmark(self, name: str) -> Optional[Benchmark]:
raise NotImplemented
def get_benchmark(self, local_benchmark_dir):
with open(f'{local_benchmark_dir}/BENCHMARK_REPEAT_GAMES', 'r') as f:
repeat_games = int(f.read().strip())
benchmark = Benchmark()
benchmark.bot_file = f"{local_benchmark_dir}/BENCHMARK_BOTS"
benchmark.map_file = f"{local_benchmark_dir}/BENCHMARK_MAPS"
benchmark.elo_file = f"{local_benchmark_dir}/BENCHMARK_ELOS"
benchmark.bot_dir = f"{local_benchmark_dir}/bots"
benchmark.map_dir = f"{local_benchmark_dir}/maps"
benchmark.result_dir = f"{local_benchmark_dir}/results"
benchmark.repeat_games = repeat_games
return benchmark
class LocalBenchmarkStorage(BenchmarkStorage):
def __init__(self, base_dir: str):
self.base_dir = base_dir
def find_benchmark(self, name: str) -> Optional[Benchmark]:
if exists(self.benchmark_dir(name)):
return self.get_benchmark(self.benchmark_dir(name))
return None
def benchmark_dir(self, benchmark_name: str):
return f'{self.base_dir}/{benchmark_name}'
class SscaitBenchmarkStorage(LocalBenchmarkStorage):
BASE_URL = "http://sscaitournament.com/benchmarks"
def find_benchmark(self, name: str) -> Optional[Benchmark]:
if not name.startswith("SSCAIT"):
return None
if exists(self.benchmark_dir(name)):
return self.get_benchmark(self.benchmark_dir(name))
return self.try_download(name)
def try_download(self, name: str) -> Optional[Benchmark]:
benchmark_dir = self.benchmark_dir(name)
try:
os.makedirs(benchmark_dir, exist_ok=False)
download_extract_zip(f"{self.BASE_URL}/{name}.zip", benchmark_dir)
return self.get_benchmark(benchmark_dir)
except Exception as e:
logger.exception(f"Failed to download benchmark {name}")
logger.exception(e)
logger.info(f"Cleaning up dir {benchmark_dir}")
shutil.rmtree(self.benchmark_dir(name))
return None
# Feel free to include other benchmark sources!
# But they need to respect benchmark / bot structure :)
| 31.854839 | 88 | 0.678481 | 517 | 3,950 | 4.969052 | 0.197292 | 0.093422 | 0.052939 | 0.049046 | 0.317633 | 0.234332 | 0.111327 | 0.111327 | 0.0942 | 0.059167 | 0 | 0.000328 | 0.227848 | 3,950 | 123 | 89 | 32.113821 | 0.841967 | 0.025063 | 0 | 0.137931 | 0 | 0 | 0.189709 | 0.082121 | 0 | 0 | 0 | 0 | 0 | 1 | 0.103448 | false | 0.022989 | 0.103448 | 0.022989 | 0.482759 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
bdff857464c359af0d0606a7da2091b6840dd15a | 21,855 | py | Python | dev-server/scripts/docker-entrypoint.py | circlenaut/docker-images | 1768222b496288b6d08a51f979ade97554648817 | [
"MIT"
] | null | null | null | dev-server/scripts/docker-entrypoint.py | circlenaut/docker-images | 1768222b496288b6d08a51f979ade97554648817 | [
"MIT"
] | null | null | null | dev-server/scripts/docker-entrypoint.py | circlenaut/docker-images | 1768222b496288b6d08a51f979ade97554648817 | [
"MIT"
] | null | null | null | #!/usr/bin/python
"""
Main Workspace Run Script
"""
import os
import sys
import logging
import coloredlogs
import json
import math
import glob
import yaml
import yamale
import scripts.functions as func
from copy import copy
from subprocess import run, call
### Enable logging
# Configure the root logger to emit to stdout so container runtimes
# (docker logs / kubectl logs) capture all output.
LOG_FORMAT = "%(asctime)s [%(levelname)s] %(message)s"
logging.basicConfig(format=LOG_FORMAT, level=logging.INFO, stream=sys.stdout)

# Module-level logger used throughout this entrypoint script.
log = logging.getLogger(__name__)
log.info("Starting...")
### Read YAML config file
# Selects /scripts/config.yml or /scripts/config.yaml, validates it against
# /scripts/schema.yaml and loads it into `configs_list`.
configs_list = dict()
config_path = str()
valid_config = False

# Check which config files exist and prefer config.yml when both are present.
# FIX: the "both files exist" case must be tested FIRST -- in the original
# elif-chain it came after the single-file checks and was unreachable.
if os.path.exists('/scripts/config.yml') and os.path.exists('/scripts/config.yaml'):
    config_path = '/scripts/config.yml'
    log.warning("both config.yaml and config.yml exists, using config.yml")
    # the yaml variant is known to exist here; drop it so only one file remains
    os.remove('/scripts/config.yaml')
elif os.path.exists('/scripts/config.yaml'):
    config_path = '/scripts/config.yaml'
elif os.path.exists('/scripts/config.yml'):
    config_path = '/scripts/config.yml'
else:
    log.debug("No yaml config files available to load")

# Validate whichever file was selected (deduplicated from the three branches)
if config_path:
    schema = yamale.make_schema('/scripts/schema.yaml')
    data = yamale.make_data(config_path)
    valid_config = func.yaml_valid(schema, data, "INFO")

# Load config as yaml object
if os.path.exists(config_path):
    if valid_config:
        log.info(f"Loading config file: '{config_path}'")
        with open(config_path, "r") as f:
            configs_list = yaml.load(f, Loader=yaml.FullLoader)
        log.debug(configs_list)
    else:
        log.debug(f"Config does not exist: '{config_path}'")
### Read or set docker default envs
# Canonical set of container settings: each key falls back to a default when
# the corresponding docker environment variable is unset.
# NOTE(review): several keys read a differently-named variable (e.g.
# WORKSPACE_USER <- WORKSPACE_AUTH_USER) -- presumably an intentional mapping;
# confirm against the Dockerfile.
docker_env = {
    'LOG_VERBOSITY': os.getenv("LOG_VERBOSITY", "INFO"),
    'CONFIG_BACKUP_ENABLED': os.getenv("CONFIG_BACKUP_ENABLED", "true"),
    'WORKSPACE_USER': os.getenv("WORKSPACE_AUTH_USER", "coder"),
    'WORKSPACE_GROUP': os.getenv("WORKSPACE_AUTH_GROUP", "users"),
    'WORKSPACE_USER_SHELL': os.getenv("WORKSPACE_USER_SHELL", "zsh"),
    'WORKSPACE_USER_PASSWORD': os.getenv("WORKSPACE_AUTH_PASSWORD", "password"),
    'RESOURCES_PATH': os.getenv("RESOURCES_PATH", "/resources"),
    'WORKSPACE_HOME': os.getenv("WORKSPACE_HOME", "/workspace"),
    'APPS_PATH': os.getenv("APPS_PATH", "/apps"),
    'DATA_PATH': os.getenv("DATA_PATH", "/data"),
    'PROXY_BASE_URL': os.getenv("PROXY_BASE_URL", "/"),
    # zsh appearance
    'ZSH_PROMPT': os.getenv("ZSH_PROMPT", "none"),
    'ZSH_THEME': os.getenv("ZSH_THEME", "spaceship"),
    'ZSH_PLUGINS': os.getenv("ZSH_PLUGINS", "all"),
    'CONDA_ENV_PATH': os.getenv("CONDA_ENV_PATH", ""),
    # caddy reverse-proxy settings
    'CADDY_VIRTUAL_PORT': os.getenv("VIRTUAL_PORT", "80"),
    'CADDY_VIRTUAL_HOST': os.getenv("VIRTUAL_HOST", ""),
    'CADDY_VIRTUAL_BIND_NET': os.getenv("VIRTUAL_BIND_NET", "proxy"),
    'CADDY_VIRTUAL_PROTO': os.getenv("VIRTUAL_PROTO", "http"),
    'CADDY_VIRTUAL_BASE_URL': os.getenv("VIRTUAL_BASE_URL", "/"),
    'CADDY_PROXY_ENCODINGS_GZIP': os.getenv("PROXY_ENCODINGS_GZIP", "true"),
    'CADDY_PROXY_ENCODINGS_ZSTD': os.getenv("PROXY_ENCODINGS_ZSTD", "true"),
    'CADDY_PROXY_TEMPLATES': os.getenv("PROXY_TEMPLATES", "true"),
    'CADDY_LETSENCRYPT_EMAIL': os.getenv("LETSENCRYPT_EMAIL", "admin@example.com"),
    'CADDY_LETSENCRYPT_ENDPOINT': os.getenv("LETSENCRYPT_ENDPOINT", "dev"),
    'CADDY_HTTP_PORT': os.getenv("HTTP_PORT", "80"),
    'CADDY_HTTPS_ENABLE': os.getenv("HTTPS_ENABLE", "true"),
    'CADDY_HTTPS_PORT': os.getenv("HTTPS_PORT", "443"),
    'CADDY_AUTO_HTTPS': os.getenv("AUTO_HTTPS", "true"),
    'CADDY_WORKSPACE_SSL_ENABLED': os.getenv("WORKSPACE_SSL_ENABLED", "false"),
    # filebrowser
    'FB_PORT': os.getenv("FB_PORT", "8055"),
    'FB_BASE_URL': os.getenv("FB_BASE_URL", "/data"),
    'FB_ROOT_DIR': os.getenv("FB_ROOT_DIR", "/workspace"),
    # code-server
    'VSCODE_BIND_ADDR': os.getenv("VSCODE_BIND_ADDR", "0.0.0.0:8300"),
    'VSCODE_BASE_URL': os.getenv("VSCODE_BASE_URL", "/code"),
    # user application
    'APP_BIND_ADDR': os.getenv("APP_BIND_ADDR", "0.0.0.0:8080"),
    'APP_BASE_URL': os.getenv("APP_BASE_URL", "/app"),
    'APP_ROOT_DIR': os.getenv("APP_ROOT_DIR", "/apps/app"),
    'APP_USER': os.getenv("APP_USER", "admin"),
    'APP_PASSWORD': os.getenv("APP_PASSWORD", "password")
}
### Set verbosity level. log.info occasionally throws EOF errors with high verbosity
# Accept only standard logging level names from the docker env; anything else
# falls back to INFO.
if docker_env.get("LOG_VERBOSITY") in [
    "DEBUG",
    "INFO",
    "WARNING",
    "ERROR",
    "CRITICAL"
]:
    verbosity = docker_env.get("LOG_VERBOSITY")
else:
    # FIX: the log message was missing its closing quote ("'{}").
    log.info("invalid verbosity: '{}'".format(docker_env.get("LOG_VERBOSITY")))
    verbosity = "INFO"
### opts_json cli options (passed down to the child scripts)
opts = {
    "verbosity": verbosity
}
log.setLevel(verbosity)
# Setup colored console logs
coloredlogs.install(fmt='%(asctime)s [%(levelname)s] %(message)s', level=verbosity, logger=log)
### Reconcile docker env var with corresponding config setting
# A yaml "system" setting wins over the docker env value; env vars missing
# from the yaml are imported as-is.
system_configs = dict()
# copy and save user configs
# FIX: use .get() -- a yaml file without a "users" key previously raised
# KeyError here, even though later code explicitly handles users being None.
users_config_copy = copy(configs_list.get("users"))
# if system not configured in yaml, then set to docker envs
if configs_list.get("system") == None:
    log.info(f"System not defined in yaml config file. Importing settings from docker env.")
    for env, value in docker_env.items():
        log.debug(f"setting: '{env.lower()}' --> '{value}'")
        system_configs[env.lower()] = value
    # copy into system key
    configs_list["system"] = copy(system_configs)
    # copy users back
    configs_list["users"] = copy(users_config_copy)
# reconcile if env appears in both
else:
    for env, value in docker_env.items():
        for config, setting in configs_list.get("system").items():
            if config == env.lower():
                if setting == value:
                    log.debug(f"yaml config same as docker environment value: '{config}' --> '{setting}'")
                    system_configs[config] = value
                else:
                    log.warning(f"using config setting instead of docker environment value - {config}: '{value}'--> '{setting}'")
                    system_configs[config] = setting
        # env var not mentioned in the yaml at all: import the docker value
        if not env.lower() in list(configs_list.get("system").keys()):
            log.debug(f"not set in yaml config, setting: '{env.lower()}' --> '{value}'")
            system_configs[env.lower()] = value
    # copy into system key
    configs_list["system"] = copy(system_configs)
    # copy users back
    configs_list["users"] = copy(users_config_copy)
### Reset verbosity level according to yaml file. log.info occasionally throws EOF errors with high verbosity
# Same validation as the docker-env pass above, now using the reconciled value.
if configs_list.get("system").get("log_verbosity") in [
    "DEBUG",
    "INFO",
    "WARNING",
    "ERROR",
    "CRITICAL"
]:
    verbosity = configs_list.get("system").get("log_verbosity")
else:
    # FIX: the log message was missing its closing quote ("'{}").
    log.info("invalid verbosity: '{}'".format(configs_list.get("system").get("log_verbosity")))
    verbosity = "INFO"
### opts_json cli options (passed down to the child scripts)
opts = {
    "verbosity": verbosity
}
log.setLevel(verbosity)
# Fallback single-user definition derived from the reconciled "system" config.
# Used verbatim when the yaml declares no users, and as the per-key default
# source when exactly one user is declared.
default_user = [{
    'name': configs_list.get("system").get("workspace_user"),
    'group': configs_list.get("system").get("workspace_group"),
    'uid': "1000",
    'gid': "100",
    'shell': configs_list.get("system").get("workspace_user_shell"),
    'password': configs_list.get("system").get("workspace_user_password"),
    # directories created for the user ('mode' is an octal permission string)
    'directories': [
        {
            'name': 'home',
            'path': os.path.join("/home", configs_list.get("system").get("workspace_user")),
            'mode': '755'
        },
        {
            'name': 'resources',
            'path': configs_list.get("system").get("resources_path"),
            'mode': '755'
        },
        {
            'name': 'workspace',
            'path': configs_list.get("system").get("workspace_home"),
            'mode': '755'
        },
        {
            'name': 'data',
            'path': configs_list.get("system").get("data_path"),
            'mode': '755'
        },
        {
            'name': 'apps',
            'path': configs_list.get("system").get("apps_path"),
            'mode': '755'
        },
        {
            'name': 'app',
            'path': configs_list.get("system").get("app_root_dir"),
            'mode': '755'
        }],
    # home-directory paths preserved when config backup is enabled
    'backup_paths': [
        f'/home/{configs_list.get("system").get("workspace_user")}/.config',
        f'/home/{configs_list.get("system").get("workspace_user")}/.ssh',
        f'/home/{configs_list.get("system").get("workspace_user")}/.zshrc',
        f'/home/{configs_list.get("system").get("workspace_user")}/.bashrc',
        f'/home/{configs_list.get("system").get("workspace_user")}/.profile',
        f'/home/{configs_list.get("system").get("workspace_user")}/.condarc',
        f'/home/{configs_list.get("system").get("workspace_user")}/.oh-my-zsh',
        f'/home/{configs_list.get("system").get("workspace_user")}/.gitconfig',
        f'/home/{configs_list.get("system").get("workspace_user")}/filebrowser.db',
        f'/home/{configs_list.get("system").get("workspace_user")}/.local',
        f'/home/{configs_list.get("system").get("workspace_user")}/.conda',
        f'/home/{configs_list.get("system").get("workspace_user")}/.vscode',
        f'/home/{configs_list.get("system").get("workspace_user")}/.jupyter'
    ],
    'conda': {
        'env': ''
    },
    # zsh setup: which prompt/theme/plugins to install and enable
    'zsh': {
        'set_prompt': configs_list.get("system").get("zsh_prompt"),
        'set_theme': configs_list.get("system").get("zsh_theme"),
        'set_plugins': configs_list.get("system").get("zsh_plugins"),
        'prompt': [
            'https://github.com/sindresorhus/pure'
        ],
        'theme': [
            'https://github.com/romkatv/powerlevel10k',
            'https://github.com/denysdovhan/spaceship-prompt',
            'https://github.com/sobolevn/sobole-zsh-theme'
        ],
        'plugins': [
            'git',
            'k',
            'extract',
            'cp',
            'yarn',
            'npm',
            'supervisor',
            'rsync',
            'command-not-found',
            'autojump',
            'colored-man-pages',
            'git-flow',
            'git-extras',
            'python',
            'zsh-autosuggestions',
            'history-substring-search',
            'zsh-completions',
            'ssh-agent',
            'https://github.com/zsh-users/zsh-autosuggestions',
            'https://github.com/zsh-users/zsh-completions',
            'https://github.com/zsh-users/zsh-syntax-highlighting',
            'https://github.com/zsh-users/zsh-history-substring-search',
            'https://github.com/supercrabtree/k'
        ]},
    # empty placeholders; expected to be overridden per-user from the yaml
    'ssh': {
        'pub_keys': [''],
        'configs': [{
            'hostname': '',
            'port': '',
            'user': '',
            'pub_key_auth': '',
            'id_only': '',
            'id_file_path': ''
        }]
    },
    'filebrowser': {
        'port': configs_list.get("system").get("fb_port"),
        'base_url': configs_list.get("system").get("fb_base_url"),
        'root_dir': configs_list.get("system").get("fb_root_dir")
    },
    'vscode': {
        'bind_addr': configs_list.get("system").get("vscode_bind_addr"),
        'base_url': configs_list.get("system").get("vscode_base_url"),
        # extensions installed into the code-server instance
        'extensions': [
            'ms-python.python',
            'almenon.arepl',
            'batisteo.vscode-django',
            'bierner.color-info',
            'bierner.markdown-footnotes',
            'bierner.markdown-mermaid',
            'bierner.markdown-preview-github-styles',
            'CoenraadS.bracket-pair-colorizer-2',
            'DavidAnson.vscode-markdownlint',
            'donjayamanne.githistory',
            'donjayamanne.python-extension-pack',
            'eamodio.gitlens',
            'hbenl.vscode-test-explorer',
            'henriiik.docker-linter',
            'kamikillerto.vscode-colorize',
            'kisstkondoros.vscode-gutter-preview',
            'littlefoxteam.vscode-python-test-adapter',
            'magicstack.MagicPython',
            'ms-azuretools.vscode-docker',
            'ms-toolsai.jupyter',
            'naumovs.color-highlight',
            'shd101wyy.markdown-preview-enhanced',
            'streetsidesoftware.code-spell-checker',
            'tht13.html-preview-vscode',
            'tht13.python',
            'tushortz.python-extended-snippets',
            'wholroyd.jinja',
            'yzhang.markdown-all-in-one'
        ]
    },
    'app': {
        'bind_addr': configs_list.get("system").get("app_bind_addr"),
        'base_url': configs_list.get("system").get("app_base_url"),
        'root_dir': configs_list.get("system").get("app_root_dir"),
        'user': configs_list.get("system").get("app_user"),
        'password': configs_list.get("system").get("app_password")
    }
}]
def set_user_config(user_config, default_user, level):
    """Reconcile a single user setting between the yaml config and docker env.

    user_config: dict with yaml_config_name/value, docker_env_name/value and a
        dict_path describing where the resolved value should be written.
    default_user: unused here; kept for interface compatibility with callers.
    level: log level applied while reporting.
    """
    log.setLevel(level)
    log.info(user_config.get("yaml_config_value"))
    log.info(user_config.get("docker_env_value"))
    if user_config.get("yaml_config_value") == None:
        log.info("no setting found for '{}', setting: '{}'".format(user_config.get("yaml_config_name"), user_config.get("docker_env_value")))
        # FIX: the original compared the dict_path LIST itself to 2, which is
        # always False; the intent is clearly "does the path have 2 parts".
        if len(user_config.get("dict_path", [])) == 2:
            # NOTE(review): configs_list["users"] is a list, so indexing it by
            # dict_path[1] (a string) would raise TypeError -- verify before
            # re-enabling the (currently commented-out) caller.
            configs_list.get(user_config.get("dict_path")[0])[user_config.get("dict_path")[1]] = user_config.get("docker_env_value")
    elif user_config.get("yaml_config_value") == user_config.get("docker_env_value"):
        log.debug("yaml config same as docker environment value: {} --> '{}'".format(user_config.get("docker_env_name"), user_config.get("docker_env_value")))
    else:
        log.warning("using user config setting instead of docker environment value - {}: '{}'--> '{}'".format(user_config.get("docker_env_name"), user_config.get("docker_env_value"), user_config.get("yaml_config_value")))
# Per-setting reconciliation table consumed by set_user_config(): each entry
# pairs a yaml user key with its docker-env fallback and the dict path where
# the resolved value should be written.
# Guard: when the yaml declares no users this table must still be buildable;
# fall back to an empty dict so .get() yields None instead of raising.
_first_user = (configs_list.get("users") or [{}])[0]
user_configs = [
    {
        "yaml_config_name": "name",
        "docker_env_name": "workspace_user",
        "yaml_config_value": _first_user.get("name"),
        "docker_env_value": configs_list.get("system").get("workspace_user"),
        "dict_path": ["users", "name"]
    },
    {
        "yaml_config_name": "group",
        "docker_env_name": "workspace_group",
        "yaml_config_value": _first_user.get("group"),
        "docker_env_value": configs_list.get("system").get("workspace_group"),
        "dict_path": ["users", "group"]
    },
    {
        "yaml_config_name": "shell",
        "docker_env_name": "workspace_user_shell",
        "yaml_config_value": _first_user.get("shell"),
        "docker_env_value": configs_list.get("system").get("workspace_user_shell"),
        "dict_path": ["users", "shell"]
    },
    {
        "yaml_config_name": "password",
        "docker_env_name": "workspace_user_password",
        "yaml_config_value": _first_user.get("password"),
        "docker_env_value": configs_list.get("system").get("workspace_user_password"),
        # FIX: was ["users", "shell"] -- copy-paste error from the entry above
        "dict_path": ["users", "password"]
    },
]
### Set user config
# Resolves the final user list and binds `user`, `home` and `password`, which
# the env-export section at the bottom of this script requires.
if configs_list.get("users") == None:
    log.info(f"Users not defined in yaml config file. Going with single user mode and importing settings from docker env or setting from default")
    configs_list["users"] = default_user
    # Show to console
    default_user_json = json.dumps(default_user, indent = 4)
    # FIX: bind the workspace identity on this path too; previously these
    # names were only assigned on the "exactly one yaml user" path, so this
    # branch crashed later with NameError when exporting USER/HOME.
    user = default_user[0]['name']
    home = os.path.join("/home", user)
    password = default_user[0]['password']
elif len(configs_list.get("users")) == 0:
    log.info("User's list empty, populate and restart container")
    sys.exit()
elif len(configs_list.get("users")) == 1:
    log.info("Building a single user environment")
    user_count = 0
    for u in configs_list.get("users"):
        log.debug(f"working on user count: '{user_count}'")
        # merge: report keys that match/differ, then fill any missing keys
        # from the default user definition
        for default_config, default_setting in default_user[0].items():
            for config, setting in u.items():
                if config == default_config:
                    if setting == default_setting:
                        log.debug(f"yaml config setting same as default: '{config}' --> '{setting}'")
                    else:
                        log.debug(f"yaml config setting differs from default - {config}: '{default_setting}'--> '{setting}'")
            if not default_config in list(u.keys()):
                log.info(f"not set in yaml config, setting from default settings: '{default_config}' --> '{default_setting}'")
                configs_list.get("users")[user_count][default_config] = default_setting
        user_count += 1
    # FIX: read the identity from the merged config instead of binding it
    # inside the comparison loop -- a yaml user without an explicit "name" or
    # "password" key previously left these names unbound (NameError).
    user = configs_list.get("users")[0].get("name")
    home = os.path.join("/home", user)
    password = configs_list.get("users")[0].get("password")
    log.info(f"setting workspace user to: '{user}'")
else:
    # FIX: message corrected -- this branch triggers for more than ONE user.
    log.info("More than 1 user defined, haven't build this functionality yet. Remove extra users and restart container.")
    sys.exit()
# Dump into JSON for passage into scripts (configure_user.py / run_workspace.py
# receive these on their command line)
configs_list_json = json.dumps(configs_list)
### Write docker envs to system environment
#for env, value in docker_env.items():
#    func.set_env_variable(env, value)
### Clean up envs
# opts_json arguments to json
opts_json = json.dumps(opts)
### Dynamically set MAX_NUM_THREADS
# Resolves MAX_NUM_THREADS ("auto" or a number) and exports it to the common
# BLAS/OpenMP/numexpr/... thread-count variables.
ENV_MAX_NUM_THREADS = os.getenv("MAX_NUM_THREADS", None)
if ENV_MAX_NUM_THREADS:
    # Determine the number of available CPU resources, but limit to a max number
    if ENV_MAX_NUM_THREADS.lower() == "auto":
        ENV_MAX_NUM_THREADS = str(math.ceil(os.cpu_count()))
        try:
            # read out docker information - if docker limits cpu quota
            cpu_count = math.ceil(
                int(
                    os.popen("cat /sys/fs/cgroup/cpu/cpu.cfs_quota_us")
                    .read()
                    .replace("\n", "")
                )
                / 100000
            )
            if cpu_count > 0 and cpu_count < os.cpu_count():
                ENV_MAX_NUM_THREADS = str(cpu_count)
        except Exception:
            # best effort: fall back to os.cpu_count() when the cgroup file
            # is missing or unparsable
            pass
    # validate user-supplied values as well (non-numeric or "0" -> default 4)
    if (
        not ENV_MAX_NUM_THREADS
        or not ENV_MAX_NUM_THREADS.isnumeric()
        or ENV_MAX_NUM_THREADS == "0"
    ):
        ENV_MAX_NUM_THREADS = "4"
    if int(ENV_MAX_NUM_THREADS) > 8:
        # there should be at least one thread less compared to cores
        ENV_MAX_NUM_THREADS = str(int(ENV_MAX_NUM_THREADS) - 1)
    # set a maximum of 32, in most cases too many threads are adding too much overhead
    if int(ENV_MAX_NUM_THREADS) > 32:
        ENV_MAX_NUM_THREADS = "32"
    # only set if it is not None or empty
    # OMP_NUM_THREADS: Suggested value: vCPUs / 2 in which vCPUs is the number of virtual CPUs.
    # FIX: set_env_variable lives in scripts.functions (imported as `func`);
    # the bare calls below previously raised NameError at runtime.
    func.set_env_variable(
        "OMP_NUM_THREADS", ENV_MAX_NUM_THREADS, ignore_if_set=True
    )  # OpenMP
    func.set_env_variable(
        "OPENBLAS_NUM_THREADS", ENV_MAX_NUM_THREADS, ignore_if_set=True
    )  # OpenBLAS
    func.set_env_variable("MKL_NUM_THREADS", ENV_MAX_NUM_THREADS, ignore_if_set=True)  # MKL
    func.set_env_variable(
        "VECLIB_MAXIMUM_THREADS", ENV_MAX_NUM_THREADS, ignore_if_set=True
    )  # Accelerate
    func.set_env_variable(
        "NUMEXPR_NUM_THREADS", ENV_MAX_NUM_THREADS, ignore_if_set=True
    )  # Numexpr
    func.set_env_variable(
        "NUMEXPR_MAX_THREADS", ENV_MAX_NUM_THREADS, ignore_if_set=True
    )  # Numexpr - maximum
    func.set_env_variable(
        "NUMBA_NUM_THREADS", ENV_MAX_NUM_THREADS, ignore_if_set=True
    )  # Numba
    func.set_env_variable(
        "SPARK_WORKER_CORES", ENV_MAX_NUM_THREADS, ignore_if_set=True
    )  # Spark Worker
    func.set_env_variable(
        "BLIS_NUM_THREADS", ENV_MAX_NUM_THREADS, ignore_if_set=True
    )  # Blis
    func.set_env_variable("TBB_NUM_THREADS", ENV_MAX_NUM_THREADS, ignore_if_set=True)  # TBB
    # GOTO_NUM_THREADS
### Set container environment
# Get system env and display
system_env = os.environ.copy()
log.debug("System Environments:")
log.debug(func.capture_cmd_stdout('env', system_env))
# Display docker env
log.debug("Docker Environments:")
log.debug(func.capture_cmd_stdout('env', docker_env))
# Merge system, docker env as workspace env and display
workspace_env = func.merge_two_dicts(system_env, docker_env)
log.debug("Workspace Environment")
log.debug(func.capture_cmd_stdout('env', workspace_env))
# Format workspace env as json for passage into scripts
workspace_env_json = json.dumps(workspace_env)
### Configure user
log.info(f"configuring user")
run(
    ['python', f"/scripts/configure_user.py",
    '--opts', opts_json,
    '--env', workspace_env_json,
    '--configs', configs_list_json
    ],
    env=workspace_env
)
### Set workspace user and home
# NOTE(review): `user`, `home` and `password` are only bound on the
# single-yaml-user code path above; the default-user path leaves them unbound
# and would raise NameError here -- verify.
workspace_env['USER'] = user
workspace_env['HOME'] = home
workspace_env['WORKSPACE_USER'] = user
workspace_env['WORKSPACE_USER_HOME'] = home
workspace_env['WORKSPACE_USER_PASSWORD'] = password
### Start workspace
# Exec the long-running workspace process; its exit status becomes ours.
sys.exit(
    run(
        ['python', '/scripts/run_workspace.py',
        '--opts', opts_json],
        env=workspace_env
    )
)
| 39.307554 | 221 | 0.625212 | 2,729 | 21,855 | 4.775009 | 0.163796 | 0.05909 | 0.061239 | 0.070601 | 0.421687 | 0.37288 | 0.311872 | 0.256005 | 0.233136 | 0.150027 | 0 | 0.005598 | 0.223564 | 21,855 | 555 | 222 | 39.378378 | 0.762331 | 0.103363 | 0 | 0.164835 | 0 | 0.004396 | 0.39213 | 0.102611 | 0 | 0 | 0 | 0 | 0 | 1 | 0.002198 | false | 0.026374 | 0.030769 | 0 | 0.032967 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
da0211204f7b106ec6a65423c21ac69cd0c6c658 | 11,524 | py | Python | py/host.py | black-parrot-hdk/arty-parrot | d5d1c5859cbe6a7acad9147b0d815fe478f92ec9 | [
"BSD-3-Clause"
] | 1 | 2022-01-09T07:45:12.000Z | 2022-01-09T07:45:12.000Z | py/host.py | black-parrot-hdk/arty-parrot | d5d1c5859cbe6a7acad9147b0d815fe478f92ec9 | [
"BSD-3-Clause"
] | 2 | 2021-05-26T02:27:26.000Z | 2021-05-28T07:02:48.000Z | py/host.py | black-parrot-hdk/arty-parrot | d5d1c5859cbe6a7acad9147b0d815fe478f92ec9 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python3
import sys
import argparse
from enum import Enum
from typing import Optional
import serial
from tqdm import tqdm
from nbf import NBF_COMMAND_LENGTH_BYTES, NbfCommand, NbfFile, OPCODE_FINISH, OPCODE_PUTCH, OPCODE_READ_8, OPCODE_WRITE_8, ADDRESS_CSR_FREEZE
# DRAM address window on the target; `verify` only reads back writes whose
# address lies wholly inside [DRAM_REGION_START, DRAM_REGION_END).
DRAM_REGION_START = 0x00_8000_0000
DRAM_REGION_END = 0x10_0000_0000
def _debug_format_message(command: NbfCommand) -> str:
    """Render a command for log output, decoding putchar payloads inline."""
    rendered = str(command)
    if command.opcode != OPCODE_PUTCH:
        return rendered
    ch = command.data[0:1].decode('utf-8')
    return f"{rendered} (putch {ch!r})"
class LogDomain(Enum):
    """Categories of host log output, each with a fixed console prefix."""

    # meta info on requested commands
    COMMAND = 'command'
    # sent messages
    TRANSMIT = 'transmit'
    # received messages out-of-turn
    RECEIVE = 'receive'
    # received messages in response to a transmitted command
    REPLY = 'reply'

    @property
    def message_prefix(self):
        """Tag prepended to every log line in this domain."""
        prefixes = {
            LogDomain.COMMAND: "[CMD ]",
            LogDomain.TRANSMIT: "[TX ]",
            LogDomain.RECEIVE: "[RX ]",
            LogDomain.REPLY: "[REPLY]",
        }
        if self in prefixes:
            return prefixes[self]
        raise ValueError(f"unknown log domain '{self}'")
def _log(domain: LogDomain, message: str):
    """Emit a prefixed log line without disturbing tqdm progress bars."""
    tqdm.write(f"{domain.message_prefix} {message}")
class HostApp:
    """Serial-port host for driving an NBF-speaking target board."""
    def __init__(self, serial_port_name: str, serial_port_baud: int):
        self.port = serial.Serial(
            port=serial_port_name,
            baudrate=serial_port_baud,
            bytesize=serial.EIGHTBITS,
            parity=serial.PARITY_NONE,
            stopbits=serial.STOPBITS_ONE,
            # Without a timeout, SIGINT can't end the process while we are blocking on a read.
            timeout=3.0
        )
        # Session counters reported by print_summary_statistics().
        self.commands_sent = 0
        self.commands_received = 0
        self.reply_violations = 0
    def close_port(self):
        """Close the serial port if still open (safe to call twice)."""
        if self.port.is_open:
            self.port.close()
    def _send_message(self, command: NbfCommand):
        """Serialize one command onto the wire and count it."""
        self.port.write(command.to_bytes())
        self.port.flush()
        self.commands_sent += 1
    def _receive_message(self, block=True) -> Optional[NbfCommand]:
        """Read one fixed-length command frame.

        With block=False, returns None unless a full frame is already
        buffered. Raises ValueError on a short read (e.g. port timeout).
        """
        if block or self.port.in_waiting >= NBF_COMMAND_LENGTH_BYTES:
            buffer = self.port.read(NBF_COMMAND_LENGTH_BYTES)
            if len(buffer) != NBF_COMMAND_LENGTH_BYTES:
                raise ValueError(f"serial port returned {len(buffer)} bytes, but {NBF_COMMAND_LENGTH_BYTES} requested")
            self.commands_received += 1
            return NbfCommand.from_bytes(buffer)
        else:
            return None
    def _receive_until_opcode(self, opcode: int, block=True) -> Optional[NbfCommand]:
        """Return the next message with the given opcode, logging any others."""
        message = self._receive_message(block=block)
        while message is not None and message.opcode != opcode:
            _log(LogDomain.RECEIVE, _debug_format_message(message))
            message = self._receive_message()
        return message
    def print_summary_statistics(self):
        """Log the counters accumulated over this session."""
        _log(LogDomain.COMMAND, f" Sent: {self.commands_sent} commands")
        _log(LogDomain.COMMAND, f" Received: {self.commands_received} commands")
        if self.reply_violations > 0:
            _log(LogDomain.COMMAND, f" Reply violations: {self.reply_violations} commands")
    def _validate_reply(self, command: NbfCommand, reply: NbfCommand) -> bool:
        """Check reply against command; count and log a violation on mismatch."""
        if not command.is_correct_reply(reply):
            self.reply_violations += 1
            _log(LogDomain.REPLY, f'Unexpected reply: {command} -> {reply}')
            # TODO: abort on invalid reply?
            return False
        return True
    def _validate_outstanding_replies(self, command_queue_expecting_replies: list, sliding_window_num_commands: int, log_all_rx: bool = False):
        """
        Reads replies from the incoming data stream, matching them with the provided command queue
        in-order and validating each. If more than "sliding_window_num_commands" commands are in the
        queue, blocks waiting for an incoming command. Pops all validated commands from the front of
        the queue, in-place.
        """
        while len(command_queue_expecting_replies) > 0:
            sent_command = command_queue_expecting_replies[0]
            is_window_full = len(command_queue_expecting_replies) > sliding_window_num_commands
            reply = self._receive_until_opcode(
                sent_command.opcode,
                block=is_window_full
            )
            if reply is None:
                # all queued packets have been processed
                break
            if log_all_rx:
                # TODO: indicate this is an expected reply
                _log(LogDomain.RECEIVE, _debug_format_message(reply))
            # TODO: verbose/echo mode
            was_valid = self._validate_reply(sent_command, reply)
            if was_valid:
                # TODO: consider aborting on invalid reply
                command_queue_expecting_replies.pop(0)
    def load_file(self, source_file: str, ignore_unfreezes: bool = False, sliding_window_num_commands: int = 0, log_all_messages: bool = False):
        """Stream an NBF file to the target, validating replies in a sliding window."""
        file = NbfFile(source_file)
        outstanding_commands_expecting_replies = []
        command: NbfCommand
        for command in tqdm(file, total=file.peek_length(), desc="loading nbf"):
            # optionally skip freeze-CSR writes so the caller can unfreeze manually
            if ignore_unfreezes and command.matches(OPCODE_WRITE_8, ADDRESS_CSR_FREEZE, 0):
                continue
            if log_all_messages:
                _log(LogDomain.TRANSMIT, _debug_format_message(command))
            self._send_message(command)
            if command.expects_reply():
                outstanding_commands_expecting_replies.append(command)
            self._validate_outstanding_replies(outstanding_commands_expecting_replies, sliding_window_num_commands, log_all_rx=log_all_messages)
        # drain every remaining expected reply before declaring success
        self._validate_outstanding_replies(outstanding_commands_expecting_replies, 0, log_all_rx=log_all_messages)
        _log(LogDomain.COMMAND, "Load complete")
    def unfreeze(self):
        """Write 0 to the freeze CSR and validate the acknowledgement."""
        unfreeze_command = NbfCommand.with_values(OPCODE_WRITE_8, ADDRESS_CSR_FREEZE, 0)
        self._send_message(unfreeze_command)
        reply = self._receive_until_opcode(unfreeze_command.opcode)
        self._validate_reply(unfreeze_command, reply)
    def listen_perpetually(self, verbose: bool):
        """Print incoming traffic until a FINISH message arrives."""
        _log(LogDomain.COMMAND, "Listening for incoming messages...")
        while message := self._receive_message():
            # in "verbose" mode, we'll always print the full message, even for putchar
            if not verbose and message.opcode == OPCODE_PUTCH:
                print(chr(message.data[0]), end = '')
                continue
            _log(LogDomain.RECEIVE, _debug_format_message(message))
            if message.opcode == OPCODE_FINISH:
                print(f"FINISH: core {message.address_int}, code {message.data_int}")
                # TODO: this assumes unicore
                return
    def verify(self, reference_file: str):
        """Read back every DRAM write in reference_file and compare values."""
        file = NbfFile(reference_file)
        writes_checked = 0
        writes_corrupted = 0
        command: NbfCommand
        for command in tqdm(file, total=file.peek_length(), desc="verifying nbf"):
            if command.opcode != OPCODE_WRITE_8:
                continue
            # only 8-byte words wholly inside the DRAM window are checked
            if command.address_int < DRAM_REGION_START or command.address_int > DRAM_REGION_END - 8:
                continue
            read_message = NbfCommand.with_values(OPCODE_READ_8, command.address_int, 0)
            self._send_message(read_message)
            reply = self._receive_until_opcode(OPCODE_READ_8)
            self._validate_reply(read_message, reply)
            writes_checked += 1
            if reply.data != command.data:
                writes_corrupted += 1
                _log(LogDomain.COMMAND, f"Corruption detected at address 0x{command.address_hex_str}")
                _log(LogDomain.COMMAND, f" Expected: 0x{command.data_hex_str}")
                _log(LogDomain.COMMAND, f" Actual: 0x{reply.data_hex_str}")
        _log(LogDomain.COMMAND, "Verify complete")
        _log(LogDomain.COMMAND, f" Writes checked: {writes_checked}")
        _log(LogDomain.COMMAND, f" Corrupt writes found: {writes_corrupted}")
        if writes_corrupted > 0:
            _log(LogDomain.COMMAND, "== CORRUPTION DETECTED ==")
def _load_command(app: HostApp, args):
    """Handler for the `load` subcommand: stream the NBF file, then report."""
    app.load_file(
        args.file,
        log_all_messages=args.verbose,
        sliding_window_num_commands=args.window_size,
        ignore_unfreezes=args.no_unfreeze,
    )
    app.print_summary_statistics()
    if not args.listen:
        return
    app.listen_perpetually(verbose=args.verbose)
def _unfreeze_command(app: HostApp, args):
    """Handler for the `unfreeze` subcommand."""
    app.unfreeze()
    if not args.listen:
        return
    app.listen_perpetually(verbose=False)
def _verify_command(app: HostApp, args):
    """Handler for the `verify` subcommand: read back and compare memory."""
    reference = args.file
    app.verify(reference)
    app.print_summary_statistics()
def _listen_command(app: HostApp, args):
    """Handler for the `listen` subcommand: passively print target output."""
    app.listen_perpetually(False)
if __name__ == "__main__":
root_parser = argparse.ArgumentParser()
root_parser.add_argument('-p', '--port', dest='port', type=str, default='/dev/ttyS4', help='Serial port (full path or name)')
root_parser.add_argument('-b', '--baud', dest='baud_rate', type=int, default=2_000_000, help='Serial port baud rate')
command_parsers = root_parser.add_subparsers(dest="command")
command_parsers.required = True
load_parser = command_parsers.add_parser("load", help="Stream a file of NBF commands to the target")
load_parser.add_argument('file', help="NBF-formatted file to load")
load_parser.add_argument('--no-unfreeze', action='store_true', dest='no_unfreeze', help='Suppress any "unfreeze" commands in the input file')
load_parser.add_argument('--listen', action='store_true', dest='listen', help='Continue listening for incoming messages until program is aborted')
load_parser.add_argument('--window-size', type=int, default=500, dest='window_size', help='Specifies the maximum number of outstanding replies to allow before blocking')
load_parser.add_argument('--verbose', action='store_true', dest='verbose', help='Log all send and received commands, even if valid')
# TODO: add --verify which automatically implies --no-unfreeze then manually unfreezes after
# TODO: add --verbose which prints all sent and received commands
load_parser.set_defaults(handler=_load_command)
unfreeze_parser = command_parsers.add_parser("unfreeze", help="Send an \"unfreeze\" command to the target")
unfreeze_parser.add_argument('--listen', action='store_true', dest='listen', help='Continue listening for incoming messages until program is aborted')
unfreeze_parser.set_defaults(handler=_unfreeze_command)
verify_parser = command_parsers.add_parser("verify", help="Read back the results of an NBF file's memory writes and confirm that their values match the original file")
verify_parser.add_argument('file', help="NBF-formatted file to load")
verify_parser.set_defaults(handler=_verify_command)
listen_parser = command_parsers.add_parser("listen", help="Watch for incoming messages and print the received data")
listen_parser.set_defaults(handler=_listen_command)
args = root_parser.parse_args()
app = HostApp(serial_port_name=args.port, serial_port_baud=args.baud_rate)
try:
args.handler(app, args)
app.close_port()
except KeyboardInterrupt:
app.close_port()
print("Aborted")
sys.exit(1)
| 41.602888 | 173 | 0.674505 | 1,421 | 11,524 | 5.209008 | 0.204082 | 0.02756 | 0.030802 | 0.021616 | 0.215212 | 0.141043 | 0.106188 | 0.075925 | 0.058363 | 0.058363 | 0 | 0.008165 | 0.234814 | 11,524 | 276 | 174 | 41.753623 | 0.831254 | 0.083391 | 0 | 0.09596 | 0 | 0.005051 | 0.155509 | 0.019998 | 0 | 0 | 0 | 0.007246 | 0 | 1 | 0.09596 | false | 0 | 0.035354 | 0 | 0.222222 | 0.035354 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
da02379e9f1f2797e8f3d2fe77571451d25da847 | 618 | py | Python | mistex/plugins/citation.py | martinosorb/mistex | 27db70a95ae4bb8bc84c17c9d59c1bef5b5e92f4 | [
"BSD-3-Clause"
] | null | null | null | mistex/plugins/citation.py | martinosorb/mistex | 27db70a95ae4bb8bc84c17c9d59c1bef5b5e92f4 | [
"BSD-3-Clause"
] | null | null | null | mistex/plugins/citation.py | martinosorb/mistex | 27db70a95ae4bb8bc84c17c9d59c1bef5b5e92f4 | [
"BSD-3-Clause"
] | null | null | null | from mistune.inline_parser import LINK_LABEL
# Matches [^@label] citation markers; LINK_LABEL supplies mistune's
# bracketed-label grammar for the capture group.
CITATION_PATTERN = r'\[\^@(' + LINK_LABEL + r')\]'
def render_citation(text):
    """Render a parsed citation label as a LaTeX \\cite command."""
    return f'\\cite{{{text}}}'
def parse_citation(self, m, state):
    """Mistune inline-rule handler: ensure a bibliography is pending and
    return the ('citation', rendered) token for the captured label."""
    label = m.group(1)
    self._ensure_bib()
    return 'citation', self.render(label, state)
def plugin_citation(md):
    """Register the [^@label] citation syntax on a mistune Markdown instance.

    The inline rule is inserted just before 'std_link' so citations take
    precedence over standard links; when 'std_link' is absent the rule is
    appended instead.
    """
    md.inline.register_rule('citation', CITATION_PATTERN, parse_citation)
    rules = md.inline.rules
    # BUG FIX: list.index() raises ValueError when the item is missing -- it
    # never returns -1, so the original `index != -1` guard was dead code and
    # a missing 'std_link' rule crashed the plugin.
    if 'std_link' in rules:
        rules.insert(rules.index('std_link'), 'citation')
    else:
        rules.append('citation')
    md.renderer.register('citation', render_citation)
| 22.888889 | 73 | 0.666667 | 80 | 618 | 4.975 | 0.4375 | 0.080402 | 0.09799 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.003945 | 0.179612 | 618 | 26 | 74 | 23.769231 | 0.781065 | 0 | 0 | 0 | 0 | 0 | 0.105178 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.1875 | false | 0 | 0.0625 | 0.0625 | 0.375 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
da0300886933dfe76dadbea34ca3db3b9a2e627c | 355 | py | Python | src/sima/simo/linearization.py | SINTEF/simapy | 650b8c2f15503dad98e2bfc0d0788509593822c7 | [
"MIT"
] | null | null | null | src/sima/simo/linearization.py | SINTEF/simapy | 650b8c2f15503dad98e2bfc0d0788509593822c7 | [
"MIT"
] | null | null | null | src/sima/simo/linearization.py | SINTEF/simapy | 650b8c2f15503dad98e2bfc0d0788509593822c7 | [
"MIT"
] | null | null | null | # Generated with Linearization
#
from enum import Enum
from enum import auto
class Linearization(Enum):
    """Available linearization strategies for SIMO analyses."""

    STOCHASTIC = auto()
    DIFFERENTIATION = auto()

    def label(self):
        """Return the human-readable name of this linearization method."""
        labels = {
            Linearization.STOCHASTIC: "Stochastic",
            Linearization.DIFFERENTIATION: "Differentiation",
        }
        return labels.get(self)
da03370dc8f2f31bcdc7fd9d8a5697527015558e | 2,881 | py | Python | 2020_August_Leetcode_30_days_challenge/Week_3_Non-overlapping Intervals/by_sorting.py | coderMaruf/leetcode-1 | 20ffe26e43999e44c8acf9800acb371a49bb5853 | [
"MIT"
] | 32 | 2020-01-05T13:37:16.000Z | 2022-03-26T07:27:09.000Z | 2020_August_Leetcode_30_days_challenge/Week_3_Non-overlapping Intervals/by_sorting.py | coderMaruf/leetcode-1 | 20ffe26e43999e44c8acf9800acb371a49bb5853 | [
"MIT"
] | null | null | null | 2020_August_Leetcode_30_days_challenge/Week_3_Non-overlapping Intervals/by_sorting.py | coderMaruf/leetcode-1 | 20ffe26e43999e44c8acf9800acb371a49bb5853 | [
"MIT"
] | 8 | 2020-06-18T16:17:27.000Z | 2022-03-15T23:58:18.000Z | '''
Description:
Given a collection of intervals, find the minimum number of intervals you need to remove to make the rest of the intervals non-overlapping.
Example 1:
Input: [[1,2],[2,3],[3,4],[1,3]]
Output: 1
Explanation: [1,3] can be removed and the rest of intervals are non-overlapping.
Example 2:
Input: [[1,2],[1,2],[1,2]]
Output: 2
Explanation: You need to remove two [1,2] to make the rest of intervals non-overlapping.
Example 3:
Input: [[1,2],[2,3]]
Output: 0
Explanation: You don't need to remove any of the intervals since they're already non-overlapping.
Note:
You may assume the interval's end point is always bigger than its start point.
Intervals like [1,2] and [2,3] have borders "touching" but they don't overlap each other.
'''
from typing import List
class Solution:
    def eraseOverlapIntervals(self, intervals: List[List[int]]) -> int:
        """Return the minimum number of intervals to remove so that the
        remaining intervals are pairwise non-overlapping.

        Greedy strategy: sort by start coordinate; whenever two intervals
        overlap, drop the one that ends later (keeping the earlier-ending
        interval leaves the most room for the rest). Runs in O(n log n)
        time and O(1) extra space.
        """
        if not intervals:
            return 0

        # Sort in place by start coordinate (matches original behavior).
        intervals.sort(key=lambda seg: seg[0])

        removed = 0
        kept_end = intervals[0][1]
        for start, end in intervals[1:]:
            if start < kept_end:
                # Overlap with the last kept interval: drop one of the pair
                # and remember the smaller end point.
                removed += 1
                kept_end = min(kept_end, end)
            else:
                # Touching borders ([1,2] and [2,3]) do not overlap.
                kept_end = end
        return removed
# n : the length of input list, intervals
## Time Complexity: O( n log n)
#
# The overhead in time is the cost of sorting, which is of O( n log n ).
## Space Complexity: O( 1 )
#
# The overhead in space is the storage for loop index and temporary variable, which is of O( 1 ).
import unittest
class Testing(unittest.TestCase):
    """Regression tests covering the three examples from the prompt."""

    def _check(self, intervals, expected):
        # Shared helper so each example stays an individually reported case.
        self.assertEqual(
            Solution().eraseOverlapIntervals(intervals=intervals), expected)

    def test_case_1(self):
        self._check([[1, 2], [2, 3], [3, 4], [1, 3]], 1)

    def test_case_2(self):
        self._check([[1, 2], [1, 2], [1, 2]], 2)

    def test_case_3(self):
        self._check([[1, 2], [2, 3]], 0)
# Run the unit tests when this file is executed directly.
if __name__ == '__main__':
    unittest.main()
da0469fe0ec53d36c9f4e75701bb9541ada5eeed | 1,220 | py | Python | hive_plug_play/engine/processor.py | seakintruth/hive-plug-play | 032caed7a0690a58410b3d4e93a1fdecf2009d58 | [
"MIT"
] | 3 | 2021-05-11T07:12:05.000Z | 2021-10-04T04:01:38.000Z | hive_plug_play/engine/processor.py | seakintruth/hive-plug-play | 032caed7a0690a58410b3d4e93a1fdecf2009d58 | [
"MIT"
] | 9 | 2021-06-02T03:43:01.000Z | 2021-07-23T14:52:03.000Z | hive_plug_play/engine/processor.py | seakintruth/hive-plug-play | 032caed7a0690a58410b3d4e93a1fdecf2009d58 | [
"MIT"
] | 1 | 2021-05-24T15:57:20.000Z | 2021-05-24T15:57:20.000Z | from os import truncate
from hive_plug_play.database.handlers import PlugPlayDb
class BlockProcessor:
    """Processes blockchain blocks, persisting each block and its
    whitelisted ``custom_json`` operations through a PlugPlayDb.

    State lives on the class itself (classmethod-style singleton), so
    ``init`` must be called once before ``process_block``.
    """

    @classmethod
    def init(cls, config):
        """Bind configuration and open the database. Must be called first."""
        cls.config = config
        cls.db = PlugPlayDb(config)
        cls.head_block = {}
        cls.block_num = 0
        cls.block_time = ''

    @classmethod
    def check_op_id(cls, op_id):
        """Return True when *op_id* passes the configured whitelist.

        An empty ``op_ids`` list means "accept every id".
        """
        allowed_op_ids = cls.config['op_ids']
        if allowed_op_ids == []:
            return True
        return op_id in allowed_op_ids

    @classmethod
    def process_block(cls, block_num, block):
        """Persist *block* and any whitelisted custom_json ops it contains."""
        prev = block['previous']
        block_hash = block['block_id']
        timestamp = block['timestamp']
        cls.db.add_block(block_num, block_hash, prev, timestamp)
        # enumerate keeps each transaction paired with its id: the
        # 'transactions' and 'transaction_ids' lists are index-aligned.
        for i, trans in enumerate(block['transactions']):
            for op in trans['operations']:
                if op['type'] == 'custom_json_operation':
                    if cls.check_op_id(op['value']['id']):
                        cls.db.add_op(block_num, block['transaction_ids'][i], op['value'])
        cls.db._save()
        cls.block_num = block_num
        cls.block_time = timestamp
da04bc9087b18cc1593f2b126074d0370e63d6a0 | 1,040 | py | Python | tests/integration/conftest.py | gcallaghan/openapi-spec-validator | 3ab3411936faaee91246627f957ba6108cd47d44 | [
"Apache-2.0"
] | null | null | null | tests/integration/conftest.py | gcallaghan/openapi-spec-validator | 3ab3411936faaee91246627f957ba6108cd47d44 | [
"Apache-2.0"
] | null | null | null | tests/integration/conftest.py | gcallaghan/openapi-spec-validator | 3ab3411936faaee91246627f957ba6108cd47d44 | [
"Apache-2.0"
] | null | null | null | from os import path
import pytest
from six.moves.urllib import request
from six.moves.urllib.parse import urlunparse
from yaml import safe_load
from openapi_spec_validator import openapi_v3_spec_validator
def spec_url(spec_file, schema='file'):
    """Build a URL for *spec_file* located next to this test module."""
    base_dir = path.abspath(path.dirname(__file__))
    absolute = path.join(base_dir, spec_file)
    return urlunparse((schema, None, absolute, None, None, None))
def spec_from_file(spec_file):
    """Load and YAML-parse a spec file located next to this test module."""
    base_dir = path.abspath(path.dirname(__file__))
    absolute = path.join(base_dir, spec_file)
    with open(absolute) as fh:
        return safe_load(fh)
def spec_from_url(spec_url):
    """Fetch *spec_url* over the network and YAML-parse the response."""
    response = request.urlopen(spec_url)
    return safe_load(response)
class Factory(dict):
    """A dict whose items can also be read and written as attributes."""

    def __getattr__(self, name):
        # Missing keys raise KeyError, exactly like dict.__getitem__.
        return self[name]

    def __setattr__(self, name, value):
        self[name] = value
@pytest.fixture
def factory():
    """Bundle the spec-loading helpers into one attribute-access fixture."""
    return Factory(
        spec_url=spec_url,
        spec_from_file=spec_from_file,
        spec_from_url=spec_from_url,
    )
@pytest.fixture
def validator():
    """Provide the shared OpenAPI v3 spec validator instance."""
    return openapi_v3_spec_validator
| 22.608696 | 66 | 0.742308 | 146 | 1,040 | 4.876712 | 0.294521 | 0.067416 | 0.046348 | 0.067416 | 0.224719 | 0.109551 | 0.109551 | 0 | 0 | 0 | 0 | 0.002328 | 0.174038 | 1,040 | 45 | 67 | 23.111111 | 0.826542 | 0 | 0 | 0.129032 | 0 | 0 | 0.003846 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.16129 | false | 0 | 0.193548 | 0.064516 | 0.612903 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 |
da058a79bcff3d1633c9de586676094982ec1208 | 24,030 | py | Python | scripts/populate_conferences.py | sf2ne/Playground | 95b2d222d7ac43baca0249acbfc34e043d6a95b3 | [
"Apache-2.0"
] | null | null | null | scripts/populate_conferences.py | sf2ne/Playground | 95b2d222d7ac43baca0249acbfc34e043d6a95b3 | [
"Apache-2.0"
] | 13 | 2020-03-24T15:29:41.000Z | 2022-03-11T23:15:28.000Z | scripts/populate_conferences.py | sf2ne/Playground | 95b2d222d7ac43baca0249acbfc34e043d6a95b3 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# encoding: utf-8
import os
from modularodm import Q
from modularodm.exceptions import ModularOdmException
from framework.auth.core import User
from website import settings
from website.app import init_app
from website.conferences.model import Conference
def main():
    """Initialize the application (backends only, no routes) and then
    create/update all conferences defined in MEETING_DATA."""
    init_app(set_backends=True, routes=False)
    populate_conferences()
# Registry of conferences keyed by endpoint slug. Each entry supplies the
# keyword arguments used to construct a Conference document, plus two keys
# consumed specially by populate_conferences(): 'admins' (emails of already
# registered users to grant admin rights) and optional 'field_names'
# (per-conference wording overrides merged into Conference.field_names).
MEETING_DATA = {
    'spsp2014': {
        'name': 'Society for Personality and Social Psychology 2014',
        'info_url': None,
        'logo_url': None,
        'active': False,
        'admins': [],
        'public_projects': True,
        'poster': True,
        'talk': True,
    },
'asb2014': {
'name': 'Association of Southeastern Biologists 2014',
'info_url': 'http://www.sebiologists.org/meetings/talks_posters.html',
'logo_url': None,
'active': False,
'admins': [],
'public_projects': True,
'poster': True,
'talk': True,
},
'aps2014': {
'name': 'Association for Psychological Science 2014',
'info_url': 'http://centerforopenscience.org/aps/',
'logo_url': '/static/img/2014_Convention_banner-with-APS_700px.jpg',
'active': False,
'admins': [],
'public_projects': True,
'poster': True,
'talk': True,
},
'annopeer2014': {
'name': '#annopeer',
'info_url': None,
'logo_url': None,
'active': False,
'admins': [],
'public_projects': True,
'poster': True,
'talk': True,
},
'cpa2014': {
'name': 'Canadian Psychological Association 2014',
'info_url': None,
'logo_url': None,
'active': False,
'admins': [],
'public_projects': True,
'poster': True,
'talk': True,
},
'filaments2014': {
'name': 'National Radio Astronomy Observatory Filaments 2014',
'info_url': None,
'logo_url': 'https://science.nrao.edu/science/meetings/2014/'
'filamentary-structure/images/filaments2014_660x178.png',
'active': False,
'admins': [
'lvonschi@nrao.edu',
# 'Dkim@nrao.edu',
],
'public_projects': True,
'poster': True,
'talk': True,
},
'bitss2014': {
'name': 'Berkeley Initiative for Transparency in the Social Sciences Research Transparency Forum 2014',
'info_url': None,
'logo_url': os.path.join(
settings.STATIC_URL_PATH,
'img',
'conferences',
'bitss.jpg',
),
'active': False,
'admins': [
'gkroll@berkeley.edu',
'awais@berkeley.edu',
],
'public_projects': True,
'poster': False,
'talk': True,
},
'spsp2015': {
'name': 'Society for Personality and Social Psychology 2015',
'info_url': None,
'logo_url': None,
'active': False,
'admins': [
'meetings@spsp.org',
],
'poster': True,
'talk': True,
},
'aps2015': {
'name': 'Association for Psychological Science 2015',
'info_url': None,
'logo_url': 'http://www.psychologicalscience.org/images/APS_2015_Banner_990x157.jpg',
'active': True,
'admins': [
],
'public_projects': True,
'poster': True,
'talk': True,
},
'icps2015': {
'name': 'International Convention of Psychological Science 2015',
'info_url': None,
'logo_url': 'http://icps.psychologicalscience.org/wp-content/themes/deepblue/images/ICPS_Website-header_990px.jpg',
'active': False,
'admins': [
],
'public_projects': True,
'poster': True,
'talk': True,
},
'mpa2015': {
'name': 'Midwestern Psychological Association 2015',
'info_url': None,
'logo_url': 'http://www.midwesternpsych.org/resources/Pictures/MPA%20logo.jpg',
'active': True,
'admins': [
'mpa@kent.edu',
],
'public_projects': True,
'poster': True,
'talk': True,
},
'NCCC2015': {
'name': 'North Carolina Cognition Conference 2015',
'info_url': None,
'logo_url': None,
'active': False,
'admins': [
'aoverman@elon.edu',
],
'public_projects': True,
'poster': True,
'talk': True,
},
'VPRSF2015': {
'name': 'Virginia Piedmont Regional Science Fair 2015',
'info_url': None,
'logo_url': 'http://vprsf.org/wp-content/themes/VPRSF/images/logo.png',
'active': False,
'admins': [
'director@vprsf.org',
],
'public_projects': True,
'poster': True,
'talk': True,
},
'APRS2015': {
'name': 'UVA Annual Postdoctoral Research Symposium 2015',
'info_url': None,
'logo_url': 'http://s1.postimg.org/50qj9u6i7/GPA_Logo.jpg',
'active': False,
'admins': [
'mhurst@virginia.edu',
],
'public_projects': True,
'poster': True,
'talk': True,
},
'ASB2015': {
'name': 'Association of Southeastern Biologists 2015',
'info_url': None,
'logo_url': 'http://www.sebiologists.org/wp/wp-content/uploads/2014/09/banner_image_Large.png',
'active': False,
'admins': [
'amorris.mtsu@gmail.com',
],
'public_projects': True,
'poster': True,
'talk': True,
},
'TeaP2015': {
'name': 'Tagung experimentell arbeitender Psychologen 2015',
'info_url': None,
'logo_url': None,
'active': False,
'admins': [
],
'public_projects': True,
'poster': True,
'talk': True,
},
'VSSEF2015': {
'name': 'Virginia State Science and Engineering Fair 2015',
'info_url': 'http://www.vmi.edu/conferences/vssef/vssef_home/',
'logo_url': 'http://www.vmi.edu/uploadedImages/Images/Headers/vssef4.jpg',
'active': False,
'admins': [],
'public_projects': True,
'poster': True,
'talk': True,
},
'RMPA2015': {
'name': 'Rocky Mountain Psychological Association 2015',
'info_url': 'http://www.rockymountainpsych.org/uploads/7/4/2/6/7426961/85th_annual_rmpa_conference_program_hr.pdf',
'logo_url': 'http://www.rockymountainpsych.org/uploads/7/4/2/6/7426961/header_images/1397234084.jpg',
'active': False,
'admins': [],
'public_projects': True,
'poster': True,
'talk': True,
},
'ARP2015': {
'name': 'Association for Research in Personality 2015',
'info_url': 'http://www.personality-arp.org/conference/',
'logo_url': 'http://www.personality-arp.org/wp-content/uploads/conference/st-louis-arp.jpg',
'active': True,
'admins': [],
'public_projects': True,
'poster': True,
'talk': True,
},
'SEP2015': {
'name': 'Society of Experimental Psychologists Meeting 2015',
'info_url': 'http://faculty.virginia.edu/Society_of_Experimental_Psychologists/',
'logo_url': 'http://www.sepsych.org/nav/images/SEP-header.gif',
'active': False,
'admins': [],
'public_projects': True,
'poster': True,
'talk': True,
},
'Reid2015': {
'name': 'L. Starling Reid Undergraduate Psychology Conference 2015',
'info_url': 'http://avillage.web.virginia.edu/Psych/Conference',
'logo_url': None,
'active': True,
'admins': [],
'public_projects': True,
'poster': True,
'talk': True,
},
'NEEPS2015': {
'name': 'Northeastern Evolutionary Psychology Conference 2015',
'info_url': 'http://neeps2015.weebly.com/',
'logo_url': None,
'active': False,
'admins': [],
'public_projects': True,
'poster': True,
'talk': True,
},
'VaACS2015': {
'name': 'Virginia Section American Chemical Society Student Poster Session 2015',
'info_url': 'http://virginia.sites.acs.org/',
'logo_url': 'http://virginia.sites.acs.org/Bulletin/15/UVA.jpg',
'active': False,
'admins': [],
'public_projects': True,
'poster': True,
'talk': True,
},
'MADSSCi2015': {
'name': 'Mid-Atlantic Directors and Staff of Scientific Cores & Southeastern Association of Shared Services 2015',
'info_url': 'http://madssci.abrf.org',
'logo_url': 'http://s24.postimg.org/qtc3baefp/2015madssci_seasr.png',
'active': True,
'admins': [],
'public_projects': True,
'poster': True,
'talk': True,
},
'NRAO2015': {
'name': 'National Radio Astronomy Observatory Accretion 2015',
'info_url': 'https://science.nrao.edu/science/meetings/2015/accretion2015/posters',
'logo_url': None,
'active': True,
'admins': [],
'public_projects': True,
'poster': True,
'talk': True,
},
'ARCS2015': {
'name': 'Advancing Research Communication and Scholarship 2015',
'info_url': 'http://commons.pacificu.edu/arcs/',
'logo_url': 'http://commons.pacificu.edu/assets/md5images/4dfd167454e9f4745360a9550e189323.png',
'active': True,
'admins': [],
'public_projects': True,
'poster': True,
'talk': True,
},
'singlecasedesigns2015': {
'name': 'Single Case Designs in Clinical Psychology: Uniting Research and Practice',
'info_url': 'https://www.royalholloway.ac.uk/psychology/events/eventsarticles/singlecasedesignsinclinicalpsychologyunitingresearchandpractice.aspx',
'logo_url': None,
'active': True,
'admins': [],
'public_projects': True,
'poster': True,
'talk': True,
},
'OSFM2015': {
'name': 'OSF for Meetings 2015',
'info_url': None,
'logo_url': None,
'active': True,
'admins': [],
'public_projects': True,
'poster': True,
'talk': True,
},
'JSSP2015': {
'name': 'Japanese Society of Social Psychology 2015',
'info_url': 'http://www.socialpsychology.jp/conf2015/index.html',
'logo_url': None,
'active': True,
'admins': [],
'public_projects': True,
'poster': True,
'talk': True,
},
'4S2015': {
'name': 'Society for Social Studies of Science 2015',
'info_url': 'http://www.4sonline.org/meeting',
'logo_url': 'http://www.4sonline.org/ee/denver-skyline.jpg',
'active': True,
'admins': [],
'public_projects': True,
'poster': True,
'talk': True,
},
'IARR2016': {
'name': 'International Association for Relationship Research 2016',
'info_url': 'http://iarr.psych.utoronto.ca/',
'logo_url': None,
'active': True,
'admins': [],
'public_projects': True,
'poster': True,
'talk': True,
},
'IA2015': {
'name': 'Inclusive Astronomy 2015',
'info_url': 'https://vanderbilt.irisregistration.com/Home/Site?code=InclusiveAstronomy2015',
'logo_url': 'https://vanderbilt.blob.core.windows.net/images/Inclusive%20Astronomy.jpg',
'active': True,
'admins': [],
'public_projects': True,
'poster': True,
'talk': True,
},
'R2RC': {
'name': 'Right to Research Coalition',
'info_url': None,
'logo_url': None,
'active': True,
'admins': [],
'public_projects': True,
'poster': True,
'talk': True,
},
'OpenCon2015': {
'name': 'OpenCon2015',
'info_url': 'http://opencon2015.org/',
'logo_url': 'http://s8.postimg.org/w9b30pxyd/Open_Con2015_new_logo.png',
'active': True,
'admins': [],
'public_projects': True,
'poster': True,
'talk': True,
},
'ESIP2015': {
'name': 'Earth Science Information Partners 2015',
'info_url': 'http://esipfed.org/',
'logo_url': 'http://s30.postimg.org/m2uz2g4pt/ESIP.png',
'active': True,
'admins': [],
'public_projects': True,
'poster': True,
'talk': True,
},
'SPSP2016': {
'name': 'Society for Personality and Social Psychology 2016 ',
'info_url': 'http://meeting.spsp.org',
'logo_url': None,
'active': True,
'admins': [],
'public_projects': True,
'poster': True,
'talk': True,
},
'NACIII': {
'name': '2015 National Astronomy Consortium (NAC) III Workshop',
'info_url': 'https://info.nrao.edu/do/odi/meetings/2015/nac111/',
'logo_url': None,
'active': True,
'admins': [],
'public_projects': True,
'poster': True,
'talk': True,
},
'CDS2015': {
'name': 'Cognitive Development Society 2015',
'info_url': 'http://meetings.cogdevsoc.org/',
'logo_url': None,
'active': True,
'admins': [],
'public_projects': True,
'poster': True,
'talk': True,
},
'SEASR2016': {
'name': 'Southeastern Association of Shared Resources 2016',
'info_url': 'http://seasr.abrf.org',
'logo_url': None,
'active': True,
'admins': [],
'public_projects': True,
'poster': True,
'talk': True,
},
'Accretion2015': {
'name': 'Observational Evidence of Gas Accretion onto Galaxies?',
'info_url': 'https://science.nrao.edu/science/meetings/2015/accretion2015',
'logo_url': None,
'active': True,
'admins': [],
'public_projects': True,
'poster': True,
'talk': True,
},
'2020Futures': {
'name': 'U.S. Radio/Millimeter/Submillimeter Science Futures in the 2020s',
'info_url': 'https://science.nrao.edu/science/meetings/2015/2020futures/home',
'logo_url': None,
'active': True,
'admins': [],
'public_projects': True,
'poster': True,
'talk': True,
},
'RMPA2016': {
'name': 'Rocky Mountain Psychological Association 2016',
'info_url': 'http://www.rockymountainpsych.org/convention-info.html',
'logo_url': 'http://www.rockymountainpsych.org/uploads/7/4/2/6/7426961/header_images/1397234084.jpg',
'active': True,
'admins': [],
'public_projects': True,
'poster': True,
'talk': True,
},
'CNI2015': {
'name': 'Coalition for Networked Information (CNI) Fall Membership Meeting 2015',
'info_url': 'https://wp.me/P1LncT-64s',
'logo_url': None,
'active': True,
'admins': [],
'public_projects': True,
'poster': False,
'talk': True,
},
'SWPA2016': {
'name': 'Southwestern Psychological Association Convention 2016',
'info_url': 'https://www.swpsych.org/conv_dates.php',
'logo_url': 'http://s28.postimg.org/xbwyqqvx9/SWPAlogo4.jpg',
'active': True,
'admins': [],
'public_projects': True,
'poster': True,
'talk': True,
},
'ESIP2016W': {
'name': 'Earth Science Information Partners Winter Meeting 2016',
'info_url': 'http://commons.esipfed.org/2016WinterMeeting',
'logo_url': 'http://s30.postimg.org/m2uz2g4pt/ESIP.png',
'active': True,
'admins': [],
'public_projects': True,
'poster': True,
'talk': True,
},
'MiamiBrainhack15': {
'name': 'University of Miami Brainhack 2015',
'info_url': 'http://brainhack.org/americas/',
'logo_url': None,
'active': True,
'admins': [],
'public_projects': True,
'poster': True,
'talk': True,
},
'PsiChiRepository': {
'name': 'Psi Chi',
'info_url': 'http://www.psichi.org/?ResearchAdvisory#.VmBpeOMrI1g',
'logo_url': 'http://s11.postimg.org/4g2451vcz/Psi_Chi_Logo.png',
'admins': [
'research.director@psichi.org',
],
'field_names': {
'submission1': 'measures',
'submission2': 'materials',
'submission1_plural': 'measures/scales',
'submission2_plural': 'study materials',
'meeting_title_type': 'Repository',
'add_submission': 'materials',
'mail_subject': 'Title',
'mail_message_body': 'Measure or material short description',
'mail_attachment': 'Your measure/scale or material file(s)'
},
},
'GI2015': {
'name': 'Genome Informatics 2015',
'info_url': 'https://meetings.cshl.edu/meetings.aspx?meet=info&year=15',
'logo_url': None,
'active': True,
'admins': [],
'public_projects': True,
'poster': True,
'talk': True,
},
'MADSSCi2016': {
'name': 'Mid-Atlantic Directors and Staff of Scientific Cores & Southeastern Association of Shared Services 2016',
'info_url': 'http://madssci.abrf.org',
'logo_url': 'http://madssci.abrf.org/sites/default/files/madssci-logo-bk.png',
'active': True,
'admins': [],
'public_projects': True,
'poster': True,
'talk': True,
},
'SMM2015': {
'name': 'The Society for Marine Mammalogy',
'info_url': 'https://www.marinemammalscience.org/conference/',
'logo_url': None,
'active': True,
'admins': [],
'public_projects': True,
'poster': True,
'talk': True,
},
'TESS': {
'name': 'Time-sharing Experiments for the Social Sciences',
'info_url': 'http://www.tessexperiments.org',
'logo_url': None,
'active': False,
'admins': [],
'public_projects': True,
'poster': False,
'talk': True,
'field_names': {
'submission1': 'poster',
'submission2': 'study',
'submission1_plural': 'posters',
'submission2_plural': 'studies',
'meeting_title_type': 'Studies',
'add_submission': 'studies',
}
},
'ASCERM2016': {
'name': 'ASCE Rocky Mountain Student Conference 2016',
'info_url': 'http://luninuxos.com/asce/',
'logo_url': 'http://s2.postimg.org/eaduh2ovt/2016_ASCE_Rocky_Mtn_banner.png',
'active': True,
'admins': [],
'public_projects': True,
'poster': False,
'talk': True,
},
'ARCA2016': {
'name': '5th Applied Research Conference in Africa',
'info_url': 'http://www.arcaconference.org/',
'logo_url': 'http://www.arcaconference.org/images/ARCA_LOGO_NEW.JPG',
'active': True,
'admins': [],
'public_projects': True,
'poster': False,
'talk': True,
},
'CURCONF2016': {
'name': 'CUR Biennial Conference 2016',
'info_url': 'http://www.cur.org/conferences_and_events/biennial2016/',
'logo_url': 'http://s11.postimg.org/v8feuna4y/Conference_logo_eps.jpg',
'active': True,
'admins': [],
'public_projects': True,
'poster': True,
'talk': True,
},
'CATALISE2016': {
'name': 'Criteria and Terminology Applied to Language Impairments: Synthesising the Evidence (CATALISE) 2016',
'info_url': None,
'logo_url': None,
'active': True,
'admins': [],
'public_projects': True,
'poster': True,
'talk': True,
},
'Emergy2016': {
'name': '9th Biennial Emergy Research Conference',
'info_url': 'http://www.cep.ees.ufl.edu/emergy/conferences/ERC09_2016/index.shtml',
'logo_url': 'http://s12.postimg.org/uf9ioqmct/emergy.jpg',
'active': True,
'admins': [],
'public_projects': True,
'poster': True,
'talk': True,
},
'aps2016': {
'name': '28th APS Annual Convention',
'info_url': 'http://www.psychologicalscience.org/convention',
'logo_url': 'http://www.psychologicalscience.org/redesign/wp-content/uploads/2015/03/APS_2016_Banner_990x157.jpg',
'active': True,
'admins': [],
'public_projects': True,
'poster': True,
'talk': True,
},
'jssp2016': {
'name': 'Japanese Society of Social Psychology 2016',
'info_url': 'http://www.socialpsychology.jp/conf2016/',
'logo_url': None,
'active': True,
'admins': [],
'public_projects': True,
'poster': True,
'talk': True,
},
'sepech2016': {
'name': 'XI SEPECH - Research Seminar in Human Sciences (Seminário de Pesquisa em Ciências Humanas)',
'info_url': 'http://www.uel.br/eventos/sepech/sepech2016/',
'logo_url': None,
'active': True,
'admins': [],
'public_projects': True,
'poster': True,
'talk': True,
},
'etmaal2016': {
'name': 'Etmaal van de Communicatiewetenschap 2016 - Media Psychology',
'info_url': 'https://etmaal2016.wordpress.com',
'logo_url': None,
'active': True,
'admins': [],
'public_projects': True,
'poster': True,
'talk': True,
},
'WSAN2016': {
'name': 'WSAN2016 Erasmus University Rotterdam',
'info_url': 'http://www.humane.eu/wsan/',
'logo_url': None,
'active': True,
'admins': [],
'public_projects': True,
'poster': True,
'talk': True,
},
'ContainerStrategies': {
'name': 'Container Strategies for Data & Software Preservation',
'info_url': 'https://daspos.crc.nd.edu/index.php/workshops/container-strategies-for-data-software-preservation-that-promote-open-science',
'logo_url': 'http://s17.postimg.org/8nl1v5mxb/Screen_Shot_2016_03_02_at_9_05_24_PM.png',
'active': True,
'admins': [],
'public_projects': True,
'poster': True,
},
'CNI2016': {
'name': 'Coalition for Networked Information (CNI) Spring Membership Meeting 2016',
'info_url': 'https://wp.me/P1LncT-6fd',
'logo_url': None,
'active': True,
'admins': [],
'public_projects': True,
'poster': False,
'talk': True,
},
}
def populate_conferences():
    """Create or update a Conference record for every MEETING_DATA entry.

    Admin emails must belong to already-registered users; an unknown email
    aborts with RuntimeError. If a conference already exists (save raises
    ModularOdmException), it is updated field by field instead.
    """
    # .items() works on both Python 2 and 3; .iteritems() is Python-2-only
    # and raised AttributeError under Python 3.
    for meeting, attrs in MEETING_DATA.items():
        meeting = meeting.strip()
        admin_emails = attrs.pop('admins', [])
        admin_objs = []
        for email in admin_emails:
            try:
                user = User.find_one(Q('username', 'iexact', email))
                admin_objs.append(user)
            except ModularOdmException:
                raise RuntimeError('Username {0!r} is not registered.'.format(email))
        custom_fields = attrs.pop('field_names', {})
        conf = Conference(
            endpoint=meeting, admins=admin_objs, **attrs
        )
        conf.field_names.update(custom_fields)
        try:
            conf.save()
        except ModularOdmException:
            # Conference already exists: merge the new attributes into it.
            conf = Conference.find_one(Q('endpoint', 'eq', meeting))
            for key, value in attrs.items():
                if isinstance(value, dict):
                    # Dict-valued attributes (e.g. field_names) are merged,
                    # not replaced, so existing customisations survive.
                    current = getattr(conf, key)
                    current.update(value)
                    setattr(conf, key, current)
                else:
                    setattr(conf, key, value)
            conf.admins = admin_objs
            changed_fields = conf.save()
            if changed_fields:
                print('Updated {}: {}'.format(meeting, changed_fields))
        else:
            print('Added new Conference: {}'.format(meeting))
# Entry point when run as a standalone script.
if __name__ == '__main__':
    main()
| 32.693878 | 156 | 0.544112 | 2,335 | 24,030 | 5.475803 | 0.241113 | 0.034491 | 0.085875 | 0.1145 | 0.489989 | 0.430705 | 0.373221 | 0.347411 | 0.328641 | 0.308619 | 0 | 0.044415 | 0.297295 | 24,030 | 734 | 157 | 32.73842 | 0.71278 | 0.002206 | 0 | 0.534819 | 0 | 0.013928 | 0.497414 | 0.009594 | 0 | 0 | 0 | 0 | 0 | 1 | 0.002786 | false | 0 | 0.009749 | 0 | 0.012535 | 0.002786 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
da0602f1e855ed3a2c59e5d54ad317e3bc77bd87 | 3,563 | py | Python | clinicadl/clinicadl/subject_level/train_autoencoder.py | 921974496/AD-DL | 9a0303579a665800633024bdab1ac44f794a0c38 | [
"MIT"
] | 1 | 2020-11-30T01:39:12.000Z | 2020-11-30T01:39:12.000Z | clinicadl/clinicadl/subject_level/train_autoencoder.py | 921974496/AD-DL | 9a0303579a665800633024bdab1ac44f794a0c38 | [
"MIT"
] | null | null | null | clinicadl/clinicadl/subject_level/train_autoencoder.py | 921974496/AD-DL | 9a0303579a665800633024bdab1ac44f794a0c38 | [
"MIT"
] | null | null | null | from __future__ import print_function
import argparse
from os import path
from time import time
import sys
import torch
import torch.nn as nn
from torch.utils.data import DataLoader
from .utils import ae_finetuning
from ..tools.deep_learning.iotools import Parameters
from ..tools.deep_learning.data import MinMaxNormalization, MRIDataset, load_data
from ..tools.deep_learning import create_autoencoder, commandline_to_json
def train_autoencoder(params):
    """Train a convolutional autoencoder on subject-level MRI data.

    Parameters
    ----------
    params:
        Parameters object (see ``tools.deep_learning.iotools.Parameters``)
        holding every training hyper-parameter and path used below.

    Raises
    ------
    Exception
        If ``evaluation_steps`` is neither 1 nor a multiple of
        ``accumulation_steps`` (evaluations must land on accumulation
        boundaries).
    """
    if params.evaluation_steps % params.accumulation_steps != 0 and params.evaluation_steps != 1:
        raise Exception('Evaluation steps %d must be a multiple of accumulation steps %d' %
                        (params.evaluation_steps, params.accumulation_steps))

    if params.minmaxnormalization:
        transformations = MinMaxNormalization()
    else:
        transformations = None

    total_time = time()
    criterion = torch.nn.MSELoss()
    training_tsv, valid_tsv = load_data(params.tsv_path, params.diagnoses,
                                        params.split, params.n_splits,
                                        params.baseline)

    data_train = MRIDataset(params.input_dir, training_tsv,
                            params.preprocessing, transformations)
    data_valid = MRIDataset(params.input_dir, valid_tsv,
                            params.preprocessing, transformations)

    train_loader = DataLoader(data_train,
                              params.batch_size,
                              shuffle=True,
                              num_workers=params.num_workers,
                              drop_last=True
                              )

    # Bug fix: a duplicate DataLoader(data_valid) built with default
    # settings was created and immediately overwritten; only the fully
    # configured validation loader is kept.
    valid_loader = DataLoader(data_valid,
                              batch_size=params.batch_size,
                              shuffle=False,
                              num_workers=params.num_workers,
                              drop_last=False
                              )

    # Record interpreter/framework versions next to the outputs for
    # reproducibility; 'with' guarantees the file is closed even on error
    # (the previous open()/close() pair leaked the handle on exceptions).
    with open(path.join(params.output_dir, 'python_version.txt'), 'w') as text_file:
        text_file.write('Version of python: %s \n' % sys.version)
        text_file.write('Version of pytorch: %s \n' % torch.__version__)

    decoder = create_autoencoder(params.model, params.pretrained_path,
                                 difference=params.pretrained_difference)

    # getattr replaces eval("torch.optim." + name): same lookup for every
    # valid optimizer name, without evaluating arbitrary strings.
    optimizer_cls = getattr(torch.optim, params.optimizer)
    optimizer = optimizer_cls(filter(lambda x: x.requires_grad, decoder.parameters()),
                              params.learning_rate, weight_decay=params.weight_decay)

    if params.add_sigmoid:
        # Swap a trailing ReLU for a Sigmoid so reconstructions are bounded.
        if isinstance(decoder.decoder[-1], nn.ReLU):
            decoder.decoder = nn.Sequential(*list(decoder.decoder)[:-1])
        decoder.decoder.add_module("sigmoid", nn.Sigmoid())

    ae_finetuning(decoder, train_loader, valid_loader, criterion, optimizer, False, params)

    total_time = time() - total_time
    print('Total time', total_time)
#if __name__ == "__main__":
# commandline = parser.parse_known_args()
# commandline_to_json(commandline, 'ConvAutoencoder')
# options = commandline[0]
# if commandline[1]:
# print("unknown arguments: %s" % parser.parse_known_args()[1])
# train_params_autoencoder = Parameters(tsv_path, output_dir, input_dir, model)
# train_params_autoencoder.write(options)
# train_autoencoder(train_parameters_autoencoder)
| 39.153846 | 168 | 0.63991 | 383 | 3,563 | 5.704961 | 0.342037 | 0.020595 | 0.017849 | 0.028833 | 0.118993 | 0.071396 | 0.031121 | 0 | 0 | 0 | 0 | 0.002716 | 0.276733 | 3,563 | 90 | 169 | 39.588889 | 0.845169 | 0.158855 | 0 | 0.105263 | 0 | 0 | 0.053908 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.017544 | false | 0 | 0.210526 | 0 | 0.22807 | 0.035088 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
da08fa8384c712fcc3ccc0c8023334a1b12a22c0 | 375 | py | Python | globals/mime.py | RogueScholar/debreate | 0abc168c51336b31ff87c61f84bc7bb6000e88f4 | [
"MIT"
] | 97 | 2016-09-16T08:44:04.000Z | 2022-01-29T22:30:18.000Z | globals/mime.py | RogueScholar/debreate | 0abc168c51336b31ff87c61f84bc7bb6000e88f4 | [
"MIT"
] | 34 | 2016-09-20T00:42:45.000Z | 2021-04-16T07:21:44.000Z | globals/mime.py | RogueScholar/debreate | 0abc168c51336b31ff87c61f84bc7bb6000e88f4 | [
"MIT"
] | 24 | 2016-09-16T08:44:56.000Z | 2021-07-29T11:32:47.000Z | # -*- coding: utf-8 -*-
## \package globals.mime
# MIT licensing
# See: docs/LICENSE.txt
from globals.execute import GetCommandOutput
from globals.execute import GetExecutable
## TODO: Doxygen
## TODO: Doxygen
def GetFileMimeType(filename):
  """Return the MIME type of *filename* via the `file` utility, or None if unavailable."""
  cmd = GetExecutable(u'file')
  if cmd:
    return GetCommandOutput(cmd, (u'--mime-type', u'--brief', filename,))
  return None
| 17.857143 | 75 | 0.728 | 48 | 375 | 5.625 | 0.645833 | 0.077778 | 0.133333 | 0.177778 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.003106 | 0.141333 | 375 | 20 | 76 | 18.75 | 0.835404 | 0.248 | 0 | 0 | 0 | 0 | 0.080292 | 0 | 0 | 0 | 0 | 0.05 | 0 | 1 | 0.142857 | false | 0 | 0.285714 | 0 | 0.714286 | 0 | 0 | 0 | 0 | null | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2 |
da09c6430f4f0663e4ddd43367f12c8087614e78 | 1,761 | py | Python | src/deployer/result.py | jbenden/deployer | b036fa3030f99ed0730bb3770cf7e01c58c257f1 | [
"Apache-2.0"
] | 2 | 2018-08-30T14:14:13.000Z | 2022-03-24T15:19:29.000Z | src/deployer/result.py | jbenden/deployer | b036fa3030f99ed0730bb3770cf7e01c58c257f1 | [
"Apache-2.0"
] | null | null | null | src/deployer/result.py | jbenden/deployer | b036fa3030f99ed0730bb3770cf7e01c58c257f1 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# Copyright 2018 Joseph Benden <joe@benden.us>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
A module holding the results of all plug-ins.
.. moduleauthor:: Joseph Benden <joe@benden.us>
:copyright: (c) Copyright 2018 by Joseph Benden.
:license: Apache License 2.0, see LICENSE.txt for full details.
"""
class Result(dict):
    """Represents the resultant of a plug-in's execution."""

    def __bool__(self):
        """Cast to boolean: truthy unless the result is a failure or a continue."""
        outcome = self['result']
        return outcome not in ('failure', 'continue')

    def __nonzero__(self):
        """Cast to boolean (Python 2 compatibility alias)."""
        return self.__bool__()  # noqa: no-cover

    def __str__(self):
        """Return our `stdout` if present, otherwise returns the `result` value."""
        if 'stdout' in self:
            return self['stdout']
        return str(self['result'])

    def failed(self):
        """Determine if the resultant is a failure."""
        return self['result'] == 'failure'  # noqa: no-cover

    def succeeded(self):
        """Determine if the resultant is a success."""
        return self['result'] != 'failure'  # noqa: no-cover

    def skipped(self):
        """Determine if the resultant was skipped."""
        return self['result'] == 'skipped'  # noqa: no-cover
| 33.865385 | 83 | 0.662124 | 241 | 1,761 | 4.771784 | 0.46888 | 0.052174 | 0.055652 | 0.06 | 0.226957 | 0.163478 | 0.116522 | 0.064348 | 0 | 0 | 0 | 0.010815 | 0.212379 | 1,761 | 51 | 84 | 34.529412 | 0.818313 | 0.64452 | 0 | 0 | 0 | 0 | 0.14841 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.461538 | false | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 2 |
da0cc012c8071ddd102f587a464226bdf7578158 | 1,381 | py | Python | app.py | dhairyaostwal/bankingo | cc148940a9d4ae60d80acdc2e3c90a01a8a99c46 | [
"MIT"
] | 2 | 2021-12-11T02:32:35.000Z | 2021-12-12T08:42:41.000Z | app.py | dhairyaostwal/bankingo | cc148940a9d4ae60d80acdc2e3c90a01a8a99c46 | [
"MIT"
] | null | null | null | app.py | dhairyaostwal/bankingo | cc148940a9d4ae60d80acdc2e3c90a01a8a99c46 | [
"MIT"
] | null | null | null | from flask import Flask, render_template, request
import pickle
app = Flask(__name__)
# Module-level scratch list holding the most recent request's form inputs.
# NOTE(review): shared across requests — not safe under concurrent workers.
userInput = []
@app.route("/", methods=["GET", "POST"])
def hello():
    """Render the input form; on POST, classify the banknote features with the pickled model.

    Fix: the original accumulated request data in the module-level ``userInput``
    list, which is shared mutable state across requests (not thread-safe under
    concurrent workers).  A request-local list is used instead.
    """
    if request.method == "POST":
        # Collect the four banknote features submitted by the form.
        features = [
            request.form.get("variance"),
            request.form.get("skewness"),
            request.form.get("curtosis"),
            request.form.get("entropy"),
        ]
        # converting string to float values
        features = [float(value) for value in features]
        print("User input: ", features)
        # testing our pickle file
        with open('pickleOutput2', 'rb') as f:
            mp = pickle.load(f)
        pickle_test = mp.predict([features])
        print("Predicted Output: ", pickle_test)
        if pickle_test[0] == 1:
            return render_template("trueBundle.html")
        return render_template("falseBundle.html")
    return render_template("index.html")
@app.route("/verified/")
def verified():
    """Render the page shown for a verified (genuine) banknote."""
    return render_template("trueBundle.html")
@app.route("/not-verified/")
def notVerified():
    """Render the page shown for a non-verified (forged) banknote."""
    return render_template("falseBundle.html")
if __name__ == '__main__':
    # Run the Flask development server with debug enabled (not for production).
    app.debug = True
    app.run()
da0d025051a5ed1885fbb8e49bb40af12912744c | 3,210 | py | Python | AlarmTimer.py | amjith/PyAlarmTimer | f664daa42d9ec70fc7ac512ce71868c703e8a011 | [
"MIT"
] | 2 | 2015-01-13T00:36:29.000Z | 2015-04-12T19:17:32.000Z | AlarmTimer.py | amjith/PyAlarmTimer | f664daa42d9ec70fc7ac512ce71868c703e8a011 | [
"MIT"
] | 1 | 2015-01-12T23:02:28.000Z | 2015-01-13T00:36:26.000Z | AlarmTimer.py | amjith/PyAlarmTimer | f664daa42d9ec70fc7ac512ce71868c703e8a011 | [
"MIT"
] | null | null | null | import sys
from PyQt4 import QtCore, QtGui
from itertools import cycle
from Resources.LcdNumber_ui import Ui_Form
class AlarmTimer(QtGui.QMainWindow):
    """Frameless, always-on-top countdown window driven by a 250 ms Qt timer.

    Cycles endlessly through the given list of durations (seconds).  When the
    current countdown reaches zero the window blinks by alternating its
    background color until the user dismisses it with a click.
    """

    def __init__(self, timer_values, parent=None):
        # timer_values: list of countdown durations in seconds, cycled endlessly.
        QtGui.QWidget.__init__(self, parent)
        QtGui.QMainWindow.__init__(self, None, QtCore.Qt.WindowStaysOnTopHint|QtCore.Qt.FramelessWindowHint)
        self.ui = Ui_Form()
        self.ui.setupUi(self)
        # Initialize member variables
        self.color_names = [ "Normal", "Yellow" ]  # blink colors (indexed by color_idx - 1)
        self.color_idx = 1  # toggles between 1 and 2 while blinking (see showTimer)
        self.updateTimers(timer_values)
        self.cur_timer = self.timer_iter.next() # Current timer value
        self.snooze_time = 1 * 60  # snooze duration in seconds
        self.show()
        self.oneSecondCounter = 0  # counts 250 ms ticks; four ticks ~= one second
        self.timerPause = False
        # Start a timer for 250ms and call showTimer()
        timer = QtCore.QTimer(self)
        timer.timeout.connect(self.showTimer)
        timer.start(250)

    def showTimer(self):
        # 250 ms tick handler: refresh the LCD, count down once per second,
        # and blink the background once the countdown hits zero.
        if self.timerPause:
            return
        text = "%d:%02d" % (self.cur_timer/60,self.cur_timer % 60)
        self.ui.lcdNumber.display(text)
        if (self.cur_timer == 0):
            self.color_idx = 3 - self.color_idx  # toggle 1 <-> 2
            self.show()
            self.setStyleSheet("QWidget { background-color: %s }" % self.color_names[self.color_idx - 1])
        elif self.oneSecondCounter == 3:
            # Four 250 ms ticks (one second) have elapsed: decrement the countdown.
            self.cur_timer -= 1
            self.oneSecondCounter = 0
        else:
            self.oneSecondCounter += 1

    def updateTimers(self, timer_list):
        # Replace the list of alarm durations and restart the cycling iterator.
        self.alarm_times = timer_list
        self.timer_iter = cycle(self.alarm_times) # An iterator that cycles through the list

    def pauseTimer(self):
        # Toggle pause: while paused, showTimer() returns immediately.
        self.timerPause = not self.timerPause

    def resetTimer(self): # Reset the timer back to the head of the list
        self.timer_iter = cycle(self.alarm_times)
        self.cur_timer = self.timer_iter.next()

    def mouseReleaseEvent(self, event):
        # Right click hides the window (snoozing if expired); left click on an
        # expired (blinking) timer advances to the next duration.
        button = event.button()
        if button == 2:
            self.hide()
            if (self.cur_timer == 0):
                self.cur_timer = self.snooze_time # Start the timer with snooze value if the cur_timer has expired
        elif button == 1: # left click
            if (self.cur_timer == 0): # blinking timer should be closed on a left click
                self.cur_timer = self.timer_iter.next()
                self.setStyleSheet("QWidget { background-color: Normal }" )

    def mousePressEvent(self, event):
        # Record the click offset so mouseMoveEvent can drag the frameless window.
        button = event.button()
        if button == 1:
            self.dragPosition = event.globalPos() - self.frameGeometry().topLeft();

    def mouseMoveEvent(self, event):
        # Drag the window while the left button is held down.
        if event.buttons() != QtCore.Qt.LeftButton: # not left click
            return
        self.move(event.globalPos() - self.dragPosition)
def Str2Num(str_list):
    """Convert a list of numeric strings to numbers: ints where possible, else floats.

    :param str_list: iterable of numeric strings, e.g. ['1', '2.5']
    :return: list of int/float values in the same order
    :raises ValueError: if an element is neither a valid int nor a valid float
    """
    nums = []
    for token in str_list:  # renamed from 'str', which shadowed the builtin
        try:
            nums.append(int(token))
        except ValueError:
            nums.append(float(token))
    return nums
if __name__ == "__main__":
    # Parse alarm durations (seconds) from the command line and start the timer window.
    app = QtGui.QApplication(sys.argv)
    timerList = Str2Num(sys.argv[1:])
    myapp = AlarmTimer(timerList)
    myapp.show()
    sys.exit(app.exec_())
| 33.092784 | 121 | 0.610592 | 385 | 3,210 | 4.942857 | 0.348052 | 0.046243 | 0.063058 | 0.033631 | 0.20494 | 0.139254 | 0.119285 | 0.037835 | 0 | 0 | 0 | 0.014442 | 0.288162 | 3,210 | 96 | 122 | 33.4375 | 0.818381 | 0.098131 | 0 | 0.213333 | 0 | 0 | 0.03294 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.12 | false | 0 | 0.053333 | 0 | 0.226667 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
da0dfc1a5ddc1f3fd9cff38b3e12d87c2cfff865 | 3,200 | py | Python | backend/handlers/graphql/resolvers/quota.py | al-indigo/vmemperor | 80eb6d47d839a4736eb6f9d2fcfad35f0a7b3bb1 | [
"Apache-2.0"
] | null | null | null | backend/handlers/graphql/resolvers/quota.py | al-indigo/vmemperor | 80eb6d47d839a4736eb6f9d2fcfad35f0a7b3bb1 | [
"Apache-2.0"
] | 8 | 2017-10-11T13:26:10.000Z | 2021-12-13T20:27:52.000Z | backend/handlers/graphql/resolvers/quota.py | ispras/vmemperor | 80eb6d47d839a4736eb6f9d2fcfad35f0a7b3bb1 | [
"Apache-2.0"
] | 4 | 2017-07-27T12:25:42.000Z | 2018-01-28T02:06:26.000Z | from graphql import ResolveInfo
from rethinkdb.errors import ReqlNonExistenceError
from handlers.graphql.graphql_handler import ContextProtocol
from handlers.graphql.types.pool import Quota
from handlers.graphql.utils.query import resolve_from_root
import constants.re as re
from utils.quota import check_vdi_size, check_memory, check_vcpu_count, check_vm_count, get_used_vdi_size, \
get_used_memory, get_used_vcpu_count, get_used_vm_count
from utils.user import user_entities, get_user_object
def resolve_quotas(root, info, **args):
    """Return all quota records visible to the requesting user (admins see everything)."""
    from xenadapter import Pool
    ctx: ContextProtocol = info.context
    query = re.db.table(Pool.quotas_table_name)
    if not ctx.user_authenticator.is_admin():
        # Non-admins only see quotas for the entities they belong to.
        query = query.get_all(*user_entities(ctx.user_authenticator))
    return query.coerce_to('array').run()
def get_item(user):
    """Return the stored quota record for *user*, or an all-None Quota-shaped dict.

    Raises ValueError if the user does not exist at all.
    """
    from xenadapter import Pool
    record = re.db.table(Pool.quotas_table_name).get(user).run()
    if record:
        return record
    if not get_user_object(user):
        raise ValueError(f"No such user: {user}")
    # No quota stored yet: synthesize an empty record with every Quota field unset.
    empty = dict.fromkeys(Quota._meta.fields.keys())
    empty["user_id"] = user
    return empty
def resolve_quota(root, info, user):
    """Return the quota record for *user*, enforcing entity membership for non-admins."""
    ctx: ContextProtocol = info.context
    is_member = user in user_entities(ctx.user_authenticator)
    if not ctx.user_authenticator.is_admin() and not is_member:
        raise ValueError(f"Access denied: Not a member of an entity: {user}")
    return get_item(user)
def resolve_quota_left(root, info : ResolveInfo, user):
    """Return remaining quota amounts for *user*, limited to the GraphQL fields requested."""
    ctx: ContextProtocol = info.context
    if not ctx.user_authenticator.is_admin() and user not in user_entities(ctx.user_authenticator):
        raise ValueError(f"Access denied: Not a member of an entity: {user}")
    # Names of the fields the client actually selected.
    requested = {item.name.value for item in info.field_asts[0].selection_set.selections}
    # (GraphQL field, result key, checker) dispatch table.
    checks = (
        ('vdiSize', 'vdi_size', check_vdi_size),
        ('memory', 'memory', check_memory),
        ('vcpuCount', 'vcpu_count', check_vcpu_count),
        ('vmCount', 'vm_count', check_vm_count),
    )
    result = {}
    for field, key, check in checks:
        if field in requested:
            result[key] = check(user)
    if 'user' in requested:
        result['user_id'] = user
    return result
def resolve_quota_usage(root, info : ResolveInfo, user):
    """Return currently-used quota amounts for *user*, limited to the GraphQL fields requested."""
    ctx: ContextProtocol = info.context
    if not ctx.user_authenticator.is_admin() and user not in user_entities(ctx.user_authenticator):
        raise ValueError(f"Access denied: Not a member of an entity: {user}")
    # Names of the fields the client actually selected.
    requested = {item.name.value for item in info.field_asts[0].selection_set.selections}
    # (GraphQL field, result key, usage getter) dispatch table.
    usage_getters = (
        ('vdiSize', 'vdi_size', get_used_vdi_size),
        ('memory', 'memory', get_used_memory),
        ('vcpuCount', 'vcpu_count', get_used_vcpu_count),
        ('vmCount', 'vm_count', get_used_vm_count),
    )
    result = {}
    for field, key, getter in usage_getters:
        if field in requested:
            result[key] = getter(user)
    if 'user' in requested:
        result['user_id'] = user
    return result
| 32 | 123 | 0.690938 | 446 | 3,200 | 4.748879 | 0.201794 | 0.037771 | 0.0661 | 0.054769 | 0.621341 | 0.556185 | 0.556185 | 0.556185 | 0.508026 | 0.429651 | 0 | 0.000792 | 0.210625 | 3,200 | 99 | 124 | 32.323232 | 0.837688 | 0 | 0 | 0.478873 | 0 | 0 | 0.101753 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.070423 | false | 0 | 0.140845 | 0 | 0.309859 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
da0e334dd350b538cbe6369f8de20266f08cd7ab | 23,670 | py | Python | webapp/creators/parse_eml.py | PASTAplus/umbra | 25f179801ab86d6506759b19849de1f7a8bf9e8d | [
"Apache-2.0"
] | null | null | null | webapp/creators/parse_eml.py | PASTAplus/umbra | 25f179801ab86d6506759b19849de1f7a8bf9e8d | [
"Apache-2.0"
] | null | null | null | webapp/creators/parse_eml.py | PASTAplus/umbra | 25f179801ab86d6506759b19849de1f7a8bf9e8d | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
:Mod: propagate_names
:Synopsis: Parse EML files to collect information on the responsible parties, creating RESPONSIBLE_PARTIES_TEXT_FILE.
:Author:
ide
:Created:
6/1/21
"""
from enum import Enum, auto
import glob
import os
import pickle
import daiquiri
from flask import (
Flask, Blueprint, jsonify, request, current_app
)
from recordclass import recordclass
from webapp.config import Config
import webapp.creators.db as db
import webapp.creators.nlp as nlp
from metapype.eml import names
from metapype.model.metapype_io import from_xml
from metapype.model.node import Node
logger = daiquiri.getLogger(Config.LOG_FILE)
def log_info(msg):
    """Log *msg* at INFO level.

    Fix: the original built a brand-new Flask app on every call just to reach
    ``current_app.logger``; the module-level daiquiri ``logger`` is already
    configured (see top of file) and avoids that per-call overhead.
    """
    logger.info(msg)
def log_error(msg):
    """Log *msg* at ERROR level.

    Fix: the original built a brand-new Flask app on every call just to reach
    ``current_app.logger``; the module-level daiquiri ``logger`` is already
    configured (see top of file) and avoids that per-call overhead.
    """
    logger.error(msg)
class EMLTextComponents(Enum):
    """Selectable categories of text harvested from an EML document.

    Fix: the original had a trailing comma after every member but the last,
    which made those member values 1-tuples wrapping ``auto()`` instead of
    plain ints.  Only identity/membership tests are used in this module, so
    plain ``auto()`` values are backward-compatible.
    """
    DATASET_TITLE = auto()
    DATASET_ABSTRACT = auto()
    DATASET_KEYWORDS = auto()
    DATATABLE_DESCRIPTIONS = auto()
    DATASET_GEO_DESCRIPTIONS = auto()
    METHOD_STEP_DESCRIPTIONS = auto()
    PROJECT_TITLES = auto()
    PROJECT_ABSTRACTS = auto()
    RELATED_PROJECT_TITLES = auto()
    RELATED_PROJECT_ABSTRACTS = auto()
# Mutable named-tuple-like record for one <project> element's harvested text.
ProjectText = recordclass(
    'ProjectText',
    'project_title project_abstract'
)

# Mutable record holding all text harvested from one data package's EML.
EMLText = recordclass(
    'EMLText',
    'dataset_title dataset_abstract dataset_keywords datatable_descriptions dataset_geographic_descriptions method_step_descriptions projects related_projects'
)

# Cache of harvested EMLText records keyed by package id (pid); populated by
# init_eml_text_by_pid()/harvest_eml_text() and persisted via pickle.
eml_text_by_pid = {}
def xml_to_json(filepath):
    """Parse the EML XML file at *filepath* into a Metapype node tree.

    :return: the root Node, or None if Metapype fails to parse the file.

    Fix: removed the unused ``cwd = os.getcwd()`` local.
    """
    with open(filepath, 'r') as fp:
        xml = fp.read()
    try:
        return from_xml(xml)
    except Exception as err:
        # Best-effort: report and skip unparseable documents rather than abort a harvest.
        print(f'Metapype failed to convert xml to json for file {filepath}. Error:{err}')
        return None
def parse_section(node):
    """Extract display text from an EML <section> node.

    If the node carries its own content, that alone is returned.  Otherwise
    the section title (if any) is collected, followed by text from the first
    nested <section> (recursively) or, failing that, the first <para>.
    """
    text = []
    if node.content:
        text.append(node.content)
        return text
    title = node.find_child(names.TITLE)
    if title and title.content:
        text.append(title.content)
    section = node.find_child(names.SECTION)
    if section:
        # Recurse into the nested section; a section takes precedence over a para.
        text.extend(parse_section(section))
        return text
    para = node.find_child(names.PARA)
    if para:
        text.extend(parse_para(para))
        return text
    return text
def parse_para(node):
    """Return the text of an EML <para> node as a list (empty if no text found)."""
    if node.content:
        return [node.content]
    # DocBook-style paras may wrap their text in a <value> child.
    value = node.find_child(names.VALUE)
    if value and value.content:
        return [value.content]
    return []
def parse_text_type(node):
    """Extract text from an EML TextType node: its own content, a <section>, or a <para>."""
    if node.content:
        return [node.content]
    section = node.find_child(names.SECTION)
    if section:
        return parse_section(section)
    para = node.find_child(names.PARA)
    if para:
        return parse_para(para)
    return []
def get_existing_eml_files():
    """Return the basenames of all .xml files in the configured EML directory."""
    pattern = f'{Config.EML_FILES_PATH}/*.xml'
    return [os.path.basename(path) for path in glob.glob(pattern)]
def get_dataset_title(eml_node):
    """Return the dataset title as a one-element list."""
    # Prefer the <value> child; fall back to the <title> node itself.
    node = eml_node.find_single_node_by_path([names.DATASET, names.TITLE, names.VALUE])
    if not node:
        node = eml_node.find_single_node_by_path([names.DATASET, names.TITLE])
    return [node.content]
def get_dataset_abstract(eml_node):
    """Return the dataset abstract as a list of text fragments ([] if absent)."""
    # A para may sit directly under <abstract> or inside an intermediate <section>.
    node = eml_node.find_single_node_by_path([names.DATASET, names.ABSTRACT, names.PARA])
    if not node:
        node = eml_node.find_single_node_by_path([names.DATASET, names.ABSTRACT, names.SECTION, names.PARA])
    return parse_text_type(node) if node else []
def harvest_projects(eml_node):
    """Return (project_text, related_project_text) harvested from the dataset's projects."""
    project_text = get_project_text(
        eml_node.find_all_nodes_by_path([names.DATASET, names.PROJECT]))
    related_text = get_project_text(
        eml_node.find_all_nodes_by_path([names.DATASET, names.PROJECT, names.RELATED_PROJECT]))
    return project_text, related_text
def get_project_text(project_nodes):
    """Build a ProjectText record for each project (or relatedProject) node.

    :param project_nodes: iterable of <project>/<relatedProject> nodes
    :return: list of ProjectText records whose fields are always lists

    Fix: the original initialized a missing title/abstract to '' while a
    present one was a list, so downstream ``' '.join(...)``/``clean_list``
    callers saw inconsistent types.  Both fields now default to [] (joining
    an empty list still yields '', so behavior is unchanged).
    """
    project_text = []
    for project_node in project_nodes:
        title = []
        abstract = []
        title_node = project_node.find_child(names.TITLE)
        if title_node:
            title = [title_node.content]
        abstract_node = project_node.find_child(names.ABSTRACT)
        if abstract_node:
            abstract = parse_text_type(abstract_node)
        project_text.append(ProjectText(
            project_title=title,
            project_abstract=abstract))
    return project_text
def get_project_titles(eml_node):
    """Return each non-empty project title as a one-element list."""
    title_nodes = eml_node.find_all_nodes_by_path([names.DATASET, names.PROJECT, names.TITLE])
    return [[node.content] for node in title_nodes if node.content]
def get_project_abstracts(eml_node):
    """Return the flattened text of every project abstract <para>."""
    abstracts = []
    para_nodes = eml_node.find_all_nodes_by_path([names.DATASET, names.PROJECT, names.ABSTRACT, names.PARA])
    for para_node in para_nodes:
        abstracts.extend(parse_text_type(para_node))
    return abstracts
def get_keywords(eml_node):
    """Return the content of every <keyword> element in the document."""
    keyword_nodes = []
    eml_node.find_all_descendants(names.KEYWORD, keyword_nodes)
    return [node.content for node in keyword_nodes]
def get_all_ranks(eml_node, rank):
    """Return sorted, de-duplicated taxon values whose rank name equals *rank* (lowercase)."""
    rank_nodes = []
    eml_node.find_all_descendants(names.TAXONRANKNAME, rank_nodes)
    # The value node is a sibling of the rank-name node under the same parent.
    matches = {
        node.parent.find_child(names.TAXONRANKVALUE).content
        for node in rank_nodes
        if node.content.lower() == rank
    }
    return sorted(matches)
def get_all_genera(eml_node):
    """Return all distinct genus names in the document's taxonomic coverage."""
    return get_all_ranks(eml_node, 'genus')
def get_all_species(eml_node):
    """Return all distinct species names in the document's taxonomic coverage."""
    return get_all_ranks(eml_node, 'species')
def get_children(parent_node, child_name):
    """Return (child_name, content) pairs for each non-empty *child_name* child."""
    return [
        (child_name, child.content)
        for child in parent_node.find_all_children(child_name)
        if child.content
    ]
def get_person(rp_node):
    """Collect (tag, value) pairs identifying the person in a responsible-party node."""
    person = []
    individual_name_node = rp_node.find_child(names.INDIVIDUALNAME)
    if individual_name_node:
        # Name parts live under <individualName>.
        person.extend(get_children(individual_name_node, names.SALUTATION))
        person.extend(get_children(individual_name_node, names.GIVENNAME))
        person.extend(get_children(individual_name_node, names.SURNAME))
    # Organization and position are direct children of the responsible party.
    person.extend(get_children(rp_node, names.ORGANIZATIONNAME))
    person.extend(get_children(rp_node, names.POSITIONNAME))
    return person
def get_address(rp_node):
    """Collect (tag, value) pairs from the responsible party's <address>, if present."""
    address = []
    address_node = rp_node.find_child(names.ADDRESS)
    if address_node:
        address.extend(get_children(address_node, names.DELIVERYPOINT))
        address.extend(get_children(address_node, names.CITY))
        address.extend(get_children(address_node, names.ADMINISTRATIVEAREA))
        address.extend(get_children(address_node, names.POSTALCODE))
        address.extend(get_children(address_node, names.COUNTRY))
    return address
def get_responsible_party(rp_node):
    """Flatten one responsible-party node into an ordered list of (tag, value) pairs."""
    party = get_person(rp_node) + get_address(rp_node)
    # Contact details follow the person/address pairs, in this fixed order.
    for tag in (names.PHONE, names.ELECTRONICMAILADDRESS, names.ONLINEURL, names.USERID):
        party.extend(get_children(rp_node, tag))
    return party
def get_responsible_parties(pid, eml_node, path):
    """Return (pid, role, party) triples for every party at *path*; the role is path[-1]."""
    role = path[-1]
    return [
        (pid, role, get_responsible_party(rp_node))
        for rp_node in eml_node.find_all_nodes_by_path(path)
    ]
def get_creators(pid, eml_node):
    """Responsible-party triples for the dataset's creators."""
    return get_responsible_parties(pid, eml_node, [names.DATASET, names.CREATOR])


def get_contacts(pid, eml_node):
    """Responsible-party triples for the dataset's contacts."""
    return get_responsible_parties(pid, eml_node, [names.DATASET, names.CONTACT])


def get_associated_parties(pid, eml_node):
    """Responsible-party triples for the dataset's associated parties."""
    return get_responsible_parties(pid, eml_node, [names.DATASET, names.ASSOCIATEDPARTY])


def get_metadata_providers(pid, eml_node):
    """Responsible-party triples for the dataset's metadata providers."""
    return get_responsible_parties(pid, eml_node, [names.DATASET, names.METADATAPROVIDER])


def get_project_personnel(pid, eml_node):
    """Responsible-party triples for the dataset project's personnel."""
    return get_responsible_parties(pid, eml_node, [names.DATASET, names.PROJECT, names.PERSONNEL])


def get_related_project_personnel(pid, eml_node):
    """Responsible-party triples for related-project personnel."""
    return get_responsible_parties(pid, eml_node, [names.DATASET, names.PROJECT, names.RELATED_PROJECT, names.PERSONNEL])
def get_all_responsible_parties(pid, eml_node):
    """Gather responsible-party triples from every party-bearing element of the dataset."""
    collectors = (
        get_creators,
        get_contacts,
        get_associated_parties,
        get_metadata_providers,
        get_project_personnel,
        get_related_project_personnel,
    )
    responsible_parties = []
    for collect in collectors:
        responsible_parties.extend(collect(pid, eml_node))
    return responsible_parties
def get_data_table_descriptions(eml_node):
    """Return the flattened text of every dataTable <entityDescription>."""
    descriptions = []
    nodes = eml_node.find_all_nodes_by_path([names.DATASET, names.DATATABLE, names.ENTITYDESCRIPTION])
    for node in nodes:
        descriptions.extend(parse_text_type(node))
    return descriptions
def get_method_step_descriptions(eml_node):
    """Return the flattened text of every methodStep <description>."""
    descriptions = []
    nodes = eml_node.find_all_nodes_by_path(
        [names.DATASET, names.METHODS, names.METHODSTEP, names.DESCRIPTION])
    for node in nodes:
        descriptions.extend(parse_text_type(node))
    return descriptions
def get_all_titles_and_abstracts(eml_node):
    """Return (dataset_title, dataset_abstract, project_titles, project_abstracts, all_text).

    project_titles/project_abstracts are currently always empty here (project
    text is harvested separately by harvest_projects), but the concatenation
    loops are kept for when they are populated.

    Fix: the project-abstract loop joined ``dataset_abstract`` instead of the
    loop variable ``abstract`` — dead code today (the list is empty) but wrong
    once populated.
    """
    dataset_title = get_dataset_title(eml_node)
    dataset_abstract = get_dataset_abstract(eml_node)
    project_titles = []
    project_abstracts = []
    all_text = dataset_title[0] + " "
    if dataset_abstract:
        all_text += ' '.join(dataset_abstract)
    for title in project_titles:
        all_text += title[0] + " "
    for abstract in project_abstracts:
        all_text += ' '.join(abstract)
    return dataset_title, dataset_abstract, project_titles, project_abstracts, all_text
def get_dataset_geographic_descriptions(eml_node):
    """Return the non-empty <geographicDescription> strings from the dataset coverage."""
    nodes = eml_node.find_all_nodes_by_path([
        names.DATASET,
        names.COVERAGE,
        names.GEOGRAPHICCOVERAGE,
        names.GEOGRAPHICDESCRIPTION,
    ])
    return [node.content for node in nodes if node.content]
def parse_eml_file(filename):
    """Parse one EML file (a basename) from the configured EML directory.

    :param filename: e.g. 'knb-lter-fce.1143.2.xml'
    :return: (pid, root node or None); the pid is the filename minus '.xml'.

    Fix: the filepath ignored *filename* entirely (a mangled placeholder);
    it must point at the named file inside Config.EML_FILES_PATH.
    """
    pid = filename[:-4]
    filepath = f'{Config.EML_FILES_PATH}/{filename}'
    eml_node = xml_to_json(filepath)
    return pid, eml_node
def collect_responsible_parties(filename, added_package_ids=None, removed_package_ids=None, trace=False):
    """Incrementally rebuild the responsible-parties text file.

    Reads the existing file (*filename*), drops entries for removed packages,
    rewrites the survivors, then appends freshly parsed parties for the added
    packages (or for every EML file on disk when added_package_ids is None).
    """
    if added_package_ids == [] and removed_package_ids == []:
        return  # nothing to add or remove
    responsible_parties = db.parse_responsible_parties_file(filename)
    db.prune_pids(responsible_parties, removed_package_ids)
    # write the existing responsible parties, minus the ones to be removed
    # NOTE(review): output path contains a placeholder — presumably it should
    # name the responsible-parties file; confirm against the deployment.
    output_filename = f'{Config.EML_FILES_PATH}/(unknown)'
    with open(output_filename, 'w') as output_file:
        for _, val in responsible_parties.items():
            for line in val:
                output_file.write(line)
                output_file.write('\n')
    # now, append the new responsible parties
    with open(output_filename, 'a') as output_file:
        filelist = get_existing_eml_files()
        if trace:
            log_info(f'len(filelist)={len(filelist)}')
        for index, filename in enumerate(filelist):
            pid = os.path.splitext(filename)[0]
            # When a specific set of additions was given, skip everything else.
            if added_package_ids and pid not in added_package_ids:
                continue
            pid, eml_node = parse_eml_file(filename)
            if eml_node:
                if trace:
                    log_info(f' Adding {index} - {pid}')
                responsible_parties = get_all_responsible_parties(pid, eml_node)
                for responsible_party in responsible_parties:
                    output_file.write(str(responsible_party))
                    output_file.write('\n')
                    output_file.flush()
                # We're done with the JSON model. Delete it so we don't run out of memory.
                Node.delete_node_instance(eml_node.id, True)
def collect_titles_and_abstracts(output_filename):
    """Write each package's pid and its concatenated title/abstract text to *output_filename*.

    Output format: a pid line followed by a single line of newline-stripped text.
    """
    with open(output_filename, 'w') as output_file:
        filelist = get_existing_eml_files()
        for index, filename in enumerate(filelist):
            # if filename.startswith('edi.'): # TEMP
            pid = filename[:-4]
            # NOTE(review): path contains a placeholder — presumably it should
            # end in the current filename; confirm before relying on this.
            filepath = f'{Config.EML_FILES_PATH}/(unknown)'
            eml_node = xml_to_json(filepath)
            if not eml_node:
                continue
            dataset_title, dataset_abstract, project_titles, project_abstracts, all_text = get_all_titles_and_abstracts(eml_node)
            all_text = all_text.replace('\n', '')
            output_file.write(f'{pid}\n')
            output_file.write(f'{all_text}\n')
def collect_method_step_descriptions(output_filename):
    """Work-in-progress: iterate all EML files and extract method-step descriptions.

    NOTE(review): the harvested text is never written — the write calls are
    commented out and ``text`` is overwritten — so this currently only opens
    (and truncates) *output_filename*.
    """
    with open(output_filename, 'w') as output_file:
        filelist = get_existing_eml_files()
        for index, filename in enumerate(filelist):
            # if filename.startswith('edi.'): # TEMP
            pid = filename[:-4]
            # NOTE(review): path contains a placeholder — presumably it should
            # end in the current filename; confirm before relying on this.
            filepath = f'{Config.EML_FILES_PATH}/(unknown)'
            eml_node = xml_to_json(filepath)
            if not eml_node:
                continue
            text = get_data_table_descriptions(eml_node)
            text = get_method_step_descriptions(eml_node)
            # all_text = all_text.replace('\n', '')
            # output_file.write(f'{pid}\n')
            # output_file.write(f'{all_text}\n')
def collect_text_for_scope(scope):
    """Concatenate harvested text for every package whose filename starts with *scope*.

    Combines data-table descriptions and the title/abstract text; method-step
    harvesting is currently disabled (commented out).
    """
    text = []
    filelist = get_existing_eml_files()
    for index, filename in enumerate(filelist):
        if filename.startswith(scope):
            # NOTE(review): path contains a placeholder — presumably it should
            # end in the current filename; confirm before relying on this.
            filepath = f'{Config.EML_FILES_PATH}/(unknown)'
            eml_node = xml_to_json(filepath)
            if not eml_node:
                continue
            text1 = get_data_table_descriptions(eml_node)
            text2 = [] #get_method_step_descriptions(eml_node)
            *_, text3 = get_all_titles_and_abstracts(eml_node)
            text.append(' '.join(text1) + ' '.join(text2) + text3)
    return ' '.join(text)
def collect_text(pids):
    """Concatenate title/abstract text for the given package ids.

    Data-table and method-step harvesting are currently disabled (commented out).
    """
    text = []
    for pid in pids:
        filename = pid + '.xml'
        # NOTE(review): path contains a placeholder — presumably it should end
        # in the computed filename; confirm before relying on this.
        filepath = f'{Config.EML_FILES_PATH}/(unknown)'
        eml_node = xml_to_json(filepath)
        if not eml_node:
            continue
        text1 = [] #get_data_table_descriptions(eml_node)
        text2 = [] #get_method_step_descriptions(eml_node)
        *_, text3 = get_all_titles_and_abstracts(eml_node)
        text.append(' '.join(text1) + ' '.join(text2) + text3)
    return ' '.join(text)
def init_eml_text_by_pid():
    """Load the harvested-EML-text cache from its pickle file, if present.

    Populates the module-level ``eml_text_by_pid`` dict and returns it
    (unchanged — possibly empty — when the pickle file does not exist yet).

    Fixes: the filepath ignored the ``filename`` local (mangled placeholder),
    and the function returned None instead of the cache when the file was
    missing.
    """
    global eml_text_by_pid
    filename = 'eml_text_by_pid.pkl'
    filepath = f'{Config.DATA_FILES_PATH}/{filename}'
    try:
        with open(filepath, 'rb') as pf:
            eml_text_by_pid = pickle.load(pf)
        print(f'Init harvest EML text... count={len(eml_text_by_pid)}')
    except FileNotFoundError:
        pass  # first run: no cache saved yet
    return eml_text_by_pid
def save_eml_text_by_pid():
    """Persist the module-level eml_text_by_pid cache to its pickle file.

    Fix: the filepath ignored the ``filename`` local (mangled placeholder);
    it must name the pickle file inside Config.DATA_FILES_PATH.
    """
    global eml_text_by_pid
    filename = 'eml_text_by_pid.pkl'
    filepath = f'{Config.DATA_FILES_PATH}/{filename}'
    with open(filepath, 'wb') as pickle_file:
        pickle.dump(eml_text_by_pid, pickle_file)
def clean_projects(projects):
    """Clean each ProjectText record's title/abstract in place; return the records as a list."""
    for project in projects:
        project.project_title = clean_list(project.project_title)
        project.project_abstract = clean_list(project.project_abstract)
    return list(projects)
def clean_list(l):
    """Return a copy of *l* with each string cleaned (digits stripped) via nlp.clean."""
    cleaned = []
    for s in l:
        cleaned.append(nlp.clean(s, remove_digits=True))
    return cleaned
def harvest_eml_text(pids=None):
    """Harvest and cache cleaned text from each pid's EML file.

    :param pids: package ids to harvest; defaults to every pid known to the db.

    Pids already in the cache are skipped.  The cache is saved every 100 newly
    harvested packages and once more at the end.

    Fix: the filepath ignored the computed ``filename`` (mangled placeholder);
    it must point at the package's EML file.
    """
    global eml_text_by_pid
    if not pids:
        pids = db.get_all_pids()
    init_eml_text_by_pid()
    count = len(eml_text_by_pid)
    for pid in pids:
        if eml_text_by_pid.get(pid):
            continue  # already harvested
        filename = pid + '.xml'
        filepath = f'{Config.EML_FILES_PATH}/{filename}'
        eml_node = xml_to_json(filepath)
        if not eml_node:
            continue  # unparseable or missing EML; skip this package
        dataset_title = get_dataset_title(eml_node)
        dataset_abstract = get_dataset_abstract(eml_node)
        dataset_keywords = get_keywords(eml_node)
        datatable_descriptions = get_data_table_descriptions(eml_node)
        dataset_geographic_descriptions = get_dataset_geographic_descriptions(eml_node)
        method_step_descriptions = get_method_step_descriptions(eml_node)
        projects, related_projects = harvest_projects(eml_node)
        eml_text_by_pid[pid] = EMLText(
            dataset_title=clean_list(dataset_title),
            dataset_abstract=clean_list(dataset_abstract),
            dataset_keywords=clean_list(dataset_keywords),
            datatable_descriptions=clean_list(datatable_descriptions),
            dataset_geographic_descriptions=clean_list(dataset_geographic_descriptions),
            method_step_descriptions=clean_list(method_step_descriptions),
            projects=clean_projects(projects),
            related_projects=clean_projects(related_projects)
        )
        count += 1
        if count % 100 == 0:
            # Periodic checkpoint so a long harvest can be resumed.
            print(f'Saving... count={count}')
            save_eml_text_by_pid()
    save_eml_text_by_pid()
def concat_project_text(projects, related_projects,
                        components=(EMLTextComponents.PROJECT_TITLES,
                                    EMLTextComponents.PROJECT_ABSTRACTS,
                                    EMLTextComponents.RELATED_PROJECT_TITLES,
                                    EMLTextComponents.RELATED_PROJECT_ABSTRACTS)):
    """Concatenate the selected title/abstract text of project and related-project records.

    Fix: the related-project loop tested the PROJECT_* flags, so the
    RELATED_PROJECT_TITLES / RELATED_PROJECT_ABSTRACTS components were never
    honored.  With the default components (all four present) behavior is
    unchanged.
    """
    project_text = ''
    for project in projects:
        if EMLTextComponents.PROJECT_TITLES in components:
            project_text += ' '.join(project.project_title)
        if EMLTextComponents.PROJECT_ABSTRACTS in components:
            project_text += ' '.join(project.project_abstract)
    for related_project in related_projects:
        if EMLTextComponents.RELATED_PROJECT_TITLES in components:
            project_text += ' '.join(related_project.project_title)
        if EMLTextComponents.RELATED_PROJECT_ABSTRACTS in components:
            project_text += ' '.join(related_project.project_abstract)
    return project_text
def get_eml_text_as_string(pid, components=(EMLTextComponents.DATASET_TITLE,
                                            EMLTextComponents.DATASET_ABSTRACT,
                                            EMLTextComponents.DATASET_KEYWORDS,
                                            EMLTextComponents.DATATABLE_DESCRIPTIONS,
                                            EMLTextComponents.PROJECT_TITLES,
                                            EMLTextComponents.PROJECT_ABSTRACTS,
                                            EMLTextComponents.RELATED_PROJECT_TITLES,
                                            EMLTextComponents.RELATED_PROJECT_ABSTRACTS)):
    """Concatenate the cached EML text for *pid*, limited to the requested components.

    Loads the pickle-backed cache on first use.  Returns '' when the pid has
    no cached entry.
    """
    if not eml_text_by_pid:
        init_eml_text_by_pid()
    eml_string = ''
    eml_text = eml_text_by_pid.get((pid))
    if not eml_text:
        return ''
    if EMLTextComponents.DATASET_TITLE in components:
        eml_string += ' '.join(eml_text.dataset_title)
    if EMLTextComponents.DATASET_ABSTRACT in components:
        eml_string += ' '.join(eml_text.dataset_abstract)
    if EMLTextComponents.DATASET_KEYWORDS in components:
        eml_string += ' '.join(eml_text.dataset_keywords)
    if EMLTextComponents.DATATABLE_DESCRIPTIONS in components:
        eml_string += ' '.join(eml_text.datatable_descriptions)
    if EMLTextComponents.DATASET_GEO_DESCRIPTIONS in components:
        eml_string += ' '.join(eml_text.dataset_geographic_descriptions)
    if EMLTextComponents.METHOD_STEP_DESCRIPTIONS in components:
        eml_string += ' '.join(eml_text.method_step_descriptions)
    # Project/related-project text selection is delegated to concat_project_text().
    eml_string += concat_project_text(eml_text.projects,
                                      eml_text.related_projects,
                                      components)
    return eml_string
def get_eml_text_as_string_by_name(givenname, surname,
                                   components=(EMLTextComponents.DATASET_TITLE,
                                               EMLTextComponents.DATASET_ABSTRACT,
                                               EMLTextComponents.DATASET_KEYWORDS,
                                               EMLTextComponents.DATATABLE_DESCRIPTIONS,
                                               EMLTextComponents.PROJECT_TITLES,
                                               EMLTextComponents.PROJECT_ABSTRACTS,
                                               EMLTextComponents.RELATED_PROJECT_TITLES,
                                               EMLTextComponents.RELATED_PROJECT_ABSTRACTS)):
    """Concatenate cached EML text for every package authored by (givenname, surname)."""
    if not eml_text_by_pid:
        init_eml_text_by_pid()
    return ''.join(
        get_eml_text_as_string(pid, components)
        for pid in db.get_pids_by_name(givenname, surname)
    )
def get_eml_keywords_by_name(givenname, surname):
    """Return all dataset keywords (with duplicates) across the packages
    associated with the named responsible party."""
    if not eml_text_by_pid:
        init_eml_text_by_pid()
    keywords = []
    for pid in db.get_pids_by_name(givenname, surname):
        entry = eml_text_by_pid.get(pid)
        if entry:
            keywords.extend(entry.dataset_keywords)
    return keywords
# Script entry point: intentionally a no-op. The commented lines below are
# ad-hoc experiments kept for reference; uncomment individually to run them.
if __name__ == '__main__':
    pass
    # collect_responsible_parties(f'{EML_FILES_PATH}/responsible_parties.txt')
    # harvest_eml_text()
    # raise ValueError
    #
    # from collections import Counter
    # givenname = 'Diana'
    # surname = 'Wall'
    # keywords = get_eml_keywords_by_name(givenname, surname)
    # counter = Counter(keywords)
    # highest = counter.most_common(20)
    #
    # text = get_eml_text_as_string_by_name(givenname, surname)
    # lemmas = nlp.lemmatize(text)
    # counter = Counter(lemmas)
    # highest = counter.most_common(30)

    # pids = db.get_all_pids()
    # harvest_eml_text(pids)
    # for pid in pids:
    #     eml_string = get_eml_text_as_string(pid)

    # text = collect_text_for_scope('knb-lter-sbc')
    # collect_method_step_descriptions('foo.txt')

    # filename = 'knb-lter-fce.1143.2.xml'
    # pid, eml_node = parse_eml_file(filename)
    # if eml_node:
    #     text1 = get_data_table_descriptions(eml_node)
    #     text2 = get_method_step_descriptions(eml_node)
    #     *_, text3 = get_all_titles_and_abstracts(eml_node)
    #     all_text = ' '.join(text1) + ' '.join(text1) + text3

    # collect_responsible_parties(f'{EML_FILES_PATH}/responsible_parties.txt')
    # collect_titles_and_abstracts(f'{EML_FILES_PATH}/titles_and_abstracts.txt')
| 35.170877 | 159 | 0.673722 | 2,855 | 23,670 | 5.239229 | 0.100525 | 0.041182 | 0.015644 | 0.020858 | 0.574007 | 0.518853 | 0.458484 | 0.391496 | 0.334336 | 0.324041 | 0 | 0.00245 | 0.241318 | 23,670 | 672 | 160 | 35.223214 | 0.830493 | 0.075792 | 0 | 0.323045 | 0 | 0 | 0.039316 | 0.021583 | 0 | 0 | 0 | 0 | 0 | 1 | 0.098765 | false | 0.004115 | 0.026749 | 0.018519 | 0.257202 | 0.00823 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
da0ee8203f152fc62fcdc9038c65fc7421a7da86 | 2,399 | py | Python | pywrap.py | rosejn/pywrap | 21076e66e3ae99013524e4f391a5928193072fd6 | [
"MIT"
] | null | null | null | pywrap.py | rosejn/pywrap | 21076e66e3ae99013524e4f391a5928193072fd6 | [
"MIT"
] | null | null | null | pywrap.py | rosejn/pywrap | 21076e66e3ae99013524e4f391a5928193072fd6 | [
"MIT"
] | null | null | null | import ctypes
def _wrap(functype, name, library, restype, params, errcheck=None):
    """Build a ctypes foreign-function object.

    *functype* is a prototype factory (CFUNCTYPE/WINFUNCTYPE), *name* the
    exported symbol, *library* the loaded DLL, *restype* the return type and
    *params* a sequence of Parameter-like objects exposing ``.type`` and
    ``.paramflags``. Optionally attaches an ``errcheck`` callback.
    """
    argtypes = tuple(param.type for param in params)
    prototype = functype(restype, *argtypes)
    flags = tuple(param.paramflags for param in params)
    wrapped = prototype((name, library), flags)
    if errcheck:
        wrapped.errcheck = errcheck
    return wrapped
def wrap_winapi(name, library, restype, params, errcheck=None):
    """Wrap a stdcall (WINAPI) function exported by *library*."""
    return _wrap(ctypes.WINFUNCTYPE, name, library, restype, params,
                 errcheck=errcheck)
def wrap_cdecl(name, library, restype, params, errcheck=None):
    """Wrap a cdecl calling-convention function exported by *library*."""
    return _wrap(ctypes.CFUNCTYPE, name, library, restype, params,
                 errcheck=errcheck)
class Parameter(object):
    """Description of one ctypes function parameter.

    Carries the parameter's name, ctypes type, optional default value and
    input/output direction, and renders them as a ctypes ``paramflags``
    tuple.
    """

    def __init__(self, name, type_, default=None, out=False):
        self._name = name
        self._type = type_
        self._out = out
        self._default = default

    @property
    def flag(self):
        # ctypes direction flag: 1 = input parameter, 2 = output parameter.
        return 2 if self._out else 1

    @property
    def type(self):
        """The ctypes type of this parameter."""
        return self._type

    @property
    def paramflags(self):
        """``(flag, name)`` or ``(flag, name, default)`` when a default is set."""
        if self._default is None:
            return (self.flag, self._name)
        return (self.flag, self._name, self._default)
class Errcheck(object):
    """Namespace of reusable ctypes ``errcheck`` callbacks.

    Each callback receives ``(result, func, args)`` as ctypes supplies them,
    returns *result* on success and raises ``ctypes.WinError()`` on failure.
    NOTE: ``WinError`` and ``get_last_error`` exist on Windows only.
    """

    @staticmethod
    def expect_true(result, func, args):
        # Fail when the call returned a falsy value (FALSE/0/NULL).
        if not result:
            raise ctypes.WinError()
        return result

    @staticmethod
    def expect_null(result, func, args):
        # Fail when the call returned anything truthy (expected NULL/0).
        if result:
            raise ctypes.WinError()
        return result

    @staticmethod
    def expect_not_null(result, func, args):
        # Fail when the call returned NULL/0.
        if not result:
            raise ctypes.WinError()
        return result

    @staticmethod
    def expect_value(value):
        """Return an errcheck requiring ``result == value``."""
        def errcheck(result, func, args):
            if result != value:
                raise ctypes.WinError()
            return result
        return errcheck

    @staticmethod
    def expect_lasterror(value):
        """Return an errcheck requiring ``GetLastError() == value``."""
        def errcheck(result, func, args):
            if ctypes.get_last_error() != value:
                raise ctypes.WinError()
            return result
        return errcheck

    @staticmethod
    def expect_no_error(result, func, args):
        # Fail when GetLastError() reports any nonzero error code.
        if ctypes.get_last_error():
            raise ctypes.WinError()
        return result

    @staticmethod
    def print_all(result, func, args):
        """Debug helper: echo the call's result/function/args, pass result through."""
        print(result, func, args)
        return result
| 24.731959 | 87 | 0.615673 | 265 | 2,399 | 5.449057 | 0.215094 | 0.055402 | 0.077562 | 0.066482 | 0.530471 | 0.509695 | 0.429363 | 0.364266 | 0.317175 | 0.204986 | 0 | 0.001784 | 0.298875 | 2,399 | 96 | 88 | 24.989583 | 0.856718 | 0 | 0 | 0.424658 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.219178 | false | 0 | 0.013699 | 0.041096 | 0.493151 | 0.027397 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
da0fb1e76df7bb263d04fdeb069e451fb04e547a | 2,756 | py | Python | pp/components/ring_single.py | flaport/gdsfactory | 1f2e844c1fe27b9c6340e2d51500fd3358fa16e5 | [
"MIT"
] | 8 | 2020-08-25T11:25:18.000Z | 2022-03-27T11:32:11.000Z | pp/components/ring_single.py | flaport/gdsfactory | 1f2e844c1fe27b9c6340e2d51500fd3358fa16e5 | [
"MIT"
] | null | null | null | pp/components/ring_single.py | flaport/gdsfactory | 1f2e844c1fe27b9c6340e2d51500fd3358fa16e5 | [
"MIT"
] | 1 | 2022-03-04T07:03:29.000Z | 2022-03-04T07:03:29.000Z | from typing import Callable
from pp.cell import cell
from pp.component import Component
from pp.components.bend_circular import bend_circular
from pp.components.coupler_ring import coupler_ring
from pp.components.waveguide import waveguide as waveguide_function
from pp.config import call_if_func
from pp.drc import assert_on_2nm_grid
@cell
def ring_single(
    wg_width: float = 0.5,
    gap: float = 0.2,
    bend_radius: float = 10.0,
    length_x: float = 4.0,
    length_y: float = 0.001,
    coupler: Callable = coupler_ring,
    waveguide: Callable = waveguide_function,
    bend: Callable = bend_circular,
    pins: bool = False,
) -> Component:
    """Single bus ring made of a ring coupler (cb: bottom)
    connected with two vertical waveguides (wl: left, wr: right)
    two bends (bl, br) and horizontal waveguide (wg: top)

    Args:
        wg_width: waveguide width
        gap: gap between for coupler
        bend_radius: for the bend and coupler
        length_x: ring coupler length
        length_y: vertical waveguide length
        coupler: ring coupler function
        waveguide: waveguide function
        bend: bend function
        pins: add pins

    .. code::

          bl-wt-br
          |      |
          wl     wr length_y
          |      |
         --==cb==-- gap

          length_x

    .. plot::
      :include-source:

      import pp

      c = pp.c.ring_single(wg_width=0.5, gap=0.2, length_x=4, length_y=0.1, bend_radius=5)
      pp.plotgds(c)

    """
    bend_radius = float(bend_radius)
    # The coupling gap must land on the 2 nm manufacturing grid.
    assert_on_2nm_grid(gap)

    # Each sub-component argument may be a factory or an already-built
    # Component; call_if_func normalizes both cases.
    coupler = call_if_func(
        coupler, gap=gap, wg_width=wg_width, bend_radius=bend_radius, length_x=length_x
    )
    waveguide_side = call_if_func(waveguide, width=wg_width, length=length_y)
    waveguide_top = call_if_func(waveguide, width=wg_width, length=length_x)
    bend_ref = bend(width=wg_width, radius=bend_radius) if callable(bend) else bend

    # Instantiate references and stitch them into the ring.
    c = Component()
    cb = c << coupler
    wl = c << waveguide_side
    wr = c << waveguide_side
    bl = c << bend_ref
    br = c << bend_ref
    wt = c << waveguide_top

    wl.connect(port="E0", destination=cb.ports["N0"])
    bl.connect(port="N0", destination=wl.ports["W0"])
    wt.connect(port="W0", destination=bl.ports["W0"])
    br.connect(port="N0", destination=wt.ports["E0"])
    wr.connect(port="W0", destination=br.ports["W0"])
    wr.connect(port="E0", destination=cb.ports["N1"])  # just for netlist

    # Expose the bus ports of the coupler as the ring's external ports.
    c.add_port("E0", port=cb.ports["E0"])
    c.add_port("W0", port=cb.ports["W0"])
    if pins:
        # BUG FIX: `pp` was referenced here without being imported at module
        # scope, so calling ring_single(pins=True) from library code raised
        # NameError. Import lazily here; this also avoids a circular import
        # at module load time since this file lives inside the `pp` package.
        import pp
        pp.add_pins_to_references(c)
    return c
if __name__ == "__main__":
import pp
c = ring_single()
cc = pp.add_pins(c)
# print(c.settings)
# print(c.get_settings())
pp.show(cc)
| 27.56 | 90 | 0.645501 | 400 | 2,756 | 4.2575 | 0.25 | 0.032883 | 0.023488 | 0.017616 | 0.086905 | 0.086905 | 0.050499 | 0.050499 | 0.050499 | 0 | 0 | 0.018634 | 0.240566 | 2,756 | 99 | 91 | 27.838384 | 0.795031 | 0.286647 | 0 | 0 | 0 | 0 | 0.021552 | 0 | 0 | 0 | 0 | 0 | 0.039216 | 1 | 0.019608 | false | 0 | 0.176471 | 0 | 0.215686 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
da1134de439d802fe50733f5cc4818017bddfa77 | 1,469 | py | Python | uninstall.py | Manisso/LFSET | df5f104687daf66ef4a0cb9808a0ce377415e2dc | [
"MIT"
] | 8 | 2019-05-29T22:49:04.000Z | 2021-02-28T21:05:28.000Z | uninstall.py | Manisso/LFSET | df5f104687daf66ef4a0cb9808a0ce377415e2dc | [
"MIT"
] | null | null | null | uninstall.py | Manisso/LFSET | df5f104687daf66ef4a0cb9808a0ce377415e2dc | [
"MIT"
] | 2 | 2019-06-09T17:52:31.000Z | 2019-09-09T17:14:46.000Z | #!/usr/bin/env python
# -*- codeing: UTF-8 -*-
import time
import sys
import os
print('''
____ ___ .__ __ .__ .__ .____ __________________________________________
| | \____ |__| ____ _______/ |______ | | | | ______ ___.__. | | \_ _____/ _____/\_ _____/\__ ___/
| | / \| |/ \ / ___/\ __\__ \ | | | | \____ < | | ______ | | | __) \_____ \ | __)_ | |
| | / | \ | | \\___ \ | | / __ \| |_| |__ | |_> >___ | /_____/ | |___| \ / \ | \ | |
|______/|___| /__|___| /____ > |__| (____ /____/____/ /\ | __// ____| |_______ \___ / /_______ //_______ / |____|
\/ \/ \/ \/ \/ |__| \/ \/ \/ \/ \/
''')
ch = raw_input('Do you REALLY want to uninstall LFSET? (N/y): ')
if ch == 'N':
print("Have a good day!")
elif ch == 'y':
print('sorry you didnt like it :( BUT, HAVE A GOOD DAY :))))')
time.sleep(5)
os.system('clear')
os.system('cd .. && cd .. && cd ..')
os.system('rm -rf etc')
os.system('rm -rf files')
os.system('rm -rf tools')
os.system('rm -rf .git')
os.system('rm -rf LICENSE')
os.system('rm -rf README.md')
print('All files uninstalled, after 10sec this file will be del...')
time.sleep(10)
os.system('sudo rm -rf uninstall.py')
| 43.205882 | 136 | 0.429544 | 111 | 1,469 | 3.585586 | 0.54955 | 0.180905 | 0.150754 | 0.180905 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.006572 | 0.378489 | 1,469 | 33 | 137 | 44.515152 | 0.429354 | 0.029272 | 0 | 0 | 0 | 0.142857 | 0.79073 | 0.029494 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.107143 | 0 | 0.107143 | 0.142857 | 0 | 0 | 0 | null | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 3 |
da1152e98be68574744964ed8c665a43ee954229 | 13,297 | py | Python | src/services/db/oracle.py | daesnorey/PPRJ | f826eb194f895d13522f61a51a5100a5cdbead99 | [
"Apache-2.0"
] | null | null | null | src/services/db/oracle.py | daesnorey/PPRJ | f826eb194f895d13522f61a51a5100a5cdbead99 | [
"Apache-2.0"
] | null | null | null | src/services/db/oracle.py | daesnorey/PPRJ | f826eb194f895d13522f61a51a5100a5cdbead99 | [
"Apache-2.0"
] | null | null | null | """oracle.py.
db_connection.py file will contain the connection behaviour
to the database
"""
import traceback
import random
import copy
import cx_Oracle
import json
from src.objects.third import Third
from src.services.db.db_types import DbTypes
class Oracle(object):
    """Handle the connection to an Oracle database and build/execute SQL.

    Wraps a single cx_Oracle connection/cursor pair and offers helpers that
    assemble parameterized INSERT/SELECT/UPDATE/DELETE statements from
    dictionaries of fields and conditions.
    """

    def __init__(self):
        """Constructor: connection is opened lazily on first cursor use."""
        self.__data_base = None
        self.__cursor = None

    def __open(self, debug=False):
        """Connect to the database and create the shared cursor."""
        # NOTE(review): hard-coded credentials -- move to configuration /
        # environment variables before shipping.
        username = 'pre_dnovoa'#'PPRJ'
        password = 'w27XYfj5'
        hostname = '127.0.0.1'
        servicename = 'XE'
        port = 1521
        dsn_tns = cx_Oracle.makedsn(hostname, port, servicename)
        if debug is True:
            print(dsn_tns)
        try:
            self.__data_base = cx_Oracle.connect(username, password, dsn_tns)
        except cx_Oracle.DatabaseError as e:
            error, = e.args
            # ORA-01017: invalid username/password.
            if error.code == 1017:
                print('Please check your credentials.')
                # sys.exit()?
            else:
                print(e)
            # Very important part!
            raise
        # If the database connection succeeded create the cursor
        # we're going to use.
        self.__cursor = self.__data_base.cursor()

    def __close(self):
        # Close the connection (if any) and drop both handles so the next
        # get_cursor() reconnects.
        if self.__data_base is not None:
            self.__data_base.close()
            self.__data_base = None
            self.__cursor = None

    def get_cursor(self):
        """Get cursor connection, opening the database lazily."""
        if self.__cursor is None:
            self.__open()
        return self.__cursor

    def execute(self, query, bindvars={}, commit=False, debug=False):
        """Execute query, return cursor.

        NOTE(review): ``bindvars={}`` is a mutable default argument --
        harmless only because it is never mutated here; consider ``None``.
        """
        # Rewrite dotted bind names (":table.col") into Oracle-legal ones.
        __noramalizate = self.normalize_query(query, bindvars)
        __query = __noramalizate[0]
        __bindvars = __noramalizate[1]
        if debug:
            print(query, bindvars)
            print("*" * 10)
            print(__query, __bindvars)
        response = self.get_cursor().execute(__query, __bindvars)
        if commit is True:
            self.__data_base.commit()
        return response

    def normalize_query(self, query, bindvars):
        """Replace dotted bind-variable names with generated legal names.

        Returns ``[query, bindvars]`` (possibly rewritten). DbTypes marker
        values are dropped from the bind dict: they are rendered inline by
        get_conditions, not bound.
        """
        if not bindvars or "." not in query:
            return [query, bindvars]
        new_bindvars = {}
        for key in bindvars:
            value = bindvars[key]
            if DbTypes.exist(value):
                continue
            if "." in key:
                new_key = self.get_condition_key(key)
                new_bindvars[new_key] = value
                query = query.replace(":" + key, ":" + new_key)
            else:
                new_bindvars[key] = value
        return [query, new_bindvars]

    def get_condition_key(self, key):
        """Generate a random, Oracle-legal bind name for a dotted key.

        Only the part after the dot is kept; a random letter+number prefix
        avoids collisions between same-named columns of different tables.
        """
        dot = "."
        new_key = ""
        if dot in key:
            new_key = str(random.choice('abcdefghij'))
            new_key += str(random.randint(0, 1000))
            new_key += key.split(dot)[1]
        return new_key

    def get_join_select(self, fields=None, conditions=None,
                        join_fields=None, *table):
        """Method get_query.
        @param table: table name in database
        @param fields: dictionary which contain the fields to affect.
        @param condition: dictionary which contain the fields and
        values to filter
        """
        if not fields:
            fields = []
        if not conditions:
            conditions = {}
        if not join_fields:
            join_fields = {}
        __inst = self.get_join_instruction(fields, len(table), join_fields)
        __inst += self.get_conditions(1, conditions)
        query = __inst
        for number in range(len(table)):
            str_replace = ":table" + str(number)
            # NOTE(review): __table is computed twice and never used below;
            # the l__/r__ prefixes are NOT stripped from the emitted query.
            __table = table[number].replace("l__", "")
            __table = table[number].replace("r__", "")
            query = query.replace(str_replace, table[number])
        return query

    def get_join_instruction(self, fields, n_tables=1, join=None):
        """get_instruction.
        This method will evaluate the action and will return the right
        instruction.

        Builds "SELECT :fields FROM :table0 [JOIN :tableN ON ...]" where the
        join kind is chosen by the "l__"/"r__" prefix of each join entry.
        """
        if not join:
            join = []
        __ini = "SELECT :fields FROM :table0"
        if n_tables > 1:
            for index in range(n_tables - 1):
                to_join = join[index]
                str_table = ":table" + str(index + 1)
                str_join = ""
                if to_join.startswith("l__"):
                    __ini += " LEFT JOIN "
                elif to_join.startswith("r__"):
                    __ini += " RIGHT JOIN "
                else:
                    __ini += " INNER JOIN "
                __ini += str_table
                print("to_join", to_join)
                # NOTE(review): iterating a string here yields characters,
                # not field names -- presumably join entries are sequences of
                # field names; verify against callers.
                for field in to_join:
                    print("field", field)
                    if str_join:
                        str_join += " AND "
                    str_join += str_table + "." + field
                    str_join += "= :table0." + field
                __ini += " ON " + str_join
        __inst = ""
        for field in fields:
            if __inst:
                __inst += ","
            __inst += field
        if not fields:
            __inst = "*"
        response = __ini.replace(":fields", __inst)
        return response

    def get_query(self, table, fields=None, conditions=None, action=1):
        """Method get_query.
        @param table: table name in database
        @param fields: dictionary which contain the fields to affect.
        @param condition: dictionary which contain the fields and values to
        filter
        @param action: 0=INSERT, 1=SELECT, 2=UPDATE, 3=DELETE
        """
        if not fields:
            fields = []
        if not conditions:
            conditions = {}
        __inst = self.get_instruction(action, fields)
        __inst += self.get_conditions(action, conditions)
        if action == 0:
            # INSERT returns the generated key into the :new_id out-bind;
            # :return_id is substituted with the key column by save().
            __inst += " returning :return_id INTO :new_id"
        query = __inst.replace(":table", table)
        return query

    def get_instruction(self, action, fields):
        """get_instruction.
        This method will evaluate the action and will return the right
        instruction (statement skeleton with :fields/:values expanded).
        """
        __ini = ""
        if action == 0:
            __ini = "INSERT INTO :table (:fields) VALUES (:values)"
        elif action == 1:
            __ini = "SELECT :fields FROM :table"
        elif action == 2:
            __ini = "UPDATE :table SET :fields"
        elif action == 3:
            __ini = "DELETE FROM :table"
            # DELETE has no field list: return the skeleton immediately.
            return __ini
        __inst = ""
        __values = ""
        for field in fields:
            # A field value may be a plain value or a {"type": ..., "value": ...}
            # descriptor; "date" fields are wrapped in TO_DATE().
            try:
                __type = fields[field].get("type")# if isinstance(fields[field], dict) else None
            except:
                # NOTE(review): bare except silently treats any error as
                # "untyped"; narrowing to AttributeError would be safer.
                __type = None
            if __inst:
                __inst += ","
                __values += ","
            if action == 0:
                __inst += field
                __values += "TO_DATE(:{0}, 'yyyy-MM-dd')".format(field) if __type == "date" else ":{}".format(field)
            elif action == 2:
                __inst += "{0}= TO_DATE(:{0}, 'yyyy-MM-dd')".format(field) if __type == "date" else "{0}=:{0}".format(field)
            else:
                __inst += field
                __values += ":" + field
        if not fields and action == 1:
            __inst = "*"
        response = __ini.replace(":fields", __inst).replace(":values",
                                                            __values)
        return response

    def get_conditions(self, action, conditions):
        """Method get_conditions.
        this method will evaluate the action and the conditions
        if the action is 0 or there are no conditions then it returns an empty
        string
        otherwise it return the right condition (a " WHERE ..." clause).
        """
        s_conditions = len(conditions)
        if action == 0 or s_conditions == 0:
            return ""
        __condition = " WHERE "
        __cond = ""
        for condition in conditions:
            try:
                __type = conditions[condition].get("type")
            except:
                # NOTE(review): bare except -- see get_instruction.
                __type = None
            __value = conditions[condition] if not __type else conditions[condition].get("value")
            # A list value expands into one AND-joined test per element.
            if not isinstance(__value, list):
                __value = [__value]
            for __val in __value:
                if __cond:
                    __cond += " AND "
                if DbTypes.exist(__val):
                    # DbTypes markers render as raw SQL fragments (e.g. IS NULL).
                    __sentence = DbTypes.get_sentence(__val)
                    if '{}' in __sentence:
                        __cond += __sentence.format(condition)
                    else:
                        __cond += condition + " " + __sentence
                else:
                    __cond += "{0} = TO_DATE(:{0}, 'yyyy-MM-dd')".format(condition) if __type == "date" else "{0}=:{0}".format(condition)
        __condition += __cond
        return __condition

    def save(self, table, generic_object, name_id):
        """Method save.
        Insert or update *generic_object* in *table*: updates when the
        *name_id* key holds a positive id, inserts (returning the new id)
        otherwise.
        @attribute table
        @attribute generic_object
        @attribute name_id
        """
        __fields = copy.copy(generic_object)
        if name_id in __fields:
            del __fields[name_id]
            if isinstance(generic_object[name_id], dict):
                id_object = generic_object[name_id]['value']
            else:
                id_object = generic_object[name_id]
        else:
            id_object = -1
        response = {}
        try:
            response = dict(error=0, text="success")
            if id_object > 0:
                __condition = {name_id: id_object}
                __update_query = self.get_query(table, __fields, __condition,
                                                action=2)
                # NOTE(review): this mutates the caller's dict in place,
                # replacing each descriptor with its bare value.
                for field in generic_object: generic_object[field] = generic_object[field].get("value")
                print(__update_query)
                self.execute(__update_query, generic_object, True)
            else:
                # Out-bind variable that receives the generated primary key.
                newest_id_wrapper = self.get_cursor().var(cx_Oracle.NUMBER)
                __insert_query = self.get_query(table, fields=__fields, action=0)
                for field in __fields: __fields[field] = __fields[field].get("value")
                __fields["new_id"] = newest_id_wrapper
                __insert_query = __insert_query.replace(":return_id", name_id)
                print(__insert_query)
                self.execute(__insert_query, __fields, True, False)
                new_id = newest_id_wrapper.getvalue()
                response["id"] = int(new_id)
        except Exception as e:
            formatted_lines = traceback.format_exc().splitlines()
            print(formatted_lines[0])
            print(formatted_lines[-1])
            print(e)
            response = dict(error=1, text="There was an error saving", desc_error=formatted_lines[-1])
        return response

    def delete(self, table, conditions):
        """Method delete.
        Delete rows of *table* matching *conditions*; refuses to run with an
        empty condition set (which would wipe the table).
        @attribute table
        @attribute name_id
        @attribute id_object
        """
        condition_size = len(conditions)
        if condition_size == 0:
            return dict(error=2, text="Data incomplete at delete")
        __delete_query = self.get_query(table, conditions=conditions,
                                        action=3)
        response = {}
        try:
            self.execute(__delete_query, conditions, True)
            response = dict(error=0, text="success")
        except Exception:
            response = dict(error=2, text="There was an error deleting")
        return response

    def search(self, **options):
        # Keyword-driven fuzzy search: every condition is LIKE-matched
        # against every field; rows are ranked by number of matching fields.
        # Expected options: table, fields, conditions, and optionally
        # class_object(row, weight) to wrap each hit.
        table = options.get("table")
        if not table:
            raise Exception("fuck you")
        tmp = {}
        if isinstance(table, list):
            # NOTE(review): multi-table search is unimplemented; with a list
            # `class_object`/`result` below are unbound -- verify callers
            # never pass a list.
            pass
        else:
            query = self.get_instruction(1, {}).replace(":table", table)
            fields = options.get("fields")
            conditions = options.get("conditions")
            class_object = options.get("class_object")
            for field in fields:
                nquery = "{} WHERE".format(query)
                for condition in conditions:
                    if len(condition.strip()) == 0:
                        continue
                    # SECURITY(review): condition text is interpolated into
                    # the SQL string -- injectable; should be bound instead.
                    nquery += " LOWER({}) LIKE LOWER('%{}%') OR".format(field, condition)
                # NOTE(review): strip("OR") strips the characters O/R, which
                # can also eat a trailing O or R of a legitimate token.
                nquery = nquery.strip("OR").strip()
                response = self.execute(nquery, {}, debug=False)
                if not response:
                    continue
                for row in response.fetchall():
                    # Assumes the first selected column is the row id.
                    id = row[0]
                    if not tmp.get(id):
                        tmp[id] = [row, 1]
                    else:
                        tmp[id][1] += 1
        if class_object:
            result = []
            for key in tmp.keys():
                item = class_object(tmp[key][0], tmp[key][1])
                result.append(item)
            # Rank by match weight, best first (class_object exposes .w).
            result.sort(key=lambda x: x.w, reverse=True)
        else:
            result = tmp
        return result
| 31.88729 | 137 | 0.518087 | 1,379 | 13,297 | 4.704859 | 0.166062 | 0.010789 | 0.012947 | 0.015413 | 0.183724 | 0.156134 | 0.120376 | 0.110512 | 0.083847 | 0.083847 | 0 | 0.009855 | 0.381891 | 13,297 | 416 | 138 | 31.963942 | 0.779535 | 0.106565 | 0 | 0.250883 | 0 | 0 | 0.063891 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.053004 | false | 0.010601 | 0.024735 | 0 | 0.137809 | 0.045936 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
da11aa1d5d2c57b57bf78f6eb3a605f93202667a | 2,211 | py | Python | utils/utils_math.py | spisakt/PUMI | bea29696aa90e5581f08919e1a2cd9f569284984 | [
"BSD-3-Clause"
] | 5 | 2018-06-12T08:17:13.000Z | 2022-02-25T20:07:00.000Z | utils/utils_math.py | spisakt/PUMI | bea29696aa90e5581f08919e1a2cd9f569284984 | [
"BSD-3-Clause"
] | null | null | null | utils/utils_math.py | spisakt/PUMI | bea29696aa90e5581f08919e1a2cd9f569284984 | [
"BSD-3-Clause"
] | 2 | 2020-10-19T15:27:28.000Z | 2021-06-04T17:02:27.000Z | from nipype.interfaces.utility import Function
def add_two(a, b):
    """Return the sum of *a* and *b*, coercing both to float."""
    return float(a) + float(b)
def sum_list(in_list):
    """Return the sum of all elements of *in_list* (0 for an empty list)."""
    return sum(in_list)
def sub_two(a, b):
    """Return *a* minus *b*, coercing both to float."""
    return float(a) - float(b)
def abs_val(x):
    """Return the absolute value of *x*."""
    return abs(x)
def sec2sigmaV(TR, sec):
    """Convert a duration in seconds to a sigma expressed in volumes:
    sigmaV = sec / (2 * TR)."""
    return sec / (2 * TR)
def txt2MeanTxt(in_file, axis=None, header=False):
    """Compute column means (axis=0), row means (axis=1) or the global mean
    (axis=None) of a whitespace-delimited text file and save the result to
    'mean.txt' in the current working directory.

    Args:
        in_file: path of the input text file.
        axis: axis passed to numpy.ndarray.mean.
        header: when True, the first line of the file is skipped.

    Returns:
        Absolute path of the written 'mean.txt'.
    """
    import numpy as np
    import os
    # BUG FIX: the original used Python-2 print statements, which are a
    # SyntaxError under Python 3; print() works on both.
    if header:
        print("drop first line")
        data = np.loadtxt(in_file, skiprows=1)  # header -> drop first line
    else:
        print("don't drop first line")
        data = np.loadtxt(in_file)
    mean = data.mean(axis=axis)
    np.savetxt('mean.txt', [mean])
    return os.getcwd() + '/mean.txt'
def txt2MaxTxt(in_file, axis=None, header=False):
    """Compute the column-wise (axis=0), row-wise (axis=1) or global
    (axis=None) maximum of a whitespace-delimited text file and save it to
    'max.txt' in the current working directory.

    Args:
        in_file: path of the input text file.
        axis: axis passed to numpy.ndarray.max.
        header: when True, the first line of the file is skipped.

    Returns:
        Absolute path of the written 'max.txt'.
    """
    import numpy as np
    import os
    # BUG FIX: the original used Python-2 print statements, which are a
    # SyntaxError under Python 3; print() works on both. The result local is
    # also renamed from the misleading `mean` to `maximum`.
    if header:
        print("drop first line")
        data = np.loadtxt(in_file, skiprows=1)  # header -> drop first line
    else:
        print("don't drop first line")
        data = np.loadtxt(in_file)
    maximum = data.max(axis=axis)
    np.savetxt('max.txt', [maximum])
    return os.getcwd() + '/max.txt'
###############################################
# Nipype Function interfaces wrapping the helpers above so they can be
# used as nodes in a nipype workflow (input/output names map to the
# wrapped function's parameters and return value).

AddTwo = Function(input_names=['a', 'b'],
                  output_names=['sum'],
                  function=add_two)

SumList = Function(input_names=['in_list'],
                   output_names=['sum'],
                   function=sum_list)

SubTwo = Function(input_names=['a', 'b'],
                  output_names=['dif'],
                  function=sub_two)

Abs = Function(input_names=['x'],
               output_names=['abs'],
               function=abs_val)

Sec2sigmaV = Function(input_names=['TR', 'sec'],
                      output_names=['sigmaV'],
                      function=sec2sigmaV)

Txt2meanTxt = Function(input_names=['in_file', 'axis', 'header'],
                       output_names=['mean_file'],
                       function=txt2MeanTxt)

Txt2maxTxt = Function(input_names=['in_file', 'axis', 'header'],
                      output_names=['max_file'],
                      function=txt2MaxTxt)
da11fc5980e78cefaeb92357886c125f256182a0 | 439 | py | Python | exercicio_py/ex0007_progressao_aritmetica/main_v1.py | danielle8farias/Exercicios-Python-3 | f2fe9b6ca63536df1d83fd10162cfc04de36b830 | [
"MIT"
] | null | null | null | exercicio_py/ex0007_progressao_aritmetica/main_v1.py | danielle8farias/Exercicios-Python-3 | f2fe9b6ca63536df1d83fd10162cfc04de36b830 | [
"MIT"
] | null | null | null | exercicio_py/ex0007_progressao_aritmetica/main_v1.py | danielle8farias/Exercicios-Python-3 | f2fe9b6ca63536df1d83fd10162cfc04de36b830 | [
"MIT"
] | null | null | null | ########
# autora: danielle8farias@gmail.com
# repositório: https://github.com/danielle8farias
# Descrição: Usuário informa o 1º termo de uma PA e sua razão. O programa retorna os 10 primeiros termos dessa PA.
########
A1 = int(input('Primeiro termo: '))
r = int(input('Razão: '))
i = 1
An = A1
while i < 11:
print(f'{An}', end=' -> ')
#fórmula da Progressão aritmética
An = A1 + i*r
#i = i + 1
i += 1
print('FIM')
| 23.105263 | 114 | 0.610478 | 65 | 439 | 4.123077 | 0.676923 | 0.022388 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.037791 | 0.216401 | 439 | 18 | 115 | 24.388889 | 0.741279 | 0.537585 | 0 | 0 | 0 | 0 | 0.187845 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0.222222 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
da12532f996b1734f9456dcccabecc881b1e321b | 2,165 | py | Python | rns/viz.py | matwilso/relation-networks | 66c67b342a90ae3699e576dcec883c329905b2e0 | [
"MIT"
] | null | null | null | rns/viz.py | matwilso/relation-networks | 66c67b342a90ae3699e576dcec883c329905b2e0 | [
"MIT"
] | null | null | null | rns/viz.py | matwilso/relation-networks | 66c67b342a90ae3699e576dcec883c329905b2e0 | [
"MIT"
] | null | null | null | import io
import os
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
import seaborn as sns
from rns.constant import W, H
# Plotter functions

# Registry mapping a plot-mode name to its plotting callable; plot() looks
# modes up here.
PLOT_FUNCS = {}


def register_plotter(func):
    """Decorator: register *func* in PLOT_FUNCS under its __name__ and return
    a thin forwarding wrapper."""
    PLOT_FUNCS[func.__name__] = func

    def func_wrapper(images, **conv_kwargs):
        return func(images, **conv_kwargs)

    return func_wrapper
def plot(mode, vals, FLAGS, itr=0, save=True, return_buf=False, show=False):
    """Dispatch to the plotter registered under *mode*, then optionally save
    the figure to its path, capture it into a BytesIO buffer, and/or show it.
    The figure is always closed; returns the buffer (or None)."""
    plotter = PLOT_FUNCS[mode]
    path = plotter(vals, FLAGS, itr=itr)
    if save:
        plt.savefig(path)
    buf = None
    if return_buf:
        buf = io.BytesIO()
        plt.savefig(buf)
        buf.seek(0)
    if show:
        plt.show()
    plt.close()
    return buf
@register_plotter
def arr(arr, FLAGS, itr=None):
    # Render a raw 2-D array as a black-and-white image (plot mode "arr").
    # FLAGS and itr are unused but keep the common plotter signature.
    plt.imshow(arr, cmap='binary')
@register_plotter
def in_out_vae(vals, FLAGS, itr=0):
    """Plot the VAE input/output image pair side by side and return the
    save path under FLAGS['plot_path']."""
    filename = '{}-vae.png'.format(itr)
    os.makedirs(FLAGS['plot_path'], exist_ok=True)
    target = os.path.join(FLAGS['plot_path'], filename)
    _, (left, right) = plt.subplots(1, 2)
    left.imshow(vals['img1'])
    right.imshow(vals['img2'])
    return target
@register_plotter
def contour(vals, FLAGS, itr=0):
    """Contour plot of vals['Z'][:, :, 0] over the (X, Y) grid with the
    object positions from vals['state'] scattered on top; returns the
    save path under FLAGS['plot_path']."""
    title = '{}-prob.png'.format(itr)
    os.makedirs(FLAGS['plot_path'], exist_ok=True)
    target = os.path.join(FLAGS['plot_path'], title)
    grid_x, grid_y, grid_z = vals['X'], vals['Y'], vals['Z']
    state = vals['state']
    plt.contour(grid_x, grid_y, grid_z[:, :, 0])
    plt.scatter(state[0, :, 0], state[0, :, 1])
    plt.title(title)
    return target
@register_plotter
def samples(vals, FLAGS, itr=0):
    """Hexbin joint plot of the first object's 2-D samples; returns the
    save path under FLAGS['plot_path']."""
    pts = vals['samples']
    title = '{}-sample.png'.format(itr)
    target = os.path.join(FLAGS['plot_path'], title)
    sns.jointplot(pts[:, 0, 0], pts[:, 0, 1], kind='hex', color='#4cb391',
                  xlim=(-1.0, 1.0), ylim=(-1.0, 1.0))
    return target
@register_plotter
def shapes(vals, FLAGS, itr=None):
    """Draw the next batch of shapes from the data generator vals['dg']
    onto a W x H background rectangle."""
    generator = vals['dg']
    axis = plt.gca(aspect='equal', xlim=W, ylim=H)
    background = mpatches.Rectangle((0, 0), W, H, color='C0')
    axis.add_patch(background)
    for shape in next(generator)['shapes']:
        shape.plot(axis)
da1332cc41c82ca79874cad9790e459f7a50414e | 591 | py | Python | payments/tests.py | asm3ft/cs3240-quickthooters | 53000deca2d4a4ff4244cde76f36e7adcfb52784 | [
"MIT",
"PostgreSQL",
"Unlicense"
] | null | null | null | payments/tests.py | asm3ft/cs3240-quickthooters | 53000deca2d4a4ff4244cde76f36e7adcfb52784 | [
"MIT",
"PostgreSQL",
"Unlicense"
] | 9 | 2021-04-08T21:41:10.000Z | 2022-03-12T00:26:00.000Z | payments/tests.py | asm3ft/cs3240-quickthooters | 53000deca2d4a4ff4244cde76f36e7adcfb52784 | [
"MIT",
"PostgreSQL",
"Unlicense"
] | null | null | null | from django.test import TestCase
from django.test import RequestFactory, TestCase
from .views import charge, HomePageView
from login.models import Profile
from django.contrib.auth.models import User
# class PaymentViewsTestCase(TestCase):
# def setUp(self):
# # Every test needs access to the request factory.
# self.factory = RequestFactory()
# self.user = User.objects.create_user(
# username='jacob', email='jacob@…', password='top_secret')
# def charge_view_test(self):
# # tbd
# self.assertEquals(1, 1)
| 26.863636 | 71 | 0.663283 | 69 | 591 | 5.666667 | 0.57971 | 0.076726 | 0.071611 | 0.102302 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.004454 | 0.240271 | 591 | 21 | 72 | 28.142857 | 0.859688 | 0.588832 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 3 |
da1588b9217dc470ed263275774b3681de6ca380 | 642 | py | Python | MTRF/r3l/r3l/robot/robot.py | facebookresearch/MTRF | 2fee8f3f1c2150fcecc2db2fa9e122a664a72d72 | [
"Apache-2.0"
] | 2 | 2021-11-29T10:09:56.000Z | 2022-02-01T05:48:32.000Z | MTRF/r3l/r3l/robot/robot.py | facebookresearch/MTRF | 2fee8f3f1c2150fcecc2db2fa9e122a664a72d72 | [
"Apache-2.0"
] | null | null | null | MTRF/r3l/r3l/robot/robot.py | facebookresearch/MTRF | 2fee8f3f1c2150fcecc2db2fa9e122a664a72d72 | [
"Apache-2.0"
] | null | null | null | # Copyright (c) Facebook, Inc. and its affiliates
# Copyright (c) MTRF authors
import abc
class Robot(metaclass=abc.ABCMeta):
    """Abstract robot interface: concrete subclasses implement stepping,
    state setting and observation retrieval for simulation or hardware."""

    def __init__(self, env=None):
        # Keep a handle on the owning environment and, when one is given,
        # its `sim` attribute.
        self._env = env
        self._sim = env.sim if env else None

    @property
    @abc.abstractmethod
    def is_hardware(self):
        """True when this robot drives physical hardware."""
        raise NotImplementedError

    @abc.abstractmethod
    def step(self, action):
        """Apply *action* for one control step."""
        raise NotImplementedError

    @abc.abstractmethod
    def set_state(self, state):
        """Force the robot into the given state."""
        raise NotImplementedError

    @abc.abstractmethod
    def get_obs_dict(self):
        """Return the current observation dictionary."""
        raise NotImplementedError
| 21.4 | 49 | 0.640187 | 71 | 642 | 5.633803 | 0.507042 | 0.17 | 0.2 | 0.3075 | 0.33 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.281931 | 642 | 29 | 50 | 22.137931 | 0.867679 | 0.115265 | 0 | 0.380952 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.238095 | false | 0 | 0.047619 | 0 | 0.333333 | 0 | 0 | 0 | 0 | null | 0 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 3 |
da160107a31d4d4dd133e4fba3b2b4c6286bd983 | 2,955 | py | Python | pycurb/time_rule.py | azavea/PyCurb | 9492ca40b0639680b73aa7bdfcf9f744f9e75727 | [
"Apache-2.0"
] | null | null | null | pycurb/time_rule.py | azavea/PyCurb | 9492ca40b0639680b73aa7bdfcf9f744f9e75727 | [
"Apache-2.0"
] | 8 | 2020-09-30T17:15:50.000Z | 2020-10-23T21:00:53.000Z | pycurb/time_rule.py | azavea/PyCurb | 9492ca40b0639680b73aa7bdfcf9f744f9e75727 | [
"Apache-2.0"
] | null | null | null | from abc import ABC
from pycurb.utils import (parse_date, parse_day_of_month, parse_day_of_week,
parse_occurrence, parse_time)
class TimeRule(ABC):
    """Abstract marker base class for all curb time rules."""
    pass
class DaysOfWeek(TimeRule):
    """Time rule matching given weekdays, optionally restricted to certain
    occurrences within the month (e.g. first and third Monday)."""

    def __init__(self, days, occurences_in_month=None):
        # A single day name is promoted to a one-element list.
        if isinstance(days, str):
            days = [days]
        self.days = [parse_day_of_week(d) for d in days]
        if occurences_in_month:
            self.occurences_in_month = [parse_occurrence(o)
                                        for o in occurences_in_month]
        else:
            self.occurences_in_month = None

    @staticmethod
    def from_dict(d):
        """Build from the dict form (occurrences are not round-tripped)."""
        return DaysOfWeek(d['days'])

    def to_dict(self):
        """Serialize to the dict form consumed by from_dict."""
        return {'days': self.days}
class DaysOfMonth(TimeRule):
    """Time rule matching specific days of the month."""

    def __init__(self, days):
        """Accept one day or a list of days; each is normalized via
        parse_day_of_month."""
        # BUG FIX: the original tested isinstance(days, 'str'), passing the
        # string 'str' instead of the type str -- isinstance raises
        # TypeError for non-type second arguments, so every construction
        # failed. Also mirrors the correct check in DaysOfWeek.
        if isinstance(days, str):
            days = [days]
        self.days = [parse_day_of_month(day) for day in days]

    @staticmethod
    def from_dict(d):
        """Build a DaysOfMonth from its dict form."""
        return DaysOfMonth(d['days'])

    def to_dict(self):
        """Serialize to the dict form consumed by from_dict."""
        return {'days': self.days}
class DesignatedPeriod(TimeRule):
    """Time rule tied to a named designated period, applying either
    'except during' or 'only during' that period; any other apply value
    is stored as ``None``."""

    def __init__(self, name, apply):
        self.name = name
        normalized = apply.lower()
        # Only the two recognized phrases are kept; anything else -> None.
        self.apply = normalized if normalized in ('except during', 'only during') else None

    @staticmethod
    def from_dict(d):
        return DesignatedPeriod(d['name'], d['apply'])

    def to_dict(self):
        serialized = {'name': self.name}
        if self.apply:
            serialized['apply'] = self.apply
        return serialized
class EffectiveDates(TimeRule):
    """Time rule active between two dates.

    Serialized as 'MM-DD', or 'YYYY-MM-DD' when both input strings
    carried a year component.
    """

    def __init__(self, date_from, date_to):
        self.date_from = parse_date(date_from)
        self.date_to = parse_date(date_to)
        # Year-specific only when *both* inputs have more than two
        # '-'-separated components (i.e. include a year).
        self.year = len(date_from.split('-')) > 2 and len(date_to.split('-')) > 2

    @staticmethod
    def from_dict(d):
        return EffectiveDates(d['from'], d['to'])

    def to_dict(self):
        def _fmt(date):
            text = '{}-{}'.format(date.month, date.day)
            if self.year:
                text = '{}-'.format(date.year) + text
            return text

        return {'from': _fmt(self.date_from), 'to': _fmt(self.date_to)}
class TimeOfDay(TimeRule):
    """Time rule active between two times of day, serialized as 'HH:MM'."""

    def __init__(self, time_from, time_to):
        self.time_from = parse_time(time_from)
        self.time_to = parse_time(time_to)

    def is_equal(self, time_of_day):
        # Two rules are equal when their serialized forms match.
        return self.to_dict() == time_of_day.to_dict()

    @staticmethod
    def from_dict(d):
        return TimeOfDay(d['from'], d['to'])

    def to_dict(self):
        def _fmt(t):
            # Zero-pad both fields to two digits.
            return '{}:{}'.format(str(t.hour).zfill(2), str(t.minute).zfill(2))

        return {'from': _fmt(self.time_from), 'to': _fmt(self.time_to)}
| 26.863636 | 77 | 0.575973 | 394 | 2,955 | 4.068528 | 0.154822 | 0.044916 | 0.046787 | 0.059264 | 0.361822 | 0.25577 | 0.1335 | 0.1335 | 0.107299 | 0.107299 | 0 | 0.002844 | 0.285956 | 2,955 | 109 | 78 | 27.110092 | 0.756872 | 0 | 0 | 0.256098 | 0 | 0 | 0.042301 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.195122 | false | 0.012195 | 0.02439 | 0.097561 | 0.426829 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
da170ec47cebeb13d6c068d32835dcf9ac0425e1 | 2,653 | py | Python | amrlib/models/parse_gsii/vocabs.py | plandes/amrlib | c31f92f05a265362367eea85f512e54030860147 | [
"MIT"
] | 103 | 2020-09-04T07:21:09.000Z | 2022-03-31T23:06:41.000Z | amrlib/models/parse_gsii/vocabs.py | plandes/amrlib | c31f92f05a265362367eea85f512e54030860147 | [
"MIT"
] | 39 | 2020-09-03T14:26:22.000Z | 2022-03-08T20:18:59.000Z | amrlib/models/parse_gsii/vocabs.py | plandes/amrlib | c31f92f05a265362367eea85f512e54030860147 | [
"MIT"
] | 19 | 2020-09-30T12:15:08.000Z | 2022-02-18T18:15:31.000Z | import os
# Special token strings shared by all vocabularies (padding, unknown, etc.);
# PAD and UNK are always placed at indices 0 and 1 by Vocab.__init__.
PAD, UNK, DUM, NIL, END, CLS = '<PAD>', '<UNK>', '<DUMMY>', '<NULL>', '<END>', '<CLS>'
# Note: for the function that saves the vocabs, see create_vocabs.py
def get_vocabs(vocab_dir):
    """Load all vocabularies from ``vocab_dir`` and return them keyed by name.

    :param vocab_dir: directory containing the ``*_vocab`` files.
    :return: dict mapping vocabulary name to a ``Vocab`` instance.
    """
    # (name, file name, minimum occurrence count, special tokens) for each
    # vocabulary -- replaces nine near-identical constructor calls.
    specs = [
        ('tok', 'tok_vocab', 5, [CLS]),
        ('lem', 'lem_vocab', 5, [CLS]),
        ('pos', 'pos_vocab', 5, [CLS]),
        ('ner', 'ner_vocab', 5, [CLS]),
        ('predictable_concept', 'predictable_concept_vocab', 5, [DUM, END]),
        ('concept', 'concept_vocab', 5, [DUM, END]),
        ('rel', 'rel_vocab', 50, [NIL]),
        ('word_char', 'word_char_vocab', 100, [CLS, END]),
        ('concept_char', 'concept_char_vocab', 100, [CLS, END]),
    ]
    return {name: Vocab(os.path.join(vocab_dir, fname), min_cnt, specials)
            for (name, fname, min_cnt, specials) in specs}
class Vocab(object):
    """Token vocabulary loaded from a TSV file of ``token<TAB>count`` lines.

    Tokens occurring at least ``min_occur_cnt`` times are kept; ``specials``
    (plus PAD at index 0 and UNK at index 1) are always included.
    """
    def __init__(self, filename, min_occur_cnt, specials=None):
        idx2token = [PAD, UNK] + (specials if specials is not None else [])
        self._priority = dict()
        num_tot_tokens = 0
        num_vocab_tokens = 0
        with open(filename) as f:
            lines = f.readlines()
        for line in lines:
            try:
                token, cnt = line.rstrip('\n').split('\t')
                cnt = int(cnt)
            except ValueError:
                # Bug fix: report the malformed line and skip it. The old
                # bare `except:` fell through and kept processing with the
                # *previous* iteration's token/cnt (or crashed on the first
                # line), silently corrupting the counts.
                print(line)
                continue
            num_tot_tokens += cnt
            if cnt >= min_occur_cnt:
                idx2token.append(token)
                num_vocab_tokens += cnt
                self._priority[token] = cnt
        # Fraction of all token occurrences covered by the kept vocabulary;
        # guard against an empty/zero-count file (old code divided by zero).
        self.coverage = num_vocab_tokens / num_tot_tokens if num_tot_tokens else 0.0
        self._token2idx = dict(zip(idx2token, range(len(idx2token))))
        self._idx2token = idx2token
        self._padding_idx = self._token2idx[PAD]
        self._unk_idx = self._token2idx[UNK]

    def priority(self, x):
        """Return the stored occurrence count for token ``x`` (0 if unknown)."""
        return self._priority.get(x, 0)

    @property
    def size(self):
        """Number of tokens in the vocabulary, including special tokens."""
        return len(self._idx2token)

    @property
    def unk_idx(self):
        """Index of the UNK token."""
        return self._unk_idx

    @property
    def padding_idx(self):
        """Index of the PAD token."""
        return self._padding_idx

    def idx2token(self, x):
        """Map an index (or nested list of indices) to token(s)."""
        if isinstance(x, list):
            return [self.idx2token(i) for i in x]
        return self._idx2token[x]

    def token2idx(self, x):
        """Map a token (or nested list of tokens) to index(es); unknown tokens
        map to ``unk_idx``."""
        if isinstance(x, list):
            return [self.token2idx(i) for i in x]
        return self._token2idx.get(x, self.unk_idx)
| 37.366197 | 110 | 0.574821 | 344 | 2,653 | 4.235465 | 0.264535 | 0.054907 | 0.067948 | 0.092656 | 0.289636 | 0.264928 | 0.156486 | 0.043926 | 0 | 0 | 0 | 0.017433 | 0.286468 | 2,653 | 70 | 111 | 37.9 | 0.752245 | 0.024878 | 0 | 0.087719 | 0 | 0 | 0.083172 | 0.009671 | 0 | 0 | 0 | 0 | 0 | 1 | 0.140351 | false | 0 | 0.017544 | 0.070175 | 0.333333 | 0.017544 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
da1752aa56e3a9f32b692a7cdcc8c12c9105eaac | 1,139 | py | Python | MAR2020/MakingChange.py | dexterchan/DailyChallenge | 1f38dc3b22983835836a84d6281777d8e20fce7a | [
"Apache-2.0"
] | null | null | null | MAR2020/MakingChange.py | dexterchan/DailyChallenge | 1f38dc3b22983835836a84d6281777d8e20fce7a | [
"Apache-2.0"
] | null | null | null | MAR2020/MakingChange.py | dexterchan/DailyChallenge | 1f38dc3b22983835836a84d6281777d8e20fce7a | [
"Apache-2.0"
] | null | null | null | #Given a list of possible coins in cents, and an amount (in cents) n,
# return the minimum number of coins needed to create the amount n.
# If it is not possible to create the amount using the given coin denomination, return None.
#Here's an example and some starter code:
#ANalysis, sort the list of possible coins O(nlogn) from largest to smallest
#for each cent,
#divide amount by cent value = d, if d >= 1
# amt = amt - d*cent value
# store cent value to list
# iterate for next cent
# at end of list
# if amt > 0 , return None
def make_change(coins, n):
# Fill this in.
lst = []
coinsLst = sorted(coins, reverse=True)
amt = n
for c in coinsLst:
d = amt // c
amt = amt - d * c
for i in range(d):
lst.append(str(c))
if amt > 0:
return None
else:
result = "%d coins (%s)"%(len(lst), "+".join((lst)))
return result
if __name__ == "__main__":
print(make_change([1, 5, 10, 25], 36))
# 3 coins (25 + 10 + 1)
print(make_change([1, 5, 10, 25], 30))
# 2 coins (25 + 5)
print(make_change([1, 5, 10, 25], 27))
# 2 coins (25 + 1 + 1) | 28.475 | 92 | 0.603161 | 191 | 1,139 | 3.534031 | 0.445026 | 0.059259 | 0.066667 | 0.071111 | 0.140741 | 0.093333 | 0.093333 | 0 | 0 | 0 | 0 | 0.050971 | 0.276558 | 1,139 | 40 | 93 | 28.475 | 0.768204 | 0.510975 | 0 | 0 | 0 | 0 | 0.04059 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.055556 | false | 0 | 0 | 0 | 0.166667 | 0.166667 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
da17721d993049cf0bec2f0d42ed1fdb58798fac | 951 | py | Python | lightcycle-frontend/tournament/admin.py | Onapsis/pytron | 2ed0622ae13f010bcd8fdbbd2f1e9cba3d2e3d58 | [
"MIT"
] | 1 | 2015-11-04T12:04:42.000Z | 2015-11-04T12:04:42.000Z | lightcycle-frontend/tournament/admin.py | Onapsis/pytron | 2ed0622ae13f010bcd8fdbbd2f1e9cba3d2e3d58 | [
"MIT"
] | null | null | null | lightcycle-frontend/tournament/admin.py | Onapsis/pytron | 2ed0622ae13f010bcd8fdbbd2f1e9cba3d2e3d58 | [
"MIT"
] | null | null | null | from django.contrib import admin
from django.contrib.auth.admin import UserAdmin
from django.contrib.auth.models import User
from tournament.models import Bot, Challenge, UserProfile
class UserProfileAdmin(admin.ModelAdmin):
    # Standalone admin for profiles: list the owning user and their score.
    list_display = ('user', 'score')
    model = UserProfile
class UserProfileInline(admin.TabularInline):
    # Renders the profile as an inline table on the User admin page below.
    model = UserProfile
class UserWithProfileAdmin(UserAdmin):
    """User admin that embeds the profile inline and trims the list columns."""
    inlines = [UserProfileInline]
    list_display = ('email', 'username', 'is_active')
class BotAdmin(admin.ModelAdmin):
    # Show who owns each bot and when it was created / last modified.
    list_display = ('owner', 'creation_date', 'modification_date')
class ChallengeAdmin(admin.ModelAdmin):
    # Challenge overview: requester, creation time, and the bots involved.
    list_display = ('requested_by', 'creation_date', 'winner_bot', 'challenger_bot',
            'challenged_bot')
# Register tournament models with the admin site, and swap the stock User
# admin for the one that includes the profile inline.
admin.site.register(Bot, BotAdmin)
admin.site.register(Challenge, ChallengeAdmin)
admin.site.unregister(User)
admin.site.register(User, UserWithProfileAdmin)
admin.site.register(UserProfile, UserProfileAdmin)
| 32.793103 | 84 | 0.773922 | 104 | 951 | 6.961538 | 0.403846 | 0.062155 | 0.093923 | 0.107735 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.115668 | 951 | 28 | 85 | 33.964286 | 0.86088 | 0 | 0 | 0.090909 | 0 | 0 | 0.135647 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.181818 | 0 | 0.727273 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 |
da18045dfd912105fc7a816ba7142c291b5641e5 | 358 | py | Python | scripts/plotting/utils.py | ltiao/pynance | 1f170f9d32262eacf566a8d7647be04715c47dc1 | [
"MIT"
] | 1 | 2021-04-24T09:23:35.000Z | 2021-04-24T09:23:35.000Z | scripts/plotting/utils.py | ltiao/pynance | 1f170f9d32262eacf566a8d7647be04715c47dc1 | [
"MIT"
] | null | null | null | scripts/plotting/utils.py | ltiao/pynance | 1f170f9d32262eacf566a8d7647be04715c47dc1 | [
"MIT"
] | 1 | 2021-07-14T08:55:39.000Z | 2021-07-14T08:55:39.000Z | import numpy as np
import pandas as pd
import yaml
from pynance.benchmarks import make_benchmark
from pathlib import Path
# Golden ratio: the default width/height aspect ratio used by size() below.
GOLDEN_RATIO = 0.5 * (1 + np.sqrt(5))
# Default figure width in points (presumably a document's text width -- confirm).
WIDTH = 397.48499
def pt_to_in(x):
    """Convert a length from (TeX) points to inches; 1 inch = 72.27 pt."""
    return x / 72.27
def size(width, aspect=GOLDEN_RATIO):
    """Return a ``(width, height)`` figure size in inches.

    ``width`` is given in points; ``aspect`` is the width/height ratio
    (golden ratio by default).
    """
    width_inches = pt_to_in(width)
    return (width_inches, width_inches / aspect)
| 17.9 | 45 | 0.72067 | 64 | 358 | 3.8125 | 0.53125 | 0.086066 | 0.04918 | 0.065574 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.055556 | 0.195531 | 358 | 19 | 46 | 18.842105 | 0.791667 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.153846 | false | 0 | 0.384615 | 0 | 0.692308 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 0 | 2 |
da1abe2987395fdd9c8ec09105630f0f84d026d1 | 3,637 | py | Python | tests/test_per_sample_wrapper.py | NiWaRe/deepee | 98b5cd09f356f4a597fe204799a524c4d444dd2d | [
"Apache-2.0"
] | 16 | 2021-03-24T09:50:32.000Z | 2022-03-10T12:03:37.000Z | tests/test_per_sample_wrapper.py | NiWaRe/deepee | 98b5cd09f356f4a597fe204799a524c4d444dd2d | [
"Apache-2.0"
] | 4 | 2021-03-27T09:36:20.000Z | 2021-10-18T09:30:47.000Z | tests/test_per_sample_wrapper.py | NiWaRe/deepee | 98b5cd09f356f4a597fe204799a524c4d444dd2d | [
"Apache-2.0"
] | 4 | 2021-06-24T08:30:47.000Z | 2021-11-09T08:33:57.000Z | from deepee import PerSampleGradientWrapper
import torch
import pytest
class MiniModel(torch.nn.Module):
    """Minimal fixture model: a single 10 -> 1 linear layer."""

    def __init__(self):
        super().__init__()
        self.lin = torch.nn.Linear(10, 1)

    def forward(self, x):
        # Single affine transformation of the input.
        return self.lin(x)
def test_wrap():
    # Smoke test: constructing the wrapper must not raise.
    _ = PerSampleGradientWrapper(MiniModel(), 2)
def test_forward():
    # A forward pass through the wrapper must preserve the batch layout.
    batch = torch.randn(2, 1, 10)
    wrapper = PerSampleGradientWrapper(MiniModel(), 2)
    assert wrapper(batch).shape == (2, 1, 1)
def test_raises_param_error():
    # Accessing .parameters() directly on the wrapper must raise ValueError.
    wrapper = PerSampleGradientWrapper(MiniModel(), 2)
    with pytest.raises(ValueError):
        wrapper.parameters()
def test_check_device_cpu():
    """All parameters of the wrapper and its model copies must be on the CPU."""
    wrapped = PerSampleGradientWrapper(MiniModel(), 2).to("cpu")
    # Bug fix: the old assertion checked only one *arbitrary* element of the
    # set of device types, so mixed-device parameters could slip through.
    # Check every parameter explicitly instead.
    assert all(
        param.device.type == "cpu" for param in wrapped.wrapped_model.parameters()
    )
    for model in wrapped.models:
        assert all(param.device.type == "cpu" for param in model.parameters())
def test_check_device_gpu():
    """All parameters must be on a CUDA device when CUDA is available."""
    if not torch.cuda.is_available():
        return  # nothing to check on CPU-only machines
    wrapped = PerSampleGradientWrapper(MiniModel(), 2).to("cuda")
    # Bug fix: check every parameter, not just one arbitrary element of the
    # set of device types (see test_check_device_cpu).
    assert all(
        "cuda" in param.device.type for param in wrapped.wrapped_model.parameters()
    )
    for model in wrapped.models:
        assert all("cuda" in param.device.type for param in model.parameters())
def test_per_sample_grads():
    # Per-sample gradients summed over the batch must equal the gradients of
    # an identically-initialized unwrapped model. Reseeding before each
    # construction guarantees identical weights and identical input data.
    torch.manual_seed(42)
    data = torch.randn(2, 1, 10)
    torch.manual_seed(42)
    wrapped = PerSampleGradientWrapper(MiniModel(), 2)
    torch.manual_seed(42)
    model = MiniModel()  # single copy
    output_single = model(data)
    output_wrapped = wrapped(data)
    loss_single = output_single.mean()
    loss_wrapped = output_wrapped.mean()
    loss_single.backward()
    loss_wrapped.backward()
    wrapped.calculate_per_sample_gradients()
    single_grads = torch.cat([param.grad.flatten() for param in model.parameters()])
    # Summing over dim 0 (the per-sample axis) recovers the batch gradient.
    accumulated_grads = torch.cat(
        [
            param.accumulated_gradients.sum(dim=0).flatten()
            for param in wrapped.wrapped_model.parameters()
        ]
    )
    assert torch.allclose(single_grads, accumulated_grads)
def test_per_sample_grads_transfer_learning():
    """One model parameter does not require grad; the per-sample gradients of
    the remaining trainable parameters must still match the unwrapped model."""
    # (The original placed this description as a stray no-op string statement
    # in the middle of the function; it is now a proper docstring.)

    class MiniModel(torch.nn.Module):
        def __init__(self):
            super().__init__()
            self.lin = torch.nn.Linear(10, 1)
            # Freeze the weight matrix; only the bias stays trainable.
            list(self.lin.parameters())[0].requires_grad_(False)

        def forward(self, x):
            return self.lin(x)

    torch.manual_seed(42)
    data = torch.randn(2, 1, 10)
    torch.manual_seed(42)
    wrapped = PerSampleGradientWrapper(MiniModel(), 2)
    torch.manual_seed(42)
    model = MiniModel()  # single copy
    output_single = model(data)
    output_wrapped = wrapped(data)
    loss_single = output_single.mean()
    loss_wrapped = output_wrapped.mean()
    loss_single.backward()
    loss_wrapped.backward()
    wrapped.calculate_per_sample_gradients()
    # Compare only the parameters that actually receive gradients.
    single_grads = torch.cat(
        [param.grad.flatten() for param in model.parameters() if param.requires_grad]
    )
    accumulated_grads = torch.cat(
        [
            param.accumulated_gradients.sum(dim=0).flatten()
            for param in wrapped.wrapped_model.parameters()
            if hasattr(param, "accumulated_gradients")
        ]
    )
    assert torch.allclose(single_grads, accumulated_grads)
da1ae3572abdad23c9e302bb355fe093cb9ac8e8 | 1,981 | py | Python | graphid/util/util_grabdata.py | Erotemic/graphid | 5d04c2eec609f135464a921ba03d9578fa6e22fd | [
"Apache-2.0"
] | 4 | 2019-03-04T02:49:26.000Z | 2021-10-06T00:51:13.000Z | graphid/util/util_grabdata.py | Erotemic/graphid | 5d04c2eec609f135464a921ba03d9578fa6e22fd | [
"Apache-2.0"
] | 1 | 2019-02-15T23:42:26.000Z | 2019-02-15T23:42:26.000Z | graphid/util/util_grabdata.py | Erotemic/graphid | 5d04c2eec609f135464a921ba03d9578fa6e22fd | [
"Apache-2.0"
] | null | null | null | import ubelt as ub
from os.path import exists # NOQA
TESTIMG_URL_DICT = {
'astro.png' : 'https://i.imgur.com/KXhKM72.png', # Use instead of
'carl.jpg' : 'http://i.imgur.com/flTHWFD.jpg',
'grace.jpg' : 'http://i.imgur.com/rgQyu7r.jpg',
'jeff.png' : 'http://i.imgur.com/l00rECD.png',
'ada2.jpg' : 'http://i.imgur.com/zHOpTCb.jpg',
'ada.jpg' : 'http://i.imgur.com/iXNf4Me.jpg',
'easy1.png' : 'http://i.imgur.com/Qqd0VNq.png',
'easy2.png' : 'http://i.imgur.com/BDP8MIu.png',
'easy3.png' : 'http://i.imgur.com/zBcm5mS.png',
'hard3.png' : 'http://i.imgur.com/ST91yBf.png',
'zebra.png' : 'http://i.imgur.com/58hbGcd.png',
'star.png' : 'http://i.imgur.com/d2FHuIU.png',
'patsy.jpg' : 'http://i.imgur.com/C1lNRfT.jpg',
}
def grab_test_imgpath(key='astro.png', allow_external=True, verbose=True):
"""
Gets paths to standard / fun test images.
Downloads them if they dont exits
Args:
key (str): one of the standard test images, e.g. astro.png, carl.jpg, ...
allow_external (bool): if True you can specify existing fpaths
Returns:
str: testimg_fpath - filepath to the downloaded or cached test image.
Example:
>>> testimg_fpath = grab_test_imgpath('carl.jpg')
>>> assert exists(testimg_fpath)
"""
if allow_external and key not in TESTIMG_URL_DICT:
testimg_fpath = key
if not exists(testimg_fpath):
raise AssertionError(
'testimg_fpath={!r} not found did you mean on of {!r}' % (
testimg_fpath, sorted(TESTIMG_URL_DICT.keys())))
else:
testimg_fname = key
testimg_url = TESTIMG_URL_DICT[key]
testimg_fpath = ub.grabdata(testimg_url, fname=testimg_fname, verbose=verbose)
return testimg_fpath
if __name__ == '__main__':
"""
CommandLine:
python -m graphid.util.util_grabdata all
"""
import xdoctest
xdoctest.doctest_module(__file__)
| 34.754386 | 86 | 0.627461 | 273 | 1,981 | 4.399267 | 0.432234 | 0.064946 | 0.097419 | 0.129892 | 0.159867 | 0 | 0 | 0 | 0 | 0 | 0 | 0.01297 | 0.221605 | 1,981 | 56 | 87 | 35.375 | 0.765888 | 0.218576 | 0 | 0 | 0 | 0 | 0.40268 | 0 | 0 | 0 | 0 | 0 | 0.03125 | 1 | 0.03125 | false | 0 | 0.09375 | 0 | 0.15625 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
da1b4387561fab9dc87a8b4f0a9f01c7e63f73be | 308 | py | Python | pyschieber/deck.py | Murthy10/pyschieber | f9db28c9553b8f321f6ed71cff04eff7879af5f6 | [
"MIT"
] | 5 | 2018-01-17T08:11:14.000Z | 2018-11-27T11:37:15.000Z | pyschieber/deck.py | Murthy10/pyschieber | f9db28c9553b8f321f6ed71cff04eff7879af5f6 | [
"MIT"
] | 4 | 2018-05-09T08:41:05.000Z | 2018-11-16T08:07:39.000Z | pyschieber/deck.py | Murthy10/pyschieber | f9db28c9553b8f321f6ed71cff04eff7879af5f6 | [
"MIT"
] | 3 | 2018-04-20T07:39:30.000Z | 2018-11-10T12:44:08.000Z | from pyschieber.suit import Suit
from pyschieber.card import Card
class Deck:
def __init__(self):
self.cards = []
for suit in Suit:
self.cards += [Card(suit=suit, value=i) for i in range(6, 15)]
def __str__(self):
return str([str(card) for card in self.cards])
| 23.692308 | 74 | 0.62013 | 46 | 308 | 3.978261 | 0.434783 | 0.147541 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.013333 | 0.269481 | 308 | 12 | 75 | 25.666667 | 0.8 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.222222 | false | 0 | 0.222222 | 0.111111 | 0.666667 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 1 | 1 | 0 | 0 | 3 |
da1cd997565c598625d3fbc3be2100124fc27c2c | 21,915 | py | Python | multipy/flux.py | kamilazdybal/multipy | ebdcddb63bfb1cd647ca99bbf9002b04a9b50ed9 | [
"MIT"
] | null | null | null | multipy/flux.py | kamilazdybal/multipy | ebdcddb63bfb1cd647ca99bbf9002b04a9b50ed9 | [
"MIT"
] | null | null | null | multipy/flux.py | kamilazdybal/multipy | ebdcddb63bfb1cd647ca99bbf9002b04a9b50ed9 | [
"MIT"
] | null | null | null | """multipy: Python library for multicomponent mass transfer"""
__author__ = "James C. Sutherland, Kamila Zdybal"
__copyright__ = "Copyright (c) 2022, James C. Sutherland, Kamila Zdybal"
__license__ = "MIT"
__version__ = "1.0.0"
__maintainer__ = ["Kamila Zdybal"]
__email__ = ["kamilazdybal@gmail.com"]
__status__ = "Production"
import numpy as np
import pandas as pd
import random
import copy
import scipy
import multipy
import warnings
gas_constant = 8.31446261815324
################################################################################
################################################################################
####
#### Class: Flux
####
################################################################################
################################################################################
class Flux:
"""
Supports computing and storing fluxes. This class assumes that the species velocities, :math:`\\mathbf{u}_i`, are known.
Diffusive fluxes:
- mass diffusive flux relative to a mass-averaged velocity, :math:`\mathbf{j}_i`
- mass diffusive flux relative to a molar-averaged velocity, :math:`\mathbf{j}_i^u`
- molar diffusive flux relative to a mass-averaged velocity, :math:`\mathbf{J}_i^v`
- molar diffusive flux relative to a molar-averaged velocity, :math:`\mathbf{J}_i`
:param species_velocities:
vector ``numpy.ndarray`` specifying the species velocities :math:`\mathbf{u}_i` in :math:`[m/s]`. It should be of size ``(n_species,n_observations)``.
**Getters:**
- **get_species_velocities**
- **get_diffusive_molar_molar** (is set to ``None`` at class init)
- **get_diffusive_molar_mass** (is set to ``None`` at class init)
- **get_diffusive_mass_molar** (is set to ``None`` at class init)
- **get_diffusive_mass_mass** (is set to ``None`` at class init)
**Setters:**
- **set_species_velocities**
- **set_diffusive_molar_molar** (is set to ``None`` at class init)
- **set_diffusive_molar_mass** (is set to ``None`` at class init)
- **set_diffusive_mass_molar** (is set to ``None`` at class init)
- **set_diffusive_mass_mass** (is set to ``None`` at class init)
"""
# --------------------------------------------------------------------------
def __init__(self, species_velocities):
if not isinstance(species_velocities, np.ndarray):
raise ValueError("Parameter `species_velocities` has to be of type `numpy.ndarray`.")
try:
(n_species, n_observations) = np.shape(species_velocities)
except:
raise ValueError("Parameter `species_velocities` has to be a matrix.")
if n_species < 2:
raise ValueError("Parameter `species_velocities` has to have at least two species.")
self.__species_velocities = species_velocities
self.__velocity = multipy.Velocity(self.get_species_velocities)
self.__diffusive_molar_molar = None
self.__diffusive_molar_mass = None
self.__diffusive_mass_molar = None
self.__diffusive_mass_mass = None
    @property
    def get_species_velocities(self):
        """Species velocities, :math:`\\mathbf{u}_i`, in :math:`[m/s]`; size ``(n_species,n_observations)``."""
        return self.__species_velocities
    @property
    def get_diffusive_molar_molar(self):
        """Molar diffusive flux relative to a molar-averaged velocity, :math:`\\mathbf{J}_i` (``None`` until computed)."""
        return self.__diffusive_molar_molar
    @property
    def get_diffusive_molar_mass(self):
        """Molar diffusive flux relative to a mass-averaged velocity, :math:`\\mathbf{J}_i^v` (``None`` until computed)."""
        return self.__diffusive_molar_mass
    @property
    def get_diffusive_mass_molar(self):
        """Mass diffusive flux relative to a molar-averaged velocity, :math:`\\mathbf{j}_i^u` (``None`` until computed)."""
        return self.__diffusive_mass_molar
    @property
    def get_diffusive_mass_mass(self):
        """Mass diffusive flux relative to a mass-averaged velocity, :math:`\\mathbf{j}_i` (``None`` until computed)."""
        return self.__diffusive_mass_mass
    @get_species_velocities.setter
    def set_species_velocities(self, new_species_velocities):
        # Setting ``None`` is a silent no-op: the stored array is kept as-is.
        # NOTE(review): unlike __init__, no minimum-species check here.
        if new_species_velocities is not None:
            if not isinstance(new_species_velocities, np.ndarray):
                raise ValueError("Parameter `species_velocities` has to be of type `numpy.ndarray`.")
            try:
                (n_species, n_observations) = np.shape(new_species_velocities)
            except:
                raise ValueError("Parameter `species_velocities` has to be a matrix.")
            self.__species_velocities = new_species_velocities
    @get_diffusive_molar_molar.setter
    def set_diffusive_molar_molar(self, new_diffusive_molar_molar):
        # Setting ``None`` is a silent no-op: the stored array is kept as-is.
        if new_diffusive_molar_molar is not None:
            if not isinstance(new_diffusive_molar_molar, np.ndarray):
                raise ValueError("Parameter `diffusive_molar_molar` has to be of type `numpy.ndarray`.")
            try:
                (n_species, n_observations) = np.shape(new_diffusive_molar_molar)
            except:
                raise ValueError("Parameter `diffusive_molar_molar` has to be a matrix.")
            self.__diffusive_molar_molar = new_diffusive_molar_molar
    @get_diffusive_molar_mass.setter
    def set_diffusive_molar_mass(self, new_diffusive_molar_mass):
        # Setting ``None`` is a silent no-op: the stored array is kept as-is.
        if new_diffusive_molar_mass is not None:
            if not isinstance(new_diffusive_molar_mass, np.ndarray):
                raise ValueError("Parameter `diffusive_molar_mass` has to be of type `numpy.ndarray`.")
            try:
                (n_species, n_observations) = np.shape(new_diffusive_molar_mass)
            except:
                raise ValueError("Parameter `diffusive_molar_mass` has to be a matrix.")
            self.__diffusive_molar_mass = new_diffusive_molar_mass
    @get_diffusive_mass_molar.setter
    def set_diffusive_mass_molar(self, new_diffusive_mass_molar):
        # Setting ``None`` is a silent no-op: the stored array is kept as-is.
        if new_diffusive_mass_molar is not None:
            if not isinstance(new_diffusive_mass_molar, np.ndarray):
                raise ValueError("Parameter `diffusive_mass_molar` has to be of type `numpy.ndarray`.")
            try:
                (n_species, n_observations) = np.shape(new_diffusive_mass_molar)
            except:
                raise ValueError("Parameter `diffusive_mass_molar` has to be a matrix.")
            self.__diffusive_mass_molar = new_diffusive_mass_molar
    @get_diffusive_mass_mass.setter
    def set_diffusive_mass_mass(self, new_diffusive_mass_mass):
        # Setting ``None`` is a silent no-op: the stored array is kept as-is.
        if new_diffusive_mass_mass is not None:
            if not isinstance(new_diffusive_mass_mass, np.ndarray):
                raise ValueError("Parameter `diffusive_mass_mass` has to be of type `numpy.ndarray`.")
            try:
                (n_species, n_observations) = np.shape(new_diffusive_mass_mass)
            except:
                raise ValueError("Parameter `diffusive_mass_mass` has to be a matrix.")
            self.__diffusive_mass_mass = new_diffusive_mass_mass
# --------------------------------------------------------------------------
def plot_diffusive_flux(self, species_names=None, colors=None, figsize=(10,5), filename=None):
"""
Plots the computed diffusive fluxes.
**Example:**
.. image:: ../images/stefan-tube-diffusive-flux-molar-diff-molar-avg.svg
:width: 400
:param species_names: (optional)
``list`` of ``str`` specifying the species names.
:param colors: (optional)
``list`` of ``str`` specifying the plotting colors for each species. Example: ``colors=['#C7254E', '#BBBBBB', '#008CBA']``.
:param figsize: (optional)
``tuple`` specifying the figure size.
:param filename: (optional)
``str`` specifying the filename. If set to ``None``, plot will not be saved to a file.
"""
if filename is not None:
path = False
if filename[0:2] == '..':
__filename = filename[2::]
path = True
else:
__filename = filename
__base = __filename.split('.')[0]
__extension = __filename.split('.')[1]
if path:
__filename = '..' + __base
else:
__filename = __base
if self.get_diffusive_molar_molar is not None:
if filename is not None:
plt = multipy.plot.plot_1d_diffusive_flux(self.get_diffusive_molar_molar, flux='molar', velocity='molar', species_names=species_names, colors=colors, figsize=figsize, filename=__filename + '-molar-diff-molar-avg.' + __extension)
else:
plt = multipy.plot.plot_1d_diffusive_flux(self.get_diffusive_molar_molar, flux='molar', velocity='molar', species_names=species_names, colors=colors, figsize=figsize, filename=None)
if self.get_diffusive_molar_mass is not None:
if filename is not None:
plt = multipy.plot.plot_1d_diffusive_flux(self.get_diffusive_molar_mass, flux='molar', velocity='mass', species_names=species_names, colors=colors, figsize=figsize, filename=__filename + '-molar-diff-mass-avg.' + __extension)
else:
plt = multipy.plot.plot_1d_diffusive_flux(self.get_diffusive_molar_mass, flux='molar', velocity='mass', species_names=species_names, colors=colors, figsize=figsize, filename=None)
if self.get_diffusive_mass_molar is not None:
if filename is not None:
plt = multipy.plot.plot_1d_diffusive_flux(self.get_diffusive_mass_molar, flux='mass', velocity='molar', species_names=species_names, colors=colors, figsize=figsize, filename=__filename + '-mass-diff-molar-avg.' + __extension)
else:
plt = multipy.plot.plot_1d_diffusive_flux(self.get_diffusive_mass_molar, flux='mass', velocity='molar', species_names=species_names, colors=colors, figsize=figsize, filename=None)
if self.get_diffusive_mass_mass is not None:
if filename is not None:
plt = multipy.plot.plot_1d_diffusive_flux(self.get_diffusive_mass_mass, flux='mass', velocity='mass', species_names=species_names, colors=colors, figsize=figsize, filename=__filename + '-mass-diff-mass-avg.' + __extension)
else:
plt = multipy.plot.plot_1d_diffusive_flux(self.get_diffusive_mass_mass, flux='mass', velocity='mass', species_names=species_names, colors=colors, figsize=figsize, filename=None)
# --------------------------------------------------------------------------
def diffusive_molar_molar(self, species_mole_fractions, species_molar_densities):
"""
Computes the molar diffusive flux relative to a molar-averaged velocity:
.. math::
\mathbf{J}_i = c_i \mathbf{u}_i + c_i \mathbf{u}
:param species_mole_fractions:
scalar ``numpy.ndarray`` specifying the species mole fractions, :math:`X_i`, in :math:`[-]`. It should be of size ``(n_species,n_observations)``.
:param species_molar_densities:
scalar ``numpy.ndarray`` specifying the molar densities of species, :math:`c_i`, in :math:`[mole/m^3]`. It should be of size ``(n_species,n_observations)``.
:return:
- **diffusive_flux** - vector ``numpy.ndarray`` of molar diffusive fluxes relative to a molar-averaged velocity :math:`\mathbf{J}_i` in :math:`[mole/(m^2s)]`. It has size ``(n_species,n_observations)``.
"""
if not isinstance(species_mole_fractions, np.ndarray):
raise ValueError("Parameter `species_mole_fractions` has to be of type `numpy.ndarray`.")
try:
(n_species_1, n_observations_1) = np.shape(species_mole_fractions)
except:
raise ValueError("Parameter `species_mole_fractions` has to be a matrix.")
if not isinstance(species_molar_densities, np.ndarray):
raise ValueError("Parameter `species_molar_densities` has to be of type `numpy.ndarray`.")
try:
(n_species_2, n_observations_2) = np.shape(species_molar_densities)
except:
raise ValueError("Parameter `species_molar_densities` has to be a matrix.")
if n_observations_1 != n_observations_2:
raise ValueError("Parameters `species_mole_fractions` and `species_molar_densities` have different number of observations `n_observations`.")
if n_species_1 != n_species_2:
raise ValueError("Parameters `species_mole_fractions` and `species_molar_densities` have different number of species `n_species`.")
(n_species, n_observations) = np.shape(self.get_species_velocities)
if n_observations != n_observations_1:
raise ValueError("Parameters `species_mole_fractions`, `species_molar_densities` and `species_velocities` have different number of observations `n_observations`.")
if n_species != n_species_1:
raise ValueError("Parameters `species_mole_fractions`, `species_molar_densities` and `species_velocities` have different number of species `n_species`.")
molar_averaged_velocity = self.__velocity.molar_averaged(species_mole_fractions)
diffusive_flux = np.multiply(species_molar_densities, self.get_species_velocities) - np.multiply(species_molar_densities, molar_averaged_velocity)
self.__diffusive_molar_molar = diffusive_flux
return diffusive_flux
# --------------------------------------------------------------------------
def diffusive_molar_mass(self, species_mass_fractions, species_molar_densities):
"""
Computes the molar diffusive flux relative to a mass-averaged velocity:
.. math::
\mathbf{J}_i^v = c_i \mathbf{u}_i + c_i \mathbf{v}
:param species_mass_fractions:
scalar ``numpy.ndarray`` specifying the species mass fractions, :math:`Y_i`, in :math:`[-]`. It should be of size ``(n_species,n_observations)``.
:param species_molar_densities:
scalar ``numpy.ndarray`` specifying the species molar densities :math:`c_i` in :math:`[mole/m^3]`. It should be of size ``(n_species,n_observations)``.
:return:
- **diffusive_flux** - vector ``numpy.ndarray`` of molar diffusive fluxes relative to a mass-averaged velocity :math:`\mathbf{J}_i^v` in :math:`[mole/(m^2s)]`. It has size ``(n_species,n_observations)``.
"""
if not isinstance(species_mass_fractions, np.ndarray):
raise ValueError("Parameter `species_mass_fractions` has to be of type `numpy.ndarray`.")
try:
(n_species_1, n_observations_1) = np.shape(species_mass_fractions)
except:
raise ValueError("Parameter `species_mass_fractions` has to be a matrix.")
if not isinstance(species_molar_densities, np.ndarray):
raise ValueError("Parameter `species_molar_densities` has to be of type `numpy.ndarray`.")
try:
(n_species_2, n_observations_2) = np.shape(species_molar_densities)
except:
raise ValueError("Parameter `species_molar_densities` has to be a matrix.")
if n_observations_1 != n_observations_2:
raise ValueError("Parameters `species_mass_fractions` and `species_molar_densities` have different number of observations `n_observations`.")
if n_species_1 != n_species_2:
raise ValueError("Parameters `species_mass_fractions` and `species_molar_densities` have different number of species `n_species`.")
(n_species, n_observations) = np.shape(self.get_species_velocities)
if n_observations != n_observations_1:
raise ValueError("Parameters `species_mass_fractions`, `species_molar_densities` and `species_velocities` have different number of observations `n_observations`.")
if n_species != n_species_1:
raise ValueError("Parameters `species_mass_fractions`, `species_molar_densities` and `species_velocities` have different number of species `n_species`.")
mass_averaged_velocity = self.__velocity.mass_averaged(species_mass_fractions)
diffusive_flux = np.multiply(species_molar_densities, self.get_species_velocities) - np.multiply(species_molar_densities, mass_averaged_velocity)
self.__diffusive_molar_mass = diffusive_flux
return diffusive_flux
# --------------------------------------------------------------------------
def diffusive_mass_molar(self, species_mole_fractions, species_mass_densities):
    r"""
    Computes the mass diffusive flux relative to a molar-averaged velocity:

    .. math::

        \mathbf{j}_i^u = \rho_i \mathbf{u}_i - \rho_i \mathbf{u}

    (The implementation subtracts the molar-averaged velocity term; the sign
    shown here matches the code below.)

    :param species_mole_fractions:
        scalar ``numpy.ndarray`` specifying the species mole fractions :math:`X_i` in :math:`[-]`. It should be of size ``(n_species,n_observations)``.
    :param species_mass_densities:
        scalar ``numpy.ndarray`` specifying the species mass densities :math:`\rho_i` in :math:`[kg/m^3]`. It should be of size ``(n_species,n_observations)``.

    :return:
        - **diffusive_flux** - vector ``numpy.ndarray`` of mass diffusive fluxes relative to a molar-averaged velocity :math:`\mathbf{j}_i^u` in :math:`[kg/(m^2s)]`. It has size ``(n_species,n_observations)``.

    :raises ValueError:
        when either input is not a 2D ``numpy.ndarray`` or the shapes disagree
        with each other or with the stored species velocities.
    """
    if not isinstance(species_mole_fractions, np.ndarray):
        raise ValueError("Parameter `species_mole_fractions` has to be of type `numpy.ndarray`.")
    try:
        (n_species_1, n_observations_1) = np.shape(species_mole_fractions)
    except Exception:
        # np.shape of a non-2D input cannot be unpacked into two values.
        raise ValueError("Parameter `species_mole_fractions` has to be a matrix.")
    if not isinstance(species_mass_densities, np.ndarray):
        raise ValueError("Parameter `species_mass_densities` has to be of type `numpy.ndarray`.")
    try:
        (n_species_2, n_observations_2) = np.shape(species_mass_densities)
    except Exception:
        raise ValueError("Parameter `species_mass_densities` has to be a matrix.")
    if n_observations_1 != n_observations_2:
        raise ValueError("Parameters `species_mole_fractions` and `species_mass_densities` have different number of observations `n_observations`.")
    if n_species_1 != n_species_2:
        raise ValueError("Parameters `species_mole_fractions` and `species_mass_densities` have different number of species `n_species`.")
    (n_species, n_observations) = np.shape(self.get_species_velocities)
    if n_observations != n_observations_1:
        raise ValueError("Parameters `species_mole_fractions`, `species_mass_densities` and `species_velocities` have different number of observations `n_observations`.")
    if n_species != n_species_1:
        raise ValueError("Parameters `species_mole_fractions`, `species_mass_densities` and `species_velocities` have different number of species `n_species`.")
    molar_averaged_velocity = self.__velocity.molar_averaged(species_mole_fractions)
    # j_i^u = rho_i * u_i - rho_i * u, element-wise over (n_species, n_observations).
    diffusive_flux = np.multiply(species_mass_densities, self.get_species_velocities) - np.multiply(species_mass_densities, molar_averaged_velocity)
    # Cache the most recent result on the instance.
    self.__diffusive_mass_molar = diffusive_flux
    return diffusive_flux
# --------------------------------------------------------------------------
def diffusive_mass_mass(self, species_mass_fractions, species_mass_densities):
    r"""
    Computes the mass diffusive flux relative to a mass-averaged velocity:

    .. math::

        \mathbf{j}_i = \rho_i \mathbf{u}_i - \rho_i \mathbf{v}

    (The implementation subtracts the mass-averaged velocity term; the sign
    shown here matches the code below.)

    :param species_mass_fractions:
        scalar ``numpy.ndarray`` specifying the species mass fractions :math:`Y_i` in :math:`[-]`. It should be of size ``(n_species, n_observations)``.
    :param species_mass_densities:
        scalar ``numpy.ndarray`` specifying the species mass densities :math:`\rho_i` in :math:`[kg/m^3]`. It should be of size ``(n_species, n_observations)``.

    :return:
        - **diffusive_flux** - vector ``numpy.ndarray`` of mass diffusive fluxes relative to a mass-averaged velocity :math:`\mathbf{j}_i` in :math:`[kg/(m^2s)]`. It has size ``(n_species, n_observations)``.

    :raises ValueError:
        when either input is not a 2D ``numpy.ndarray`` or the shapes disagree
        with each other or with the stored species velocities.
    """
    if not isinstance(species_mass_fractions, np.ndarray):
        raise ValueError("Parameter `species_mass_fractions` has to be of type `numpy.ndarray`.")
    try:
        (n_species_1, n_observations_1) = np.shape(species_mass_fractions)
    except Exception:
        # np.shape of a non-2D input cannot be unpacked into two values.
        raise ValueError("Parameter `species_mass_fractions` has to be a matrix.")
    if not isinstance(species_mass_densities, np.ndarray):
        raise ValueError("Parameter `species_mass_densities` has to be of type `numpy.ndarray`.")
    try:
        (n_species_2, n_observations_2) = np.shape(species_mass_densities)
    except Exception:
        raise ValueError("Parameter `species_mass_densities` has to be a matrix.")
    if n_observations_1 != n_observations_2:
        raise ValueError("Parameters `species_mass_fractions` and `species_mass_densities` have different number of observations `n_observations`.")
    if n_species_1 != n_species_2:
        raise ValueError("Parameters `species_mass_fractions` and `species_mass_densities` have different number of species `n_species`.")
    (n_species, n_observations) = np.shape(self.get_species_velocities)
    if n_observations != n_observations_1:
        raise ValueError("Parameters `species_mass_fractions`, `species_mass_densities` and `species_velocities` have different number of observations `n_observations`.")
    if n_species != n_species_1:
        raise ValueError("Parameters `species_mass_fractions`, `species_mass_densities` and `species_velocities` have different number of species `n_species`.")
    mass_averaged_velocity = self.__velocity.mass_averaged(species_mass_fractions)
    # j_i = rho_i * u_i - rho_i * v, element-wise over (n_species, n_observations).
    diffusive_flux = np.multiply(species_mass_densities, self.get_species_velocities) - np.multiply(species_mass_densities, mass_averaged_velocity)
    # Cache the most recent result on the instance.
    self.__diffusive_mass_mass = diffusive_flux
    return diffusive_flux
# --------------------------------------------------------------------------
| 47.745098 | 244 | 0.657906 | 2,632 | 21,915 | 5.159954 | 0.06421 | 0.032987 | 0.020543 | 0.035564 | 0.869008 | 0.83256 | 0.801856 | 0.78168 | 0.759591 | 0.718136 | 0 | 0.005602 | 0.209902 | 21,915 | 458 | 245 | 47.849345 | 0.778747 | 0.244809 | 0 | 0.461538 | 0 | 0 | 0.261039 | 0.091635 | 0 | 0 | 0 | 0 | 0 | 1 | 0.068376 | false | 0 | 0.029915 | 0.021368 | 0.141026 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
da1d6a02eeb844897d0b4f2d15640a391973f96d | 1,971 | py | Python | ENotePadAlgorithm/strEncrypt/Morse.py | xioacd99/EnhancedNotePad | b95da1c4d957061ad60015f3b9ab5c445b5a1bc4 | [
"MIT"
] | null | null | null | ENotePadAlgorithm/strEncrypt/Morse.py | xioacd99/EnhancedNotePad | b95da1c4d957061ad60015f3b9ab5c445b5a1bc4 | [
"MIT"
] | null | null | null | ENotePadAlgorithm/strEncrypt/Morse.py | xioacd99/EnhancedNotePad | b95da1c4d957061ad60015f3b9ab5c445b5a1bc4 | [
"MIT"
] | null | null | null | # encode时会将非ANSII字符变为空格
# decode时会跳过非ANSII字符
# 摩斯电码加密的字符只有字符,数字,标点,不区分大小写
class MorseCoder:
    """Morse-code encoder/decoder for letters, digits and a few punctuation marks.

    encode() upper-cases its input and replaces every character that is not in
    the table with a single space; decode() leaves unknown tokens unchanged.
    Morse is case-insensitive, so decode() always yields upper-case letters.
    """

    def __init__(self):
        # Encoding table: character -> Morse code.
        self.encode_alphabet = {"A": ".-", "B": "-...", "C": "-.-.", "D": "-..",
                                "E": ".", "F": "..-.", "G": "--.", "H": "....",
                                "I": "..", "J": ".---", "K": "-.-", "L": ".-..",
                                "M": "--", "N": "-.", "O": "---", "P": ".--.",
                                "Q": "--.-", "R": ".-.", "S": "...", "T": "-",
                                "U": "..-", "V": "...-", "W": ".--", "X": "-..-",
                                "Y": "-.--", "Z": "--..",
                                # BUG FIX: "1" was ".---", a duplicate of "J";
                                # the standard code for "1" is ".----".
                                "1": ".----", "2": "..---", "3": "...--", "4": "....-",
                                "5": ".....", "6": "-....", "7": "--...", "8": "---..",
                                "9": "----.", "0": "-----",
                                # NOTE(review): ITU Morse for "(" is "-.--." —
                                # kept as-is pending confirmation.
                                "(": ".--.-", "-": "-....-", "?": "..--..", "/": "-..-.",
                                ".": ".-.-.-", "@": ".--.-."
                                }
        # Decoding table: Morse code -> character. BUG FIX: this attribute was
        # never created, so decode()/get_decode_alphabet() raised AttributeError.
        self.decode_alphabet = {code: char for char, code in self.encode_alphabet.items()}

    def encode(self, plaintext):
        """Encode ASCII chars in plaintext to space-separated Morse code."""
        charList = list(plaintext.upper())
        morsecodeList = \
            [self.encode_alphabet[char] if char in self.encode_alphabet else " " for char in charList]
        return " ".join(morsecodeList)

    def decode(self, morsecode):
        """Decode a space-separated Morse string; unknown tokens pass through unchanged."""
        morsecodeList = morsecode.split(" ")
        charList = \
            [self.decode_alphabet[char] if char in self.decode_alphabet else char for char in morsecodeList]
        return "".join(charList)

    def get_encode_alphabet(self):
        """Return the character -> code mapping."""
        return self.encode_alphabet

    def get_decode_alphabet(self):
        """Return the code -> character mapping."""
        return self.decode_alphabet

    def strEncrypt(self, msg):
        """Alias for encode(); uniform string-encryption entry point."""
        return self.encode(msg)
if __name__ == '__main__':
    # Quick manual smoke test: encode a mixed letters-and-digits sample.
    coder = MorseCoder()
    print(coder.strEncrypt('ABCD12345678'))
| 41.0625 | 115 | 0.367326 | 152 | 1,971 | 4.611842 | 0.526316 | 0.071327 | 0.10271 | 0.051355 | 0.068474 | 0.068474 | 0 | 0 | 0 | 0 | 0 | 0.014377 | 0.364789 | 1,971 | 47 | 116 | 41.93617 | 0.545527 | 0.060883 | 0 | 0 | 0 | 0 | 0.124864 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.171429 | false | 0 | 0 | 0.085714 | 0.342857 | 0.028571 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
da1fedbc0ae28396d7b17794e490d8e258826958 | 4,800 | py | Python | Yolov3_deepsort/Badminton_Service/player.py | Haosam/BadmintonAI | 4a1e837109cd279fb7480b90b31003c259e063cf | [
"Apache-2.0"
] | null | null | null | Yolov3_deepsort/Badminton_Service/player.py | Haosam/BadmintonAI | 4a1e837109cd279fb7480b90b31003c259e063cf | [
"Apache-2.0"
] | null | null | null | Yolov3_deepsort/Badminton_Service/player.py | Haosam/BadmintonAI | 4a1e837109cd279fb7480b90b31003c259e063cf | [
"Apache-2.0"
] | null | null | null | from tkinter import *
import cv2
# Global Variables, can be translated to database if it becomes production
# Display names of the known professional badminton players.
lcw = "Lee Chong Wei"
swh = "Son Wan Ho"
lyd = "Lee Yong Dae"
kgj = "Kim Gi Jung"
ksh = "Ko Sung Hyun"
yys = "Yo Yeon Seong"
csg = "Choi Sol Gyu"
wcl = "Wang Chi-Lin"
chl = "Chen Hung-Lin"
# Player heights in metres.
lcw_height = 1.72
swh_height = 1.77
lyd_height = 1.76
# NOTE(review): the name constant is `kgj` but the height is `kkj_height` —
# presumably both refer to Kim Gi Jung; confirm and unify the abbreviation.
kkj_height = 1.79
ksh_height = 1.79
yys_height = 1.81
csg_height = 1.81
wcl_height = 1.86
chl_height = 1.77
################################################################################
# Dropdown option lists for the four player slots (first entry is the placeholder).
player_names1 = ["Player 1",lcw,swh,lyd,kgj,ksh,yys,csg,wcl,chl]
player_names2 = ["Player 2",lcw,swh,lyd,kgj,ksh,yys,csg,wcl,chl]
player_names3 = ["Player 3",lcw,swh,lyd,kgj,ksh,yys,csg,wcl,chl]
player_names4 = ["Player 4",lcw,swh,lyd,kgj,ksh,yys,csg,wcl,chl]
# NOTE(review): this list omits wcl_height and chl_height even though both
# players appear in the dropdowns — confirm whether that is intentional.
player_heights = [lcw_height,swh_height,lyd_height,kkj_height,ksh_height,yys_height,csg_height]
#################################################################################
def player_main():
    """Show a Tk player-selection dialog and return the four (name, height) pairs.

    Side effects: sets the module-level globals ``name_1..name_4`` and
    ``height_1..height_4`` through the option-menu callbacks, and blocks in
    Tk's mainloop until the selection window is closed.

    :return: tuple ``(name_1, height_1, name_2, height_2, name_3, height_3,
        name_4, height_4)``.
    """
    print("If no player is present, please at least select None")

    def callback1(selection):
        # Option-menu handler for slot 1: record the name and look up the height.
        global name_1, height_1
        name_1 = selection
        height_1 = playercheck(selection)
        return (name_1, height_1)

    def callback2(selection):
        global name_2, height_2
        name_2 = selection
        height_2 = playercheck(selection)
        # BUG FIX: previously returned (name_1, height_1) — copy-paste error.
        return (name_2, height_2)

    def callback3(selection):
        global name_3, height_3
        name_3 = selection
        height_3 = playercheck(selection)
        return (name_3, height_3)

    def callback4(selection):
        global name_4, height_4
        name_4 = selection
        height_4 = playercheck(selection)
        return (name_4, height_4)

    def playercheck(selection):
        """Return the selected player's height in metres, or 1 for non-players."""
        heights = {
            "Lee Chong Wei": lcw_height,
            "Son Wan Ho": swh_height,
            # BUG FIX: "Lee Yong Dae" previously returned swh_height.
            "Lee Yong Dae": lyd_height,
            "Kim Gi Jung": kkj_height,
            "Ko Sung Hyun": ksh_height,
            "Yo Yeon Seong": yys_height,
            "Choi Sol Gyu": csg_height,
            "Wang Chi-Lin": wcl_height,
            "Chen Hung-Lin": chl_height,
        }
        # Placeholders ("None", "Select Player", "Player N") and anything
        # unrecognised all map to a neutral height of 1.  (The original
        # `elif "None" or ...` condition was always true, so every
        # non-player already returned 1 — behaviour is unchanged.)
        return heights.get(selection, 1)

    def playerselection():
        # Build the four-slot selection window and run the Tk event loop.
        window = Tk()
        window.geometry('400x400')
        window.title("Player Selection")
        label1 = Label(window, text="Player 1: ")
        label1.config(width=10, font=('Helvetica', 10))
        label2 = Label(window, text="Player 2: ")
        label2.config(width=10, font=('Helvetica', 10))
        label3 = Label(window, text="Player 3: ")
        label3.config(width=10, font=('Helvetica', 10))
        label4 = Label(window, text="Player 4: ")
        label4.config(width=10, font=('Helvetica', 10))
        label5 = Label(window, text="If no player is present,")
        label6 = Label(window, text=", please at least select None")
        label1.grid(row=0, column=0)
        label2.grid(row=1, column=0)
        label3.grid(row=2, column=0)
        label4.grid(row=3, column=0)
        label5.grid(row=8, column=0)
        label6.grid(row=8, column=1)
        clicked1 = StringVar()
        clicked1.set("Select Player")
        clicked2 = StringVar()
        clicked2.set("Select Player")
        clicked3 = StringVar()
        clicked3.set("Select Player")
        clicked4 = StringVar()
        clicked4.set("Select Player")
        drop1 = OptionMenu(window, clicked1, *player_names1, command=callback1)
        drop1.config(width=20, font=('Helvetica', 10))
        drop2 = OptionMenu(window, clicked2, *player_names2, command=callback2)
        drop2.config(width=20, font=('Helvetica', 10))
        drop3 = OptionMenu(window, clicked3, *player_names3, command=callback3)
        drop3.config(width=20, font=('Helvetica', 10))
        drop4 = OptionMenu(window, clicked4, *player_names4, command=callback4)
        drop4.config(width=20, font=('Helvetica', 10))
        drop1.grid(row=0, column=1)
        drop2.grid(row=1, column=1)
        drop3.grid(row=2, column=1)
        drop4.grid(row=3, column=1)
        labelTest1 = Label(text="", font=('Helvetica', 8), fg='red')
        labelTest1.grid(row=4, column=1)
        labelTest2 = Label(text="", font=('Helvetica', 8), fg='red')
        labelTest2.grid(row=5, column=1)
        labelTest3 = Label(text="", font=('Helvetica', 8), fg='red')
        labelTest3.grid(row=6, column=1)
        labelTest4 = Label(text="", font=('Helvetica', 8), fg='red')
        labelTest4.grid(row=7, column=1)
        window.mainloop()

    playerselection()
    return (name_1, height_1, name_2, height_2, name_3, height_3, name_4, height_4)
# print(name_1,height_1,",", name_2,height_2,",",name_3,height_3,",",name_4,height_4)
if __name__ == "__main__":
    # stuff only to run when not called via 'import' here
    # NOTE(review): name_1..name_4 are module globals set by the option-menu
    # callbacks inside player_main(); closing the window without selecting all
    # four players leaves them undefined and the prints below raise NameError.
    player_main()
    print(name_1)
    print(name_2)
    print(name_3)
    print(name_4)
da2374aced1b72eebcc58d79ed22779475feb324 | 4,749 | py | Python | scripts/install.py | discord-package-bot/discord-package-bot | 109603c57a668d75f6939e3c97aae72f2691640e | [
"MIT"
] | 1 | 2021-07-12T05:56:00.000Z | 2021-07-12T05:56:00.000Z | scripts/install.py | discord-package-bot/discord-package-bot | 109603c57a668d75f6939e3c97aae72f2691640e | [
"MIT"
] | null | null | null | scripts/install.py | discord-package-bot/discord-package-bot | 109603c57a668d75f6939e3c97aae72f2691640e | [
"MIT"
] | null | null | null | """
syntax: |
install <パッケージ>
install update:<パッケージ>
install file:<ファイル>
syntax_description: |
パッケージ: インストールするパッケージ。update:をつけると、パッケージが更新されます。
ファイル: エクスポートしたファイルのパス。
---
パッケージをインストールします。
"""
import os
import re
import requests
import shlex
import shutil
import subprocess
import sys
import yaml
import zipfile
from colorama import Fore, Style # , Back
from .utils import command, token
def get_info(repo):
    """Fetch and parse a package's ``dpb.yml`` from GitHub.

    Looks the repository up via the GitHub API, then reads ``dpb.yml`` from a
    branch named ``dpb`` if it exists, otherwise from the repository's default
    branch.  Prints progress and the basic package info to the console.

    :param repo: repository in ``owner/name`` form.
    :return: the parsed ``dpb.yml`` dict with an added ``"branch"`` key, or
        ``False`` when the repository or its ``dpb.yml`` cannot be retrieved.
    """
    resp = None
    repo_data = requests.get(
        f"https://api.github.com/repos/{repo}",
        headers={"authorization": token.github_token},
    )
    if repo_data.status_code != 200:
        # Repository not found (message: "package <repo> was not found").
        print(Fore.RED + f"パッケージ{repo}が見付かりませんでした。" + Fore.RESET)
        return False
    # Prefer a dedicated "dpb" branch when the repository provides one.
    resp = requests.get(f"https://raw.githubusercontent.com/{repo}/dpb/dpb.yml")
    if resp.status_code == 200:
        branch = "dpb"
    else:
        # Fall back to dpb.yml on the default branch.
        branch = repo_data.json()["default_branch"]
        resp = requests.get(
            f"https://raw.githubusercontent.com/{repo}/{branch}/dpb.yml"
        )
        if resp.status_code != 200:
            # No dpb.yml anywhere (message: "could not fetch info for <repo>").
            print(Fore.RED + f"{repo}の情報を取得できませんでした。" + Fore.RESET)
            return False
    print(Fore.GREEN + f"{repo}の情報を取得しました。" + Fore.RESET)
    info = yaml.safe_load(resp.text)
    print(Fore.CYAN + f"{repo}の情報" + Fore.RESET)
    print(f"名前: {info['name']}")
    print(f"作者: {repo.split('/')[0]}")
    # Remember which branch the package archive should be downloaded from.
    info["branch"] = branch
    return info
def download_repo(repo, info):
    """Download a package archive from GitHub and install it under ``./packages``.

    Steps: clean up a leftover ``.install-tmp`` directory if a previous run
    flagged it for deletion, stream the branch ZIP to disk, extract it, copy
    the package into ``./packages/<owner>@<name>``, append the package's pip
    requirements (if any) to a shared requirements file and pip-install them,
    then remove the temporary files.

    :param repo: repository in ``owner/name`` form.
    :param info: the parsed ``dpb.yml`` dict returned by ``get_info`` (must
        contain at least ``name``, ``branch`` and ``requirements`` keys).
    """
    # A previous run that could not delete .install-tmp leaves this marker file.
    if os.path.exists("./savedata/delete-install-tmp"):
        try:
            subprocess.run(shlex.split("rm -rf ./.install-tmp"))
        except PermissionError:
            # Extraction target is in use (message printed in Japanese).
            print(Fore.RED + "展開先が使用中のため、インストール出来ませんでした。" + Fore.RESET)
            sys.exit(1)
        except FileNotFoundError:
            os.unlink("./savedata/delete-install-tmp")
        else:
            os.unlink("./savedata/delete-install-tmp")
    # "Downloading <name>..."
    print(Fore.LIGHTBLACK_EX + f"{info['name']}をダウンロードしています..." + Fore.RESET)
    # Stream the branch archive to disk in 8 KiB chunks.
    with requests.get(
        f"https://github.com/{repo}/archive/refs/heads/{info['branch']}.zip",
        stream=True,
    ) as r:
        with open(".install-tmp.zip", "wb") as f:
            for chunk in r.iter_content(chunk_size=8192):
                f.write(chunk)
    with zipfile.ZipFile(".install-tmp.zip") as existing_zip:
        existing_zip.extractall(".install-tmp")
    # "Installing..."
    print(Fore.LIGHTBLACK_EX + "インストールしています..." + Fore.RESET)
    # GitHub archives extract to "<name>-<branch>".
    zip_dir = repo.split("/")[1] + "-" + info["branch"]
    shutil.copytree(f"./.install-tmp/{zip_dir}", f"./packages/{repo.replace('/', '@')}")
    # NOTE(review): the existence check falls back to "dpb_requirements.txt"
    # but the open() below uses info['requirements'] directly — confirm that
    # 'requirements' is always set when this branch is taken.
    if info["requirements"] is not None and os.path.exists(
        f"./.install-tmp/{zip_dir}/"
        + (info.get("requirements", None) or "dpb_requirements.txt")
    ):
        with open(f"./.install-tmp/{zip_dir}/{info['requirements']}", "r") as f:
            # Strip comments and collapse blank lines.
            requirements = re.sub(r"#.*|\n{2,}", "", f.read())
        with open("./savedata/package_requirements.txt", "a") as f:
            # Tag the appended section with the owning repo.
            f.write(f"#!==={repo}===!\n" + requirements.strip() + "\n")
        subprocess.run(
            shlex.split(command.pip + "install -r ./savedata/package_requirements.txt"),
            stdout=subprocess.DEVNULL,
            stderr=subprocess.DEVNULL,
        )
    try:
        subprocess.run(shlex.split("rm -rf ./.install-tmp .install-tmp.zip"))
    except PermissionError:
        # Could not delete now; leave a marker so the next run cleans up.
        with open("./savedata/delete-install-tmp", "w"):
            pass
    # "Installation complete."
    print(Fore.GREEN + "インストールが完了しました。" + Fore.RESET)
def main():
    """Entry point for ``dpb install``: resolve the package list and install each.

    The package list comes from (in order): an interactive prompt when no
    argument is given, a ``file:<path>`` argument pointing at an exported list,
    or the remaining command-line arguments.  Already-installed packages are
    skipped unless prefixed with ``update:``, which removes and reinstalls them.
    """
    if len(sys.argv) <= 2:
        # Prompt: "enter the package to install as owner/name".
        repos = input("インストールするパッケージを○○/○○で入力して下さい。")
    elif sys.argv[2].startswith("file:"):
        try:
            # NOTE(review): assumes the process starts inside a ".main"
            # directory and the file path is relative to its parent — confirm
            # against the launcher script.
            os.chdir("..")
            with open(sys.argv[2][5:]) as f:
                # Strip comments/blank lines and treat the rest as a
                # space-separated repo list.
                repos = re.sub(r"#.*|\n{2,}", "", f.read()).replace("\n", " ")
            os.chdir(".main")
        except FileNotFoundError:
            # "File was not found."
            print(Fore.RED + "ファイルが見付かりませんでした。" + Fore.RESET)
            sys.exit(1)
    else:
        repos = " ".join(sys.argv[2:])
    for repo in repos.split():
        # Skip packages that are already installed (unless updating).
        if os.path.exists(
            f"./packages/{repo.replace('/', '@')}"
        ) and not repo.startswith("update:"):
            with open(f"./packages/{repo.replace('/', '@')}/dpb.yml") as f:
                info = yaml.safe_load(f)
            # "Package <name>(<repo>) is already installed. Run
            # `dpb install update:<repo>` to update it."
            print(
                f"{Fore.RED}パッケージ {Style.BRIGHT}{info['name']}({repo}){Style.NORMAL}はすでにインストールされています。{Fore.RESET}\n"
                f"{Fore.CYAN}アップデートするには {Style.BRIGHT}dpb install update:{repo}{Style.NORMAL} を実行して下さい。{Fore.RESET}"
            )
            continue
        if repo.startswith("update:"):
            # Updating: remove the existing install, then fall through.
            repo = repo.replace("update:", "")
            subprocess.run(shlex.split(f"rm -rf ./packages/{repo.replace('/', '@')}"))
        info = get_info(repo)
        if info is False:
            continue
        download_repo(repo, info)
| 34.664234 | 116 | 0.579912 | 576 | 4,749 | 4.739583 | 0.28125 | 0.047619 | 0.028571 | 0.024908 | 0.18315 | 0.164469 | 0.106593 | 0.064469 | 0.064469 | 0 | 0 | 0.006683 | 0.243841 | 4,749 | 136 | 117 | 34.919118 | 0.752437 | 0.042114 | 0 | 0.159292 | 0 | 0.017699 | 0.299934 | 0.148646 | 0 | 0 | 0 | 0 | 0 | 1 | 0.026549 | false | 0.00885 | 0.097345 | 0 | 0.150442 | 0.106195 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
da23f0ddb62bb0c0988bd093b73535c31a660639 | 72 | py | Python | plugin/src/test/resources/refactoring/extractmethod/Comment.after.py | consulo/consulo-python | 586c3eaee3f9c2cc87fb088dc81fb12ffa4b3a9d | [
"Apache-2.0"
] | null | null | null | plugin/src/test/resources/refactoring/extractmethod/Comment.after.py | consulo/consulo-python | 586c3eaee3f9c2cc87fb088dc81fb12ffa4b3a9d | [
"Apache-2.0"
] | 11 | 2017-02-27T22:35:32.000Z | 2021-12-24T08:07:40.000Z | plugin/src/test/resources/refactoring/extractmethod/Comment.after.py | consulo/consulo-python | 586c3eaee3f9c2cc87fb088dc81fb12ffa4b3a9d | [
"Apache-2.0"
] | null | null | null | def bar():
print("Hello")
#Comment to method
def foo():
bar()
| 9 | 18 | 0.555556 | 10 | 72 | 4 | 0.8 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.263889 | 72 | 7 | 19 | 10.285714 | 0.754717 | 0.236111 | 0 | 0 | 0 | 0 | 0.092593 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.5 | true | 0 | 0 | 0 | 0.5 | 0.25 | 1 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 4 |
da24c7b3d7ea12e45a63e2df57343289b27d952a | 1,565 | py | Python | rlkeras/utils/memory.py | will-hcau/rlkeras | 9cc36b238dae794197fcb8689a5a1ffa1c0a42c0 | [
"MIT"
] | null | null | null | rlkeras/utils/memory.py | will-hcau/rlkeras | 9cc36b238dae794197fcb8689a5a1ffa1c0a42c0 | [
"MIT"
] | null | null | null | rlkeras/utils/memory.py | will-hcau/rlkeras | 9cc36b238dae794197fcb8689a5a1ffa1c0a42c0 | [
"MIT"
] | null | null | null | from collections import deque
import numpy as np
import random
class RandomReplayBuffer(object):
    """Experience replay buffer that samples uniformly."""

    def __init__(self, buffer_size):
        # deque(maxlen=...) evicts the oldest transition automatically once full.
        self.buffer_size = buffer_size
        self.buffer = deque(maxlen=buffer_size)

    def __len__(self):
        return len(self.buffer)

    def append(self, state, action, reward, next_state, done):
        """Store one transition into the replay buffer "D".

        Referring to the DQN paper, (S, A, R, S t+1, terminate) is stored into
        a buffer of limited size; when the buffer is full the oldest transition
        is discarded.
        """
        self.buffer.append((state, action, reward, next_state, done))

    def sample(self, batch_size, num_of_step=1):
        """Uniformly sample a minibatch from the replay buffer.

        :param batch_size: number of samples to draw (with replacement).
        :param num_of_step: length of the consecutive-transition window
            returned per sample (1 = single transitions).
        :return: list of ``batch_size`` tuples
            ``(states, actions, rewards, next_states, dones)``, each element a
            list of length ``num_of_step``.
        """
        sample_data = []
        # BUG FIX: np.random.random_integers is deprecated (and removed from
        # recent NumPy).  randint's upper bound is exclusive, so +1 keeps the
        # same inclusive index range [0, len(buffer) - num_of_step].
        sample_indices = np.random.randint(0, len(self.buffer) - num_of_step + 1, size=batch_size)
        for s in sample_indices:
            n_state = []
            n_action = []
            n_reward = []
            n_next_state = []
            n_done = []
            # Collect num_of_step consecutive transitions starting at index s.
            for n in range(num_of_step):
                exp = self.buffer[s + n]
                n_state.append(exp[0])
                n_action.append(exp[1])
                n_reward.append(exp[2])
                n_next_state.append(exp[3])
                n_done.append(exp[4])
            sample_data.append((n_state, n_action, n_reward, n_next_state, n_done))
        return sample_data
da24d2c86b3410255d8a070349c1d9c6e890f449 | 6,335 | py | Python | PathPlanning/RRTStar/rrt_star.py | cmuehlbacher/PythonRobotics | c66fccc71c681387ff61b59554694b25399ca790 | [
"MIT"
] | 38 | 2019-12-08T12:26:04.000Z | 2022-03-06T11:29:08.000Z | PathPlanning/RRTStar/rrt_star.py | YoungGer/PythonRobotics | 9b8f2bd88a3d516d8deb473693661c1aea59fe68 | [
"MIT"
] | null | null | null | PathPlanning/RRTStar/rrt_star.py | YoungGer/PythonRobotics | 9b8f2bd88a3d516d8deb473693661c1aea59fe68 | [
"MIT"
] | 15 | 2020-02-12T15:57:28.000Z | 2021-08-28T07:39:18.000Z | """
Path planning Sample Code with RRT*
author: Atsushi Sakai(@Atsushi_twi)
"""
import copy
import math
import os
import sys
import matplotlib.pyplot as plt
sys.path.append(os.path.dirname(os.path.abspath(__file__)) +
"/../RRT/")
try:
from rrt import RRT
except ImportError:
raise
show_animation = True
class RRTStar(RRT):
    """
    Class for RRT Star planning
    """

    class Node:
        """Search-tree node: position (x, y), path cost from start, parent link."""

        def __init__(self, x, y):
            self.x = x
            self.y = y
            self.cost = 0.0
            self.parent = None

    def __init__(self, start, goal, obstacle_list, rand_area,
                 expand_dis=0.5,
                 goal_sample_rate=20,
                 max_iter=500,
                 connect_circle_dist=50.0
                 ):
        """
        Setting Parameter

        start:Start Position [x,y]
        goal:Goal Position [x,y]
        obstacleList:obstacle Positions [[x,y,size],...]
        randArea:Random Sampling Area [min,max]
        connect_circle_dist: scaling constant of the near-neighbour search radius
        """
        super().__init__(start, goal, obstacle_list,
                         rand_area, expand_dis, goal_sample_rate, max_iter)
        self.connect_circle_dist = connect_circle_dist

    def planning(self, animation=True, search_until_maxiter=True):
        """
        rrt star path planning

        animation: flag for animation on or off
        search_until_maxiter: search until max iteration for path improving or not
        """
        self.node_list = [self.start]
        for i in range(self.max_iter):
            rnd = self.get_random_point()
            nearest_ind = self.get_nearest_list_index(self.node_list, rnd)
            new_node = self.steer(rnd, self.node_list[nearest_ind])

            if self.check_collision(new_node, self.obstacleList):
                near_inds = self.find_near_nodes(new_node)
                new_node = self.choose_parent(new_node, near_inds)
                if new_node:
                    self.node_list.append(new_node)
                    self.rewire(new_node, near_inds)

            if animation and i % 5 == 0:
                self.draw_graph(rnd)

            if not search_until_maxiter and new_node:  # check reaching the goal
                d, _ = self.calc_distance_and_angle(new_node, self.end)
                if d <= self.expand_dis:
                    return self.generate_final_course(len(self.node_list) - 1)

        print("reached max iteration")

        last_index = self.search_best_goal_node()
        # NOTE(review): index 0 (the start node) is falsy here and would be
        # skipped — confirm whether that degenerate case matters.
        if last_index:
            return self.generate_final_course(last_index)

        return None

    def choose_parent(self, new_node, near_inds):
        """Connect new_node to the collision-free near node that minimises path cost.

        Returns new_node with cost/parent set, or None when near_inds is empty
        or every candidate connection collides.
        """
        if not near_inds:
            return None

        # search nearest cost in near_inds
        costs = []
        for i in near_inds:
            d, theta = self.calc_distance_and_angle(self.node_list[i], new_node)
            if self.check_collision_extend(self.node_list[i], theta, d):
                costs.append(self.node_list[i].cost + d)
            else:
                costs.append(float("inf"))  # the cost of collision node
        min_cost = min(costs)

        if min_cost == float("inf"):
            print("There is no good path.(min_cost is inf)")
            return None

        new_node.cost = min_cost
        min_ind = near_inds[costs.index(min_cost)]
        new_node.parent = self.node_list[min_ind]

        return new_node

    def search_best_goal_node(self):
        """Return the index of the cheapest node within expand_dis of the goal, or None."""
        dist_to_goal_list = [self.calc_dist_to_goal(n.x, n.y) for n in self.node_list]
        # BUG FIX: the previous list.index()-based lookup returned the index of
        # the FIRST node with a matching distance, so nodes with duplicate
        # distances produced duplicate/wrong indices and could hide the best node.
        goal_inds = [i for i, d in enumerate(dist_to_goal_list) if d <= self.expand_dis]

        if not goal_inds:
            return None

        min_cost = min([self.node_list[i].cost for i in goal_inds])
        for i in goal_inds:
            if self.node_list[i].cost == min_cost:
                return i

        return None

    def find_near_nodes(self, new_node):
        """Return indices of nodes within the RRT* shrinking connection radius."""
        nnode = len(self.node_list) + 1
        # Radius shrinks as the tree grows: r ~ gamma * sqrt(log(n) / n).
        r = self.connect_circle_dist * math.sqrt((math.log(nnode) / nnode))
        dist_list = [(node.x - new_node.x) ** 2 +
                     (node.y - new_node.y) ** 2 for node in self.node_list]
        near_inds = [dist_list.index(i) for i in dist_list if i <= r ** 2]
        return near_inds

    def rewire(self, new_node, near_inds):
        """Re-parent near nodes to new_node when that yields a cheaper collision-free path."""
        for i in near_inds:
            near_node = self.node_list[i]
            d, theta = self.calc_distance_and_angle(near_node, new_node)
            new_cost = new_node.cost + d

            if near_node.cost > new_cost:
                if self.check_collision_extend(near_node, theta, d):
                    near_node.parent = new_node
                    near_node.cost = new_cost
                    # Costs changed, so update every descendant of new_node.
                    self.propagate_cost_to_leaves(new_node)

    def propagate_cost_to_leaves(self, parent_node):
        """Recursively recompute the cost of all descendants of parent_node."""
        for node in self.node_list:
            if node.parent == parent_node:
                d, _ = self.calc_distance_and_angle(parent_node, node)
                node.cost = parent_node.cost + d
                self.propagate_cost_to_leaves(node)

    def check_collision_extend(self, near_node, theta, d):
        """Collision-check the straight segment from near_node along theta for length d.

        Walks the segment in expand_dis steps; returns False on the first
        colliding intermediate point, True otherwise.
        """
        tmp_node = copy.deepcopy(near_node)
        for i in range(int(d / self.expand_dis)):
            tmp_node.x += self.expand_dis * math.cos(theta)
            tmp_node.y += self.expand_dis * math.sin(theta)
            if not self.check_collision(tmp_node, self.obstacleList):
                return False
        return True
def main():
    """Demo: plan a path through a hard-coded obstacle field and plot the result."""
    print("Start " + __file__)

    # ====Search Path with RRT====
    obstacle_list = [
        (5, 5, 1),
        (3, 6, 2),
        (3, 8, 2),
        (3, 10, 2),
        (7, 5, 2),
        (9, 5, 2)
    ]  # [x,y,size(radius)]

    # Set Initial parameters
    rrt = RRTStar(start=[0, 0],
                  goal=[10, 10],
                  rand_area=[-2, 15],
                  obstacle_list=obstacle_list)
    path = rrt.planning(animation=show_animation, search_until_maxiter=False)

    if path is None:
        print("Cannot find path")
    else:
        print("found path!!")

        # Draw final path
        if show_animation:
            rrt.draw_graph()
            plt.plot([x for (x, y) in path], [y for (x, y) in path], '-r')
            plt.grid(True)
            plt.pause(0.01)  # Need for Mac
            plt.show()
if __name__ == '__main__':
main()
| 30.311005 | 99 | 0.572691 | 857 | 6,335 | 3.968495 | 0.199533 | 0.047339 | 0.056454 | 0.022934 | 0.206116 | 0.0788 | 0.051749 | 0.022346 | 0 | 0 | 0 | 0.011762 | 0.328966 | 6,335 | 208 | 100 | 30.456731 | 0.788285 | 0.06693 | 0 | 0.065693 | 0 | 0 | 0.020982 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.072993 | false | 0 | 0.051095 | 0 | 0.226277 | 0.036496 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
da2640c8cad77dff99c17a878df82c7bc7beb176 | 786 | py | Python | accountlist.py | beitnes/accountlist | 5bca4960405a23b20bce0d9928536ed6db8c39d0 | [
"Apache-2.0"
] | null | null | null | accountlist.py | beitnes/accountlist | 5bca4960405a23b20bce0d9928536ed6db8c39d0 | [
"Apache-2.0"
] | null | null | null | accountlist.py | beitnes/accountlist | 5bca4960405a23b20bce0d9928536ed6db8c39d0 | [
"Apache-2.0"
] | null | null | null | import boto3
import pprint
def account_list(output_format = "json"):
    """Print every account in the AWS Organization.

    :param output_format: only "json" is currently supported; any other value
        fetches the accounts but prints nothing.
    """
    #TODO: Add other output formats
    client = boto3.client('organizations')
    pretty_printer = pprint.PrettyPrinter(indent=4)
    accounts = list()
    # Use boto3's built-in paginator instead of manually chasing NextToken
    # (the previous loop also compared the token with `== None`).
    paginator = client.get_paginator('list_accounts')
    for page in paginator.paginate():
        accounts.extend(page['Accounts'])
    # print("quantity: " + str(len(accounts)))
    if output_format == "json":
        pretty_printer.pprint(accounts)
def main():
    """Command-line entry point: list the organization's accounts as JSON."""
    #TODO: parse args
    #TODO: Set up logging
    account_list()

if __name__ == "__main__":
    main()
| 18.714286 | 65 | 0.625954 | 87 | 786 | 5.436782 | 0.528736 | 0.057082 | 0.067653 | 0.109937 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.005199 | 0.265903 | 786 | 41 | 66 | 19.170732 | 0.814558 | 0.136132 | 0 | 0 | 0 | 0 | 0.068148 | 0 | 0 | 0 | 0 | 0.02439 | 0 | 1 | 0.095238 | false | 0 | 0.095238 | 0 | 0.190476 | 0.142857 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
da2652bdef2ea0254c65e10f3f8343f49c9b32ff | 745 | py | Python | test/commentProcessor_test.py | ponder-lab/GitHub-Issue-Mining | 5cff97bd2322894338c71f5ba7bd743e2e204a72 | [
"MIT"
] | 3 | 2021-04-18T04:07:35.000Z | 2021-12-25T06:35:32.000Z | test/commentProcessor_test.py | ponder-lab/GitHub-Issue-Classifier | 5cff97bd2322894338c71f5ba7bd743e2e204a72 | [
"MIT"
] | 4 | 2021-04-06T01:06:36.000Z | 2021-08-06T00:34:53.000Z | test/commentProcessor_test.py | ponder-lab/GitHub-Issue-Mining | 5cff97bd2322894338c71f5ba7bd743e2e204a72 | [
"MIT"
] | null | null | null | from utils.commentProcessor import processComment
# Fixture table for processComment: each entry pairs a raw comment string with
# the expected normalised output.  From the expectations: text is lower-cased,
# @screen-names become SCREEN_NAME, URLs become URL, quoted lines ("> ...")
# collapse to QUOTE, and some short filler words are dropped (presumably a
# stop-word filter — confirm against processComment's implementation).
TEST_CASES = [
    {
        "test": "Hello this is a pre processed string",
        "expected_result": "hello pre processed string"
    },
    {
        "test": "This string contains a screen name @y3pio tag",
        "expected_result": "this string contains screen name SCREEN_NAME tag"
    },
    {
        "test": "Testing this url string https://test.foo.com token",
        "expected_result": "testing url string URL token"
    },
    {
        "test": "> This line is a quote, should expect a single QUOTE token",
        "expected_result": "QUOTE"
    }
]
def test_comment_processor():
    """Each fixture's processed output must equal its expected_result."""
    for case in TEST_CASES:
        assert processComment(case['test']) == case['expected_result']
da26ac275ef766fda1ea905a5a0277b1855e977b | 7,275 | py | Python | plus_reader/plus_highlighting.py | ShashkovS/plus_reader | e53a7af01ae480f7a63e33d01a0a99ea681e7fee | [
"MIT"
] | 3 | 2017-11-27T10:01:42.000Z | 2018-05-07T09:37:24.000Z | plus_reader/plus_highlighting.py | ShashkovS/plus_reader | e53a7af01ae480f7a63e33d01a0a99ea681e7fee | [
"MIT"
] | 5 | 2017-09-28T09:53:13.000Z | 2017-11-25T20:10:00.000Z | plus_reader/plus_highlighting.py | ShashkovS/plus_reader | e53a7af01ae480f7a63e33d01a0a99ea681e7fee | [
"MIT"
] | 2 | 2017-09-14T11:56:07.000Z | 2017-09-14T12:49:46.000Z | import logging
import sys
import traceback
import numpy as np
from PyQt5.QtGui import QPixmap, QPainter, QMouseEvent
from PyQt5.QtWidgets import QApplication, QWidget, QGridLayout, QMenu, QSlider, QLabel
from PyQt5.QtCore import Qt
sys._excepthook = sys.excepthook
def excepthook(excType, excValue, tracebackobj):
    """Global exception hook: print the full traceback (type, value, frames).

    BUG FIX: the previous implementation called
    ``traceback.print_tb(tracebackobj, excType, excValue)``, but print_tb's
    signature is ``(tb, limit=None, file=None)`` — the exception type was
    passed as ``limit`` and the value as the output ``file``, so nothing
    useful was reported.  ``print_exception`` takes all three correctly.
    """
    traceback.print_exception(excType, excValue, tracebackobj)
sys.excepthook = excepthook
VIRTUAL_BORDER_WIDTH = 5
class Label(QWidget):
    """Widget that paints a scanned-page pixmap and handles mouse input.

    Left-clicks toggle cell highlighting on the underlying image; the
    context menu adds or removes grid lines near the clicked point.
    """

    def __init__(self, parent=None):
        QWidget.__init__(self, parent=parent)
        # Parent is expected to be the ScannedPageWidget owning the image.
        self.page = self.parentWidget()
        # Current QPixmap to paint; None until setPixmap() is called.
        self.p = None

    def setPixmap(self, p):
        """Store the pixmap to be drawn on the next paint event."""
        self.p = p

    def paintEvent(self, event):
        """Draw the stored pixmap scaled smoothly to the widget rect."""
        if self.p:
            painter = QPainter(self)
            painter.setRenderHint(QPainter.SmoothPixmapTransform)
            painter.drawPixmap(self.rect(), self.p)

    def contextMenuEvent(self, QContextMenuEvent):
        """Show an add/delete grid-line menu and dispatch the chosen action."""
        cmenu = QMenu(self)
        positionx = QContextMenuEvent.x()
        positiony = QContextMenuEvent.y()
        # Map the click from widget coordinates to image coordinates.
        im_pos_x, im_pos_y = list(
            map(int, self.page.image.window_coords_to_image_coords(positionx, positiony, self.width(), self.height())))
        logging.info(str(positionx) + ' ' + str(positiony) + ' -> ' + str(im_pos_x) + ' ' + str(im_pos_y))
        # Distance to the nearest existing vertical / horizontal grid line
        # (infinite when no such lines exist yet).
        min_vline_dist = min(abs(im_pos_x - vl) for vl in self.page.image.coords_of_vert_lns) if self.page.image.coords_of_vert_lns\
            else float('inf')
        min_hline_dist = min(abs(im_pos_y - vl) for vl in self.page.image.coords_of_horiz_lns) if self.page.image.coords_of_horiz_lns\
            else float('inf')
        # The menu entries are stored as method *names*; the selected one
        # is dispatched via getattr() below.
        self._actions = []
        self._actions_objects = []
        if min_hline_dist <= VIRTUAL_BORDER_WIDTH * 3:
            DelHorAction = cmenu.addAction('Delete Horizontal line here')
            self._actions.append('DelHorAction')
            self._actions_objects.append(DelHorAction)
        else:
            AddHorAction = cmenu.addAction('Add Horizontal line here')
            self._actions.append('AddHorAction')
            self._actions_objects.append(AddHorAction)
        if min_vline_dist <= VIRTUAL_BORDER_WIDTH * 3:
            DelVertAction = cmenu.addAction('Delete Vertical line here')
            self._actions.append('DelVertAction')
            self._actions_objects.append(DelVertAction)
        else:
            AddVertAction = cmenu.addAction('Add Vertical line here')
            self._actions.append('AddVertAction')
            self._actions_objects.append(AddVertAction)
        action = cmenu.exec_(self.mapToGlobal(QContextMenuEvent.pos()))
        if action:
            selected_action_index = self._actions_objects.index(action)
            selected_action = self._actions[selected_action_index]
            logging.info(str(selected_action))
            # TODO (translated from Russian): there are no working methods
            # yet, so this piece is not needed for now
            method = getattr(self, selected_action)
            method((im_pos_x, im_pos_y))

    def AddHorAction(self, coords):
        """Add a horizontal grid line at the clicked image y-coordinate."""
        logging.info('ДОБАВИТЬ ГОРИЗОНТАЛЬ')
        self.page.image.coords_of_horiz_lns.append(coords[1])  # TODO (translated): use bisect insertion instead of append+sort
        self.page.image.coords_of_horiz_lns.sort()
        self.page.image.find_filled_cells()
        self.page.image.initial_mark_filled_cells()
        self.page.reload_image()

    def DelHorAction(self, coords):
        """Delete the horizontal grid line closest to the clicked point."""
        logging.info('УДАЛИТЬ ГОРИЗОНТАЛЬ')
        min_dist = float('inf')
        min_line = float('inf')
        for i in self.page.image.coords_of_horiz_lns:
            dist = abs(i - coords[1])
            if dist < min_dist:
                min_dist = dist
                min_line = i
        # NOTE(review): raises ValueError on an empty list; the context menu
        # only offers deletion when a line is nearby, so this should hold.
        self.page.image.coords_of_horiz_lns.remove(min_line)
        self.page.image.find_filled_cells()
        self.page.image.initial_mark_filled_cells()
        self.page.reload_image()

    def DelVertAction(self, coords):
        """Delete the vertical grid line closest to the clicked point."""
        logging.info('УДАЛИТЬ ВЕРТИКАЛЬ')
        min_dist = float('inf')
        min_line = float('inf')
        for i in self.page.image.coords_of_vert_lns:
            dist = abs(i - coords[0])
            if dist < min_dist:
                min_dist = dist
                min_line = i
        self.page.image.coords_of_vert_lns.remove(min_line)
        self.page.image.find_filled_cells()
        self.page.image.initial_mark_filled_cells()
        self.page.reload_image()

    def AddVertAction(self, coords):
        """Add a vertical grid line at the clicked image x-coordinate."""
        logging.info('ДОБАВИТЬ ВЕРТИКАЛЬ')
        self.page.image.coords_of_vert_lns.append(coords[0])
        self.page.image.coords_of_vert_lns.sort()
        self.page.image.find_filled_cells()
        self.page.image.initial_mark_filled_cells()
        self.page.reload_image()

    def mousePressEvent(self, a0: QMouseEvent):
        """Toggle cell highlighting on a left-button click."""
        button_pressed = a0.button()
        cursor_pos_x = int(a0.x())
        cursor_pos_y = int(a0.y())
        logging.info(str(cursor_pos_x) + ' ' + str(cursor_pos_y))
        if button_pressed == 1:  # 1 == Qt.LeftButton
            # coord_to_cell presumably converts widget coords to a cell
            # index using the current widget size — TODO confirm.
            cell_pos = self.page.image.coord_to_cell(cursor_pos_x, cursor_pos_y, self.width(), self.height())
            if cell_pos:
                self.page.image.toggle_highlight_cell(*cell_pos)
                self.page.reload_image()
class ScannedPageWidget(QWidget):
    """Top-level widget: a B/W-threshold slider row above the page image."""

    def __init__(self, image):
        super(ScannedPageWidget, self).__init__()
        # The image object that renders itself to PNG bytes via to_bin().
        self.image = image
        self.initUi()

    def reload_image(self, *, update=True):
        """Re-render the image into the pixmap; repaint unless update=False."""
        self.qp.loadFromData(self.image.to_bin())
        self.lb.setPixmap(self.qp)
        if update:
            self.lb.update()

    def initUi(self):
        """Build the layout: threshold slider + value label + image label."""
        self.lay = QGridLayout(self)
        self.lay.setSpacing(10)
        self.lay.setContentsMargins(0, 0, 0, 0)
        self.slide = QSlider(Qt.Horizontal, self)
        self.slide.setFocusPolicy(Qt.NoFocus)
        self.slide.setMaximum(255)
        self.slide.setMinimum(0)
        self.slide.setTickPosition(QSlider.TicksBelow)
        # Fix: the original called setTickInterval(5) twice; once suffices.
        self.slide.setTickInterval(5)
        self.slide.setValue(self.image.black_threshold)
        # Live label update while dragging; recompute only on release.
        self.slide.valueChanged.connect(self.sliderchange)
        self.slide.sliderReleased.connect(self.valuechange)
        self.lb = Label(self)
        self.qp = QPixmap()
        self.reload_image(update=False)
        self.slval = QLabel(str(self.slide.sliderPosition()))
        self.lay.addWidget(QLabel('Change B/W Threshold'), 0, 0)
        self.lay.addWidget(self.slide, 0, 2)
        self.lay.addWidget(self.slval, 0, 9)
        self.lay.addWidget(self.lb, 1, 0, 10, 10)
        self.setLayout(self.lay)

    def sliderchange(self):
        """Update the numeric label while the slider is being dragged."""
        self.slval.setText(str(self.slide.sliderPosition()))

    def valuechange(self):
        """Apply the new threshold once the slider is released (expensive)."""
        self.image.black_threshold = self.slide.sliderPosition()
        self.image.bitmap_lines_filled_cells_and_marking()
        self.reload_image()
def show(image):
    """Open a Qt window displaying *image*, scaled to fit the screen.

    Blocks until the window is closed.  NOTE(review): shadows the common
    QWidget.show() name within this module.
    """
    app = QApplication(sys.argv)
    _, _, screen_w, screen_h = app.primaryScreen().availableGeometry().getRect()
    # Scale factor so the whole page fits inside the available screen area.
    img_scale = max(image.W / screen_w, image.H / screen_h)
    w_height, w_width = int(image.H / img_scale), int(image.W / img_scale),
    w = ScannedPageWidget(image)
    w.resize(w_width, w_height)
    w.show()
    app.exec_()
def feature_qt(image_cls):
    """Run the interactive UI for *image_cls* and return its filled cells.

    Blocks until the user closes the window; the user's edits are reflected
    in the returned ``filled_cells``.
    """
    show(image_cls)
    return image_cls.filled_cells
if __name__ == '__main__':
    # Module is used as a library; no standalone behaviour.
    pass
| 36.742424 | 134 | 0.650034 | 908 | 7,275 | 4.976872 | 0.227974 | 0.051339 | 0.066165 | 0.050454 | 0.315114 | 0.258022 | 0.193406 | 0.154016 | 0.147378 | 0.1341 | 0 | 0.007063 | 0.240962 | 7,275 | 197 | 135 | 36.928934 | 0.8113 | 0.011959 | 0 | 0.177914 | 0 | 0 | 0.038274 | 0 | 0 | 0 | 0 | 0.005076 | 0 | 1 | 0.104294 | false | 0.006135 | 0.042945 | 0 | 0.165644 | 0.006135 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
da27bfe1b6414a6b5de205fb3cd12650ba9370f4 | 22,206 | py | Python | dlrnapi_client/shell.py | softwarefactory-project/dlrnapi_client | ad21fe759597968c0f691b37dc681232dcd8f2aa | [
"Apache-2.0"
] | 1 | 2017-10-02T19:36:52.000Z | 2017-10-02T19:36:52.000Z | dlrnapi_client/shell.py | softwarefactory-project/dlrnapi_client | ad21fe759597968c0f691b37dc681232dcd8f2aa | [
"Apache-2.0"
] | 4 | 2018-07-16T20:14:58.000Z | 2022-02-04T07:03:03.000Z | dlrnapi_client/shell.py | softwarefactory-project/dlrnapi_client | ad21fe759597968c0f691b37dc681232dcd8f2aa | [
"Apache-2.0"
] | 1 | 2019-12-09T14:40:47.000Z | 2019-12-09T14:40:47.000Z | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import print_function
import argparse
import json
import os
import sys
import dlrnapi_client
from dlrnapi_client.rest import ApiException
# Helper class to allow us to convert API response objects into JSON output.
class ResponseEncoder(json.JSONEncoder):
    """JSON encoder aware of swagger-generated API response objects."""

    def default(self, obj):
        """Serialize *obj*; API responses expose swagger_types/to_dict()."""
        if not hasattr(obj, 'swagger_types'):
            # Anything else falls back to the stock encoder behaviour.
            return json.JSONEncoder.default(self, obj)
        return obj.to_dict()
def get_last_tested_repo(api_instance, options):
    """Return the last tested repo matching the CLI *options* (repo-get).

    :param api_instance: a ``dlrnapi_client.DefaultApi`` instance.
    :param options: argparse namespace for the ``repo-get`` subcommand.
    :raises ApiException: propagated unchanged from the API call.
    """
    params = dlrnapi_client.Params()  # Params | The JSON params to post
    params.max_age = options.max_age
    if options.success:
        params.success = str(options.success)
    if options.component:
        params.component = str(options.component)
    params.job_id = options.job_id
    params.sequential_mode = str(options.sequential)
    params.previous_job_id = options.previous_job_id
    # Fix: removed ``try/except ApiException as e: raise e`` — a no-op that
    # only discarded the original traceback on Python 2.
    return api_instance.api_last_tested_repo_get(params)
def post_last_tested_repo(api_instance, options):
    """Fetch the last tested repo and record an "in progress" CI entry.

    :param api_instance: a ``dlrnapi_client.DefaultApi`` instance.
    :param options: argparse namespace for the ``repo-use`` subcommand.
    :raises ApiException: propagated unchanged from the API call.
    """
    params = dlrnapi_client.Params1()  # Params1 | The JSON params to post
    params.max_age = options.max_age
    params.reporting_job_id = options.reporting_job_id
    if options.success:
        params.success = str(options.success)
    if options.component:
        params.component = str(options.component)
    params.job_id = options.job_id
    params.sequential_mode = str(options.sequential)
    params.previous_job_id = options.previous_job_id
    # Fix: removed ``try/except ApiException as e: raise e`` — a no-op that
    # only discarded the original traceback on Python 2.
    return api_instance.api_last_tested_repo_post(params)
def repo_status(api_instance, options):
    """Return all CI reports for a specific repository (repo-status).

    The CLI passes the literal string 'None' when --extended-hash is unset,
    hence the explicit comparison.
    :raises ApiException: propagated unchanged from the API call.
    """
    params = dlrnapi_client.Params2()  # Params2 | The JSON params to post
    params.commit_hash = options.commit_hash
    params.distro_hash = options.distro_hash
    if options.success:
        params.success = str(options.success)
    if options.extended_hash and options.extended_hash != 'None':
        params.extended_hash = options.extended_hash
    # Fix: removed ``try/except ApiException as e: raise e`` — a no-op that
    # only discarded the original traceback on Python 2.
    return api_instance.api_repo_status_get(params)
def agg_status(api_instance, options):
    """Return all CI reports for an aggregated repository (agg-status).

    :raises ApiException: propagated unchanged from the API call.
    """
    params = dlrnapi_client.AggQuery()  # AggQuery | The JSON params to post
    params.aggregate_hash = options.agg_hash
    if options.success:
        params.success = str(options.success)
    # Fix: removed ``try/except ApiException as e: raise e`` — a no-op that
    # only discarded the original traceback on Python 2.
    return api_instance.api_agg_status_get(params)
def repo_promote(api_instance, options):
    """Promote a single repository (repo-promote).

    The CLI passes the literal string 'None' when --extended-hash is unset;
    that is translated to a real None here.
    :raises ApiException: propagated unchanged from the API call.
    """
    params = dlrnapi_client.Promotion()  # Promotion | The JSON params to post
    params.commit_hash = options.commit_hash
    params.distro_hash = options.distro_hash
    if options.extended_hash != 'None':
        params.extended_hash = options.extended_hash
    else:
        params.extended_hash = None
    params.promote_name = options.promote_name
    # Fix: removed ``try/except ApiException as e: raise e`` — a no-op that
    # only discarded the original traceback on Python 2.
    return api_instance.api_promote_post(params)
def repo_promote_batch(api_instance, options):
    """Promote several repositories atomically (repo-promote-batch).

    ``options.hash_pairs`` is a comma-separated list of
    ``commit_distro`` or ``commit_distro_extended`` groups; the extended
    hash may itself contain underscores, so everything after the second
    '_' is rejoined.
    :raises ApiException: propagated unchanged from the API call.
    """
    params = list()
    hash_pairs = options.hash_pairs.split(',')
    for pair in hash_pairs:
        pair_list = pair.split('_')
        commit_hash = pair_list[0]
        distro_hash = pair_list[1]
        if len(pair_list) > 2:
            extended_hash = '_'.join(pair_list[2:])
        else:
            extended_hash = None
        param = dlrnapi_client.Promotion()
        param.commit_hash = commit_hash
        param.distro_hash = distro_hash
        # A literal 'None' from the CLI also means "no extended hash".
        if extended_hash == 'None':
            param.extended_hash = None
        else:
            param.extended_hash = extended_hash
        param.promote_name = options.promote_name
        params.append(param)
    # Fix: removed ``try/except ApiException as e: raise e`` — a no-op that
    # only discarded the original traceback on Python 2.
    return api_instance.api_promote_batch_post(params)
def get_promotions(api_instance, options):
    """Query promotion records matching the CLI filters (promotion-get).

    Only options actually supplied on the command line are copied into the
    query object.
    :raises ApiException: propagated unchanged from the API call.
    """
    params = dlrnapi_client.PromotionQuery()  # PromotionQuery
    if options.commit_hash:
        params.commit_hash = options.commit_hash
    if options.distro_hash:
        params.distro_hash = options.distro_hash
    if options.extended_hash and options.extended_hash != 'None':
        params.extended_hash = options.extended_hash
    if options.agg_hash:
        params.aggregate_hash = options.agg_hash
    if options.promote_name:
        params.promote_name = options.promote_name
    if options.offset:
        params.offset = options.offset
    if options.limit:
        params.limit = options.limit
    if options.component:
        params.component = options.component
    # Fix: removed ``try/except ApiException as e: raise e`` — a no-op that
    # only discarded the original traceback on Python 2.
    return api_instance.api_promotions_get(params)
def report_result(api_instance, options):
    """Report a CI result for a repo or an aggregate (report-result).

    Either --commit-hash *and* --distro-hash, or --agg-hash alone, must be
    given; the three validation checks below enforce that contract.
    :raises Exception: on an invalid hash combination.
    :raises ApiException: propagated unchanged from the API call.
    """
    params = dlrnapi_client.Params3()  # Params3 | The JSON params to post
    params.job_id = options.job_id
    params.commit_hash = options.commit_hash
    params.distro_hash = options.distro_hash
    params.aggregate_hash = options.agg_hash
    params.success = str(options.success)
    params.url = options.info_url
    params.timestamp = options.timestamp
    params.notes = options.notes
    if options.extended_hash and options.extended_hash != 'None':
        params.extended_hash = options.extended_hash
    if (params.commit_hash and not params.distro_hash) or\
            (not params.commit_hash and params.distro_hash):
        raise Exception('Both --commit-hash and --distro-hash must be '
                        'specified together')
    if params.aggregate_hash and (params.commit_hash or params.distro_hash):
        raise Exception('--agg-hash is mutually exclusive with --commit-hash '
                        'and --distro-hash')
    if (not params.aggregate_hash and not params.commit_hash and
            not params.distro_hash):
        raise Exception('Must specify either --agg-hash or --commit-hash and '
                        '--distro-hash')
    # Fix: removed ``try/except ApiException as e: raise e`` — a no-op that
    # only discarded the original traceback on Python 2.
    return api_instance.api_report_result_post(params)
def import_commit(api_instance, options):
    """Import a commit built by another DLRN instance (commit-import).

    :raises ApiException: propagated unchanged from the API call.
    """
    params = dlrnapi_client.ModelImport()  # ModelImport | JSON params to post
    params.repo_url = options.repo_url
    # Fix: removed ``try/except ApiException as e: raise e`` — a no-op that
    # only discarded the original traceback on Python 2.
    return api_instance.api_remote_import_post(params)
def get_metrics_builds(api_instance, options):
    """Fetch build metrics for a date range (build-metrics).

    :raises ApiException: propagated unchanged from the API call.
    """
    # MetricsRequest | JSON params to post
    params = dlrnapi_client.MetricsRequest()
    params.start_date = options.start_date
    params.end_date = options.end_date
    if options.package_name:
        params.package_name = options.package_name
    # Fix: removed ``try/except ApiException as e: raise e`` — a no-op that
    # only discarded the original traceback on Python 2.
    return api_instance.api_build_metrics_get(params)
# Dispatch table: maps each CLI subcommand name to its implementation.
command_funcs = {
    'repo-get': get_last_tested_repo,
    'repo-use': post_last_tested_repo,
    'repo-status': repo_status,
    'agg-status': agg_status,
    'report-result': report_result,
    'repo-promote': repo_promote,
    'repo-promote-batch': repo_promote_batch,
    'commit-import': import_commit,
    'promotion-get': get_promotions,
    'build-metrics': get_metrics_builds,
}
def main():
    """Entry point for the ``dlrnapi`` CLI.

    Builds the argument parser (one sub-parser per subcommand), creates the
    API client from --url/--username/--password, dispatches to the matching
    function in ``command_funcs`` and prints the response as pretty JSON.

    :returns: 1 when the endpoint answers 404; None otherwise.
    """
    parser = argparse.ArgumentParser(prog='dlrnapi')
    parser.add_argument('--url',
                        required=True,
                        help='URL to use')
    parser.add_argument('--username', '-u',
                        help='username for authentication, defaults to '
                             '"DLRNAPI_USERNAME" environment variable if set',
                        default=os.getenv('DLRNAPI_USERNAME', None)
                        )
    parser.add_argument('--password', '-p',
                        help='password for authentication, defaults to '
                             '"DLRNAPI_PASSWORD" environment variable if set',
                        default=os.getenv('DLRNAPI_PASSWORD', None)
                        )
    subparsers = parser.add_subparsers(dest='command',
                                       title='subcommands',
                                       description='available subcommands')
    # Subcommand repo-get
    parser_last = subparsers.add_parser('repo-get',
                                        help='Get last tested repo')
    parser_last.add_argument('--max-age', type=int, default=0,
                             help='max_age')
    parser_last.add_argument('--success', type=str, default=None,
                             help='Find repos with a successful/unsuccessful '
                                  'vote, if true or false are specified')
    parser_last.add_argument('--job-id', type=str, default=None,
                             help='Name of the CI that sent the vote. If not '
                                  'set, no filter will be set on CI')
    parser_last.add_argument('--sequential-mode', dest='sequential',
                             action='store_true',
                             help='Use the sequential mode algorithm. In this '
                                  'case, return the last tested repo within '
                                  'that timeframe for the CI job described by '
                                  '--previous-job-id')
    parser_last.set_defaults(sequential=False)
    parser_last.add_argument('--previous-job-id', type=str, default=None,
                             help='If --sequential-mode is set, look for jobs'
                                  ' tested by this CI')
    parser_last.add_argument('--component', type=str, default=None,
                             required=False,
                             help='Only search for repos related to '
                                  'this component.')
    # Subcommand repo-use
    parser_use_last = subparsers.add_parser('repo-use',
                                            help='Get the last tested repo '
                                                 'since a specific time '
                                                 '(optionally for a CI job), '
                                                 'and add an "in progress" '
                                                 'entry in the CI job table '
                                                 'for this.')
    parser_use_last.add_argument('--max-age', type=int, default=0,
                                 help='max_age')
    parser_use_last.add_argument('--reporting-job-id', type=str, required=True,
                                 help=' Name of the CI that will add the "in '
                                      'progress" entry in the CI job table.')
    parser_use_last.add_argument('--success', type=str, default=None,
                                 help='Find repos with a successful/'
                                      'unsuccessful vote, if true or false '
                                      'are specified')
    parser_use_last.add_argument('--job-id', type=str, default=None,
                                 help='Name of the CI that sent the vote. If '
                                      'not set, no filter will be set on CI')
    parser_use_last.add_argument('--sequential-mode', dest='sequential',
                                 action='store_true',
                                 help='Use the sequential mode algorithm. In '
                                      'this case, return the last tested repo '
                                      'within that timeframe for the CI job '
                                      'described by --previous-job-id')
    parser_use_last.set_defaults(sequential=False)
    parser_use_last.add_argument('--previous-job-id', type=str, default=None,
                                 help='If --sequential-mode is true, look for '
                                      'jobs tested by this CI')
    parser_use_last.add_argument('--component', type=str, default=None,
                                 required=False,
                                 help='Only search for repos related to '
                                      'this component.')
    # Subcommand repo-status
    parser_st = subparsers.add_parser('repo-status',
                                      help='Get all the CI reports for a '
                                           'specific repository.')
    parser_st.add_argument('--commit-hash', type=str, required=True,
                           help='commit_hash of the repo to fetch '
                                'information for.')
    parser_st.add_argument('--distro-hash', type=str, required=True,
                           help='distro_hash of the repo to fetch '
                                'information for.')
    parser_st.add_argument('--extended-hash', type=str, required=False,
                           help='extended_hash of the repo to fetch '
                                'information for.')
    parser_st.add_argument('--success', type=str, default=None,
                           help='If set to a value (true/false), only return '
                                'the CI reports with the specified vote. If '
                                'not set, return all CI reports.')
    # Subcommand agg-status
    parser_st = subparsers.add_parser('agg-status',
                                      help='Get all the CI reports for a '
                                           'specific aggregated repository.')
    parser_st.add_argument('--agg-hash', type=str, required=True,
                           help='hash of the aggregated repo to fetch '
                                'information for.')
    parser_st.add_argument('--success', type=str, default=None,
                           help='If set to a value (true/false), only return '
                                'the CI reports with the specified vote. If '
                                'not set, return all CI reports.')
    # Subcommand report-result
    parser_rep = subparsers.add_parser('report-result',
                                       help='Report the result of a CI job')
    parser_rep.add_argument('--job-id', type=str, required=True,
                            help='Name of the CI sending the vote')
    parser_rep.add_argument('--commit-hash', type=str, required=False,
                            help='commit_hash of tested repo')
    parser_rep.add_argument('--distro-hash', type=str, required=False,
                            help='distro_hash of tested repo')
    parser_rep.add_argument('--extended-hash', type=str, required=False,
                            help='extended_hash of tested repo')
    parser_rep.add_argument('--agg-hash', type=str, required=False,
                            help='hash of the tested aggregated repo. Note '
                                 'that either --commit-hash and --distro-hash or'
                                 ' --agg-hash must be specified.')
    parser_rep.add_argument('--info-url', type=str, required=True,
                            help='URL where to find additional information '
                                 'from the CI execution')
    parser_rep.add_argument('--timestamp', type=str, required=True,
                            help='Timestamp (in seconds since the epoch)')
    parser_rep.add_argument('--success', type=str, required=True,
                            help='Was the CI execution successful? Set to '
                                 'true or false.')
    parser_rep.add_argument('--notes', type=str,
                            help='Additional notes')
    # Subcommand promote
    parser_prom = subparsers.add_parser('repo-promote',
                                        help='Promote a repository')
    parser_prom.add_argument('--commit-hash', type=str, required=True,
                             help='commit_hash of the repo to be promoted')
    parser_prom.add_argument('--distro-hash', type=str, required=True,
                             help='distro_hash of the repo to be promoted')
    parser_prom.add_argument('--extended-hash', type=str, required=False,
                             help='extended_hash of the repo to be promoted')
    parser_prom.add_argument('--promote-name', type=str, required=True,
                             help='Name to be used for the promotion')
    # Subcommand repo-promote-batch
    parser_prom = subparsers.add_parser('repo-promote-batch',
                                        help='Promote multiple repositories '
                                             'at the same time, as an atomic '
                                             'operation.')
    parser_prom.add_argument('--hash-pairs', type=str, required=True,
                             help='commit_hash+distro_hash or '
                                  'commit_hash+distro_hash+extended_hash of '
                                  'the repos to be promoted, specified as a '
                                  'comma-separated list of commit_distro or '
                                  'commit_distro_extended hash groups. If no '
                                  'extended hash is included, the latest '
                                  'commit matching the commit and distro '
                                  'hashes will be promoted.')
    parser_prom.add_argument('--promote-name', type=str, required=True,
                             help='Name to be used for the promotion')
    # Subcommand promotion-get
    parser_promget = subparsers.add_parser('promotion-get',
                                           help='Get information about '
                                                'promotions')
    parser_promget.add_argument('--commit-hash', type=str, required=False,
                                help='commit_hash of the repo to search '
                                     'promotions for. Requires --distro-hash '
                                     'if specified.')
    parser_promget.add_argument('--distro-hash', type=str, required=False,
                                help='distro_hash of the repo to search '
                                     'promotions for. Requires --commit-hash '
                                     'if specified.')
    parser_promget.add_argument('--extended-hash', type=str, required=False,
                                help='extended_hash of the repo to search '
                                     'promotions for. Requires --commit-hash '
                                     'and --distro-hash if specified.')
    parser_promget.add_argument('--agg-hash', type=str, required=False,
                                help='hash of the tested aggregated repo.')
    parser_promget.add_argument('--promote-name', type=str, required=False,
                                help='Filter results for this promotion name.')
    parser_promget.add_argument('--offset', type=int, required=False,
                                help='Show results after this offset. Each '
                                     'query will only return 100 entries by '
                                     'default.')
    parser_promget.add_argument('--limit', type=int, required=False,
                                help='Limit the results to the first limit '
                                     'items')
    parser_promget.add_argument('--component', type=str, required=False,
                                help='Only search for promotions related to '
                                     'this component.')
    # Subcommand commit-import
    parser_imp = subparsers.add_parser('commit-import',
                                       help='Import a commit built by another'
                                            ' instance')
    parser_imp.add_argument('--repo-url', type=str, required=True,
                            help='Base repository URL for the remote repo '
                                 'to import')
    # Subcommand build-metrics
    parser_metrics = subparsers.add_parser(
        'build-metrics',
        help='Fetch build metrics in a time period')
    parser_metrics.add_argument(
        '--start-date', type=str, required=True,
        help='Start date for the query, in YYYY-MM-DD format')
    parser_metrics.add_argument(
        '--end-date', type=str, required=True,
        help='End date for the query, in YYYY-MM-DD format')
    parser_metrics.add_argument(
        '--package-name', type=str, required=False,
        help='If specified, only fetch metrics for this package name')

    options, args = parser.parse_known_args(sys.argv[1:])

    # create an instance of the API class
    api_client = dlrnapi_client.ApiClient(host=options.url)
    dlrnapi_client.configuration.username = options.username
    dlrnapi_client.configuration.password = options.password
    api_instance = dlrnapi_client.DefaultApi(api_client=api_client)
    try:
        api_response = command_funcs[options.command](api_instance, options)
        print(json.dumps(api_response, cls=ResponseEncoder, indent=2,
                         sort_keys=True))
    except ApiException as e:
        # Handle 404 exceptions gracefully
        if e.status == 404:
            print("ERROR: Got error 404, probably endpoint %s is not available"
                  % options.url)
            return 1
        else:
            raise
    # Fix: the original ended with ``except Exception as e: raise e``, a
    # no-op that only discarded the traceback on Python 2; any other
    # exception now propagates unchanged.
| 45.880165 | 79 | 0.577141 | 2,483 | 22,206 | 4.996778 | 0.125654 | 0.043443 | 0.035061 | 0.024502 | 0.610623 | 0.556541 | 0.497139 | 0.455307 | 0.416136 | 0.400983 | 0 | 0.002111 | 0.338737 | 22,206 | 483 | 80 | 45.975155 | 0.842822 | 0.058453 | 0 | 0.313433 | 0 | 0 | 0.231219 | 0.005031 | 0 | 0 | 0 | 0 | 0 | 1 | 0.029851 | false | 0.012438 | 0.034826 | 0 | 0.099502 | 0.007463 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
da27fd506b778e15d02b14a496203d5d175a39c3 | 1,051 | py | Python | python-mundo3/ex094.py | abm-astro/estudos-python | c0dcd71489e528d445efa25d4986bf2fd08f8fe6 | [
"MIT"
] | 1 | 2021-08-15T18:18:43.000Z | 2021-08-15T18:18:43.000Z | python-mundo3/ex094.py | abm-astro/estudos-python | c0dcd71489e528d445efa25d4986bf2fd08f8fe6 | [
"MIT"
] | null | null | null | python-mundo3/ex094.py | abm-astro/estudos-python | c0dcd71489e528d445efa25d4986bf2fd08f8fe6 | [
"MIT"
# Register people (name, sex, age) until the user stops, then show stats.
# All user-facing text is Portuguese by design (exercise script).
cadastro = dict()
pessoas = list()
soma = 0
while True:
    cadastro.clear()
    cadastro['nome'] = str(input('Nome: ')).capitalize()
    while True:
        cadastro['sexo'] = str(input('Sexo [M/F]: ')).upper()
        # Fix: the original substring test ``in 'MF'`` wrongly accepted ''
        # and 'MF'; compare against the two valid answers explicitly.
        if cadastro['sexo'] in ('M', 'F'):
            break
        print('ERRO! Digite apenas M ou F!')
    cadastro['idade'] = int(input('Idade: '))
    soma += cadastro['idade']
    # Copy: the same dict is cleared and reused on the next iteration.
    pessoas.append(cadastro.copy())
    while True:
        res = str(input('Quer continuar? [S/N] ')).upper()
        # Fix: ``in 'SN'`` accepted the empty string as a valid answer.
        if res in ('S', 'N'):
            break
        print('ERRO! Digite apenas S ou N!')
    if res == 'N':
        break
print(20*'-=')
media = soma / len(pessoas)
print(f'- Ao todo temos {len(pessoas)} pessoas cadastradas.')
print(f'- A média de idade é de {media:5.2f} anos.')
print('- As mulheres cadastradas foram:', end=' ')
for m in pessoas:
    if m['sexo'] == 'F':
        print(f"{m['nome']}", end=" ; ")
print()
print('- A lista de pessoas acima da média:')
for p in pessoas:
    # Fix: the original printed a spacer line for *every* person and
    # re-tested the age condition once per dict field; test once per person.
    if p['idade'] > media:
        print(' ')
        for k, v in p.items():
            print(f'{k} = {v}', end=' ')
print('\n>> ENCERRADO <<')
| 27.657895 | 61 | 0.586108 | 157 | 1,051 | 3.923567 | 0.407643 | 0.038961 | 0.055195 | 0.064935 | 0.084416 | 0 | 0 | 0 | 0 | 0 | 0 | 0.005988 | 0.205519 | 1,051 | 37 | 62 | 28.405405 | 0.731737 | 0 | 0 | 0.162162 | 0 | 0 | 0.32921 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0.324324 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
da284cabcfa7c599a3b1aad0183ba6d119a7c17a | 23,536 | py | Python | vendor/packages/translate-toolkit/translate/storage/base.py | jgmize/kitsune | 8f23727a9c7fcdd05afc86886f0134fb08d9a2f0 | [
"BSD-3-Clause"
] | 2 | 2019-08-19T17:08:47.000Z | 2019-10-05T11:37:02.000Z | vendor/packages/translate-toolkit/translate/storage/base.py | jgmize/kitsune | 8f23727a9c7fcdd05afc86886f0134fb08d9a2f0 | [
"BSD-3-Clause"
] | null | null | null | vendor/packages/translate-toolkit/translate/storage/base.py | jgmize/kitsune | 8f23727a9c7fcdd05afc86886f0134fb08d9a2f0 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2006-2009 Zuza Software Foundation
#
# This file is part of the Translate Toolkit.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <http://www.gnu.org/licenses/>.
"""Base classes for storage interfaces.
@organization: Zuza Software Foundation
@copyright: 2006-2009 Zuza Software Foundation
@license: U{GPL <http://www.fsf.org/licensing/licenses/gpl.html>}
"""
try:
import cPickle as pickle
except:
import pickle
from exceptions import NotImplementedError
import translate.i18n
from translate.storage.placeables import StringElem, general, parse as rich_parse
from translate.misc.typecheck import accepts, Self, IsOneOf
from translate.misc.multistring import multistring
def force_override(method, baseclass):
    """Forces derived classes to override method.

    Raises NotImplementedError when *method* is still the implementation
    inherited from *baseclass* (i.e. the subclass never overrode it).

    NOTE(review): relies on the Python 2-only bound-method attributes
    ``im_self``/``im_class``; under Python 3 these would have to be
    ``__self__``/``__func__``.
    """
    if type(method.im_self) == type(baseclass):
        # then this is a classmethod and im_self is the actual class
        actualclass = method.im_self
    else:
        actualclass = method.im_class
    if actualclass != baseclass:
        raise NotImplementedError(
            "%s does not reimplement %s as required by %s" % \
            (actualclass.__name__, method.__name__, baseclass.__name__)
        )
class ParseError(Exception):
    """Raised when a storage file cannot be parsed; wraps the real error."""

    def __init__(self, inner_exc):
        # Keep the original exception so callers can inspect it.
        self.inner_exc = inner_exc

    def __str__(self):
        return "%r" % (self.inner_exc,)
class TranslationUnit(object):
"""Base class for translation units.
Our concept of a I{translation unit} is influenced heavily by XLIFF:
U{http://www.oasis-open.org/committees/xliff/documents/xliff-specification.htm}
As such most of the method- and variable names borrows from XLIFF terminology.
A translation unit consists of the following:
- A I{source} string. This is the original translatable text.
- A I{target} string. This is the translation of the I{source}.
- Zero or more I{notes} on the unit. Notes would typically be some
comments from a translator on the unit, or some comments originating from
the source code.
- Zero or more I{locations}. Locations indicate where in the original
source code this unit came from.
- Zero or more I{errors}. Some tools (eg. L{pofilter <filters.pofilter>}) can run checks on
translations and produce error messages.
@group Source: *source*
@group Target: *target*
@group Notes: *note*
@group Locations: *location*
@group Errors: *error*
"""
rich_parsers = []
"""A list of functions to use for parsing a string into a rich string tree."""
def __init__(self, source):
"""Constructs a TranslationUnit containing the given source string."""
self.notes = ""
self._store = None
self.source = source
self._target = None
self._rich_source = None
self._rich_target = None
def __eq__(self, other):
"""Compares two TranslationUnits.
@type other: L{TranslationUnit}
@param other: Another L{TranslationUnit}
@rtype: Boolean
@return: Returns True if the supplied TranslationUnit equals this unit.
"""
return self.source == other.source and self.target == other.target
def __str__(self):
"""Converts to a string representation that can be parsed back using L{parsestring()}."""
# no point in pickling store object, so let's hide it for a while.
store = getattr(self, "_store", None)
self._store = None
dump = pickle.dumps(self)
self._store = store
return dump
    def rich_to_multistring(cls, elem_list):
        """Convert a "rich" string tree to a C{multistring}:

        >>> from translate.storage.placeables.interfaces import X
        >>> rich = [StringElem(['foo', X(id='xxx', sub=[' ']), 'bar'])]
        >>> TranslationUnit.rich_to_multistring(rich)
        multistring(u'foo bar')
        """
        # NOTE(review): uses the Python 2-only builtin ``unicode``.
        return multistring([unicode(elem) for elem in elem_list])
    rich_to_multistring = classmethod(rich_to_multistring)
    def multistring_to_rich(cls, mulstring):
        """Convert a multistring to a list of "rich" string trees:

        >>> target = multistring([u'foo', u'bar', u'baz'])
        >>> TranslationUnit.multistring_to_rich(target)
        [<StringElem([<StringElem([u'foo'])>])>,
         <StringElem([<StringElem([u'bar'])>])>,
         <StringElem([<StringElem([u'baz'])>])>]
        """
        # NOTE(review): takes ``cls`` but carries no classmethod wrapper
        # here; it also works when called through an instance because only
        # ``cls.rich_parsers`` is looked up — confirm against upstream.
        if isinstance(mulstring, multistring):
            return [rich_parse(s, cls.rich_parsers) for s in mulstring.strings]
        return [rich_parse(mulstring, cls.rich_parsers)]
def setsource(self, source):
"""Sets the source string to the given value."""
self._rich_source = None
self._source = source
source = property(lambda self: self._source, setsource)
def settarget(self, target):
"""Sets the target string to the given value."""
self._rich_target = None
self._target = target
target = property(lambda self: self._target, settarget)
    def _get_rich_source(self):
        """Return the rich source, building it lazily from ``source``."""
        if self._rich_source is None:
            self._rich_source = self.multistring_to_rich(self.source)
        return self._rich_source
    def _set_rich_source(self, value):
        """Set the rich source from *value*, a non-empty iterable of
        StringElem trees, and refresh the plain ``source`` string from it.

        :raises ValueError: if *value* is not iterable, is empty, or its
            first element is not a StringElem.
        """
        if not hasattr(value, '__iter__'):
            raise ValueError('value must be iterable')
        if len(value) < 1:
            raise ValueError('value must have at least one element.')
        if not isinstance(value[0], StringElem):
            raise ValueError('value[0] must be of type StringElem.')
        self._rich_source = list(value)
        # Keep the plain string in sync with the rich representation.
        self.source = self.rich_to_multistring(value)
    rich_source = property(_get_rich_source, _set_rich_source)
    """ @see: rich_to_multistring
    @see: multistring_to_rich"""
    def _get_rich_target(self):
        """Return the rich target, building it lazily from ``target``."""
        if self._rich_target is None:
            self._rich_target = self.multistring_to_rich(self.target)
        return self._rich_target
def _set_rich_target(self, value):
if not hasattr(value, '__iter__'):
raise ValueError('value must be iterable')
if len(value) < 1:
raise ValueError('value must have at least one element.')
if not isinstance(value[0], StringElem):
raise ValueError('value[0] must be of type StringElem.')
self._rich_target = list(value)
self.target = self.rich_to_multistring(value)
rich_target = property(_get_rich_target, _set_rich_target)
""" @see: rich_to_multistring
@see: multistring_to_rich"""
def gettargetlen(self):
    """Return the length of the target string.

    @note: Plural forms might be combined.
    @rtype: Integer
    """
    total = len(self.target or "")
    # A multistring target exposes .strings; add the remaining plural
    # forms beyond the first (the first is already counted above).
    plural_forms = getattr(self.target, "strings", [])
    if plural_forms:
        total += sum(len(form) for form in plural_forms[1:])
    return total
def getid(self):
    """A unique identifier for this unit.

    @rtype: string
    @return: an identifier for this unit that is unique in the store

    Derived classes should override this in a way that guarantees a unique
    identifier for each unit in the store.
    """
    # Base implementation: the source text itself serves as the id.
    return self.source

def setid(self, value):
    """Sets the unique identifier for this unit.

    Only implemented if the format allows ids independent from other
    unit properties like source or context.
    """
    pass

def getlocations(self):
    """A list of source code locations.

    @note: Shouldn't be implemented if the format doesn't support it.
    @rtype: List
    """
    return []

def addlocation(self, location):
    """Add one location to the list of locations.

    @note: Shouldn't be implemented if the format doesn't support it.
    """
    pass
def addlocations(self, location):
    """Add a location or a list of locations.

    @note: Most classes shouldn't need to implement this,
    but should rather implement L{addlocation()}.
    @warning: This method might be removed in future.
    """
    # Normalise to a list, then delegate each entry to addlocation().
    entries = location if isinstance(location, list) else [location]
    for entry in entries:
        self.addlocation(entry)
def getcontext(self):
    """Get the message context."""
    # Base implementation: no context support.
    return ""

def setcontext(self, context):
    """Set the message context."""
    pass

def getnotes(self, origin=None):
    """Returns all notes about this unit.

    It will probably be freeform text or something reasonable that can be
    synthesised by the format.
    It should not include location comments (see L{getlocations()}).
    """
    # The origin filter is ignored by this base implementation.
    return getattr(self, "notes", "")
def addnote(self, text, origin=None, position="append"):
    """Adds a note (comment).

    @type text: string
    @param text: Usually just a sentence or two.
    @type origin: string
    @param origin: Specifies who/where the comment comes from.
    Origin can be one of the following text strings:
    - 'translator'
    - 'developer', 'programmer', 'source code' (synonyms)
    """
    # Existing notes are kept; the new note goes on a fresh line.
    existing = getattr(self, "notes", None)
    if existing:
        self.notes = existing + '\n' + text
    else:
        self.notes = text
def removenotes(self):
    """Remove all the translator's notes."""
    self.notes = u''

def adderror(self, errorname, errortext):
    """Adds an error message to this unit.

    @type errorname: string
    @param errorname: A single word to id the error.
    @type errortext: string
    @param errortext: The text describing the error.
    """
    # No-op in the base class; formats with error support override this.
    pass

def geterrors(self):
    """Get all error messages.

    @rtype: Dictionary
    """
    return {}

def markreviewneeded(self, needsreview=True, explanation=None):
    """Marks the unit to indicate whether it needs review.

    @keyword needsreview: Defaults to True.
    @keyword explanation: Adds an optional explanation as a note.
    """
    # No-op in the base class; formats with review support override this.
    pass
def istranslated(self):
    """Indicates whether this unit is translated.

    This should be used rather than deducing it from .target,
    to ensure that other classes can implement more functionality
    (as XLIFF does).
    """
    # A unit counts as translated only with a non-empty, non-fuzzy target.
    if not self.target:
        return False
    return not self.isfuzzy()
def istranslatable(self):
    """Indicates whether this unit can be translated.

    This should be used to distinguish real units for translation from
    header, obsolete, binary or other blank units.
    """
    return True

def isfuzzy(self):
    """Indicates whether this unit is fuzzy."""
    return False

def markfuzzy(self, value=True):
    """Marks the unit as fuzzy or not."""
    # No-op in the base class; formats with fuzzy support override this.
    pass

def isobsolete(self):
    """Indicate whether a unit is obsolete."""
    return False

def makeobsolete(self):
    """Make a unit obsolete."""
    # No-op in the base class; formats with obsolete support override this.
    pass

def isheader(self):
    """Indicates whether this unit is a header."""
    return False

def isreview(self):
    """Indicates whether this unit needs review."""
    return False
def isblank(self):
    """Used to see if this unit has no source or target string.

    @note: This is probably used more to find translatable units,
    and we might want to move in that direction rather and get rid of this.
    """
    has_content = bool(self.source) or bool(self.target)
    return not has_content
def hasplural(self):
    """Tells whether or not this specific unit has plural strings."""
    #TODO: Reconsider
    return False

def getsourcelanguage(self):
    # Delegates to the owning store; falls back to English.
    return getattr(self._store, "sourcelanguage", "en")

def gettargetlanguage(self):
    # Delegates to the owning store; None when the store doesn't know.
    return getattr(self._store, "targetlanguage", None)
def merge(self, otherunit, overwrite=False, comments=True, authoritative=False):
    """Do basic format agnostic merging.

    Copies the other unit's rich target onto this unit when this unit has
    no target yet, or unconditionally when overwrite is True.  The
    comments and authoritative flags are accepted for subclass use.
    """
    if self.target and not overwrite:
        return
    self.rich_target = otherunit.rich_target
def unit_iter(self):
    """Iterator that only returns this unit."""
    yield self

def getunits(self):
    """This unit in a list."""
    return [self]

def buildfromunit(cls, unit):
    """Build a native unit from a foreign unit, preserving as much
    information as possible."""
    # Same concrete class with a usable copy() method: clone directly.
    if type(unit) == cls and hasattr(unit, "copy") and callable(unit.copy):
        return unit.copy()
    # Otherwise rebuild field by field through the public interface.
    newunit = cls(unit.source)
    newunit.target = unit.target
    newunit.markfuzzy(unit.isfuzzy())
    locations = unit.getlocations()
    if locations:
        newunit.addlocations(locations)
    notes = unit.getnotes()
    if notes:
        newunit.addnote(notes)
    return newunit
buildfromunit = classmethod(buildfromunit)

# XLIFF-style ids; the base class has no use for them, so the setters
# accept and discard any value.
xid = property(lambda self: None, lambda self, value: None)
rid = property(lambda self: None, lambda self, value: None)
class TranslationStore(object):
    """Base class for stores for multiple translation units of type UnitClass."""

    UnitClass = TranslationUnit
    """The class of units that will be instantiated and used by this class"""
    Name = "Base translation store"
    """The human usable name of this store type"""
    Mimetypes = None
    """A list of MIME types associated with this store type"""
    Extensions = None
    """A list of file extensions associated with this store type"""
    _binary = False
    """Indicates whether a file should be accessed as a binary file."""
    suggestions_in_format = False
    """Indicates if format can store suggestions and alternative translation for a unit"""

    def __init__(self, unitclass=None):
        """Constructs a blank TranslationStore."""
        self.units = []
        self.sourcelanguage = None
        self.targetlanguage = None
        # Allow callers to override the class-level UnitClass per instance.
        if unitclass:
            self.UnitClass = unitclass
        super(TranslationStore, self).__init__()

    def getsourcelanguage(self):
        """Gets the source language for this store."""
        return self.sourcelanguage

    def setsourcelanguage(self, sourcelanguage):
        """Sets the source language for this store."""
        self.sourcelanguage = sourcelanguage

    def gettargetlanguage(self):
        """Gets the target language for this store."""
        return self.targetlanguage

    def settargetlanguage(self, targetlanguage):
        """Sets the target language for this store."""
        self.targetlanguage = targetlanguage

    def unit_iter(self):
        """Iterator over all the units in this store."""
        for unit in self.units:
            yield unit

    def getunits(self):
        """Return a list of all units in this store."""
        return [unit for unit in self.unit_iter()]

    def addunit(self, unit):
        """Appends the given unit to the object's list of units.

        This method should always be used rather than trying to modify the
        list manually.

        @type unit: L{TranslationUnit}
        @param unit: The unit that will be added.
        """
        # Back-reference so the unit can look up store-level settings
        # (e.g. source/target language).
        unit._store = self
        self.units.append(unit)

    def addsourceunit(self, source):
        """Adds and returns a new unit with the given source string.

        @rtype: L{TranslationUnit}
        """
        unit = self.UnitClass(source)
        self.addunit(unit)
        return unit

    def findid(self, id):
        """Find unit with matching id by checking id_index."""
        self.require_index()
        return self.id_index.get(id, None)

    def findunit(self, source):
        """Finds the unit with the given source string.

        @rtype: L{TranslationUnit} or None
        """
        # Use the source index when it has been built, else scan linearly.
        if len(getattr(self, "sourceindex", [])):
            if source in self.sourceindex:
                return self.sourceindex[source][0]
        else:
            for unit in self.units:
                if unit.source == source:
                    return unit
        return None

    def findunits(self, source):
        """Finds the units with the given source string.

        @rtype: List of L{TranslationUnit} or None
        """
        if len(getattr(self, "sourceindex", [])):
            if source in self.sourceindex:
                return self.sourceindex[source]
        else:
            #FIXME: maybe we should generate index here instead since
            #we'll scan all units anyway
            result = []
            for unit in self.units:
                if unit.source == source:
                    result.append(unit)
            return result
        return None

    def translate(self, source):
        """Returns the translated string for a given source string.

        @rtype: String or None
        """
        unit = self.findunit(source)
        if unit and unit.target:
            return unit.target
        else:
            return None

    def remove_unit_from_index(self, unit):
        """Remove a unit from source and location indexes."""

        def remove_unit(source):
            # Drop the unit from this source's bucket; delete the bucket
            # entirely once it is empty.
            if source in self.sourceindex:
                try:
                    self.sourceindex[source].remove(unit)
                    if len(self.sourceindex[source]) == 0:
                        del(self.sourceindex[source])
                except ValueError:
                    pass

        if unit.hasplural():
            for source in unit.source.strings:
                remove_unit(source)
        else:
            remove_unit(unit.source)
        for location in unit.getlocations():
            if location in self.locationindex and self.locationindex[location] is not None \
                    and self.locationindex[location] == unit:
                del(self.locationindex[location])

    def add_unit_to_index(self, unit):
        """Add a unit to source and location indexes."""
        self.id_index[unit.getid()] = unit

        def insert_unit(source):
            if not source in self.sourceindex:
                self.sourceindex[source] = [unit]
            else:
                self.sourceindex[source].append(unit)

        if unit.hasplural():
            for source in unit.source.strings:
                insert_unit(source)
        else:
            insert_unit(unit.source)
        for location in unit.getlocations():
            if location in self.locationindex:
                # if sources aren't unique, don't use them
                #FIXME: maybe better store a list of units like sourceindex
                self.locationindex[location] = None
            else:
                self.locationindex[location] = unit

    def makeindex(self):
        """Indexes the items in this store. At least .sourceindex should be useful."""
        self.locationindex = {}
        self.sourceindex = {}
        self.id_index = {}
        for index, unit in enumerate(self.units):
            unit.index = index
            # Headers/obsolete/blank units are deliberately left unindexed.
            if unit.istranslatable():
                self.add_unit_to_index(unit)

    def require_index(self):
        """Make sure source index exists."""
        if not hasattr(self, "sourceindex"):
            self.makeindex()

    def getids(self):
        """Return a list of unit ids."""
        self.require_index()
        return self.id_index.keys()

    def __getstate__(self):
        # File objects cannot be pickled, so drop the handle from the state.
        odict = self.__dict__.copy()
        odict['fileobj'] = None
        return odict

    def __setstate__(self, dict):
        self.__dict__.update(dict)
        # Re-open the backing file, if we know where it lives.
        if getattr(self, "filename", False):
            self.fileobj = open(self.filename)

    def __str__(self):
        """Converts to a string representation that can be parsed back using L{parsestring()}."""
        # We can't pickle fileobj if it is there, so let's hide it for a while.
        fileobj = getattr(self, "fileobj", None)
        self.fileobj = None
        dump = pickle.dumps(self)
        self.fileobj = fileobj
        return dump

    def isempty(self):
        """Returns True if the object doesn't contain any translation units."""
        if len(self.units) == 0:
            return True
        # A store holding only untranslatable units (e.g. a header) also
        # counts as empty.
        for unit in self.units:
            if unit.istranslatable():
                return False
        return True

    def _assignname(self):
        """Tries to work out what the name of the filesystem file is and
        assigns it to .filename."""
        fileobj = getattr(self, "fileobj", None)
        if fileobj:
            filename = getattr(fileobj, "name", getattr(fileobj, "filename", None))
            if filename:
                self.filename = filename

    def parsestring(cls, storestring):
        """Converts the string representation back to an object."""
        newstore = cls()
        if storestring:
            newstore.parse(storestring)
        return newstore
    parsestring = classmethod(parsestring)

    def parse(self, data):
        """Parser to process the given source string."""
        # NOTE(review): unpickling is unsafe on untrusted input — confirm
        # this base-class fallback is only ever fed trusted data.
        self.units = pickle.loads(data).units

    def savefile(self, storefile):
        """Writes the string representation to the given file (or filename)."""
        # basestring is Python 2: accepts both str and unicode filenames.
        if isinstance(storefile, basestring):
            mode = 'w'
            if self._binary:
                mode = 'wb'
            storefile = open(storefile, mode)
        self.fileobj = storefile
        self._assignname()
        storestring = str(self)
        storefile.write(storestring)
        storefile.close()

    def save(self):
        """Save to the file that data was originally read from, if available."""
        fileobj = getattr(self, "fileobj", None)
        mode = 'w'
        if self._binary:
            mode = 'wb'
        if not fileobj:
            filename = getattr(self, "filename", None)
            if filename:
                # file() is the Python 2 built-in equivalent of open().
                fileobj = file(filename, mode)
        else:
            # Re-open the existing handle's file for writing, using the
            # same class (so e.g. gzip-wrapped files stay wrapped).
            fileobj.close()
            filename = getattr(fileobj, "name", getattr(fileobj, "filename", None))
            if not filename:
                raise ValueError("No file or filename to save to")
            fileobj = fileobj.__class__(filename, mode)
        self.savefile(fileobj)

    def parsefile(cls, storefile):
        """Reads the given file (or opens the given filename) and parses back to an object."""
        mode = 'r'
        if cls._binary:
            mode = 'rb'
        if isinstance(storefile, basestring):
            storefile = open(storefile, mode)
        mode = getattr(storefile, "mode", mode)
        #For some reason GzipFile returns 1, so we have to test for that here
        if mode == 1 or "r" in mode:
            storestring = storefile.read()
            storefile.close()
        else:
            storestring = ""
        newstore = cls.parsestring(storestring)
        newstore.fileobj = storefile
        newstore._assignname()
        return newstore
    parsefile = classmethod(parsefile)
| 34.209302 | 97 | 0.613443 | 2,795 | 23,536 | 5.088372 | 0.194991 | 0.008438 | 0.00443 | 0.007031 | 0.20187 | 0.160245 | 0.126916 | 0.113275 | 0.104556 | 0.076079 | 0 | 0.001928 | 0.29491 | 23,536 | 687 | 98 | 34.259098 | 0.855077 | 0.337313 | 0 | 0.307479 | 0 | 0 | 0.034779 | 0 | 0 | 0 | 0 | 0.004367 | 0 | 1 | 0.204986 | false | 0.022161 | 0.019391 | 0.00831 | 0.412742 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
da296592fb077c1bc7a27382f8604a31a8ab30e3 | 520 | py | Python | task2C.py | jfs60/Group-147-PartIA-Flood-Warning-System | 3fb52e3e028ec8e0b70ccb1cfc61bcf76b42f2c1 | [
"MIT"
] | null | null | null | task2C.py | jfs60/Group-147-PartIA-Flood-Warning-System | 3fb52e3e028ec8e0b70ccb1cfc61bcf76b42f2c1 | [
"MIT"
] | null | null | null | task2C.py | jfs60/Group-147-PartIA-Flood-Warning-System | 3fb52e3e028ec8e0b70ccb1cfc61bcf76b42f2c1 | [
"MIT"
] | 1 | 2022-02-06T06:45:15.000Z | 2022-02-06T06:45:15.000Z | from floodsystem.station import MonitoringStation
from floodsystem.stationdata import build_station_list, update_water_levels
from floodsystem.flood import stations_highest_rel_level, stations_level_over_threshold
def run():
    """Build the station list, refresh its water levels, and return the
    nine stations with the highest relative water level.

    :return: list produced by stations_highest_rel_level(stations, 9)
    """
    stations = build_station_list()
    update_water_levels(stations)
    # The original bound this to 'list', shadowing the builtin; use a
    # descriptive name instead.
    highest_stations = stations_highest_rel_level(stations, 9)
    return highest_stations
# NOTE(review): run() executes here at import time *and* again inside the
# __main__ guard below, so the station data is fetched twice — confirm
# whether the module-level call is intentional.
stations_Task_2C = run()
print(stations_Task_2C)

if __name__ == "__main__":
    print("*** Task 2C: CUED Part IA Flood Warning System ***")
    run()
da2e43a4657d302992b18fd2e6651b3dd93dac4f | 6,112 | py | Python | docs/examples/viz_emwave_animation.py | iamansoni/fury | 2e7971a176c2540e10a9a6da861097583d08cb4a | [
"BSD-3-Clause"
] | 149 | 2018-09-20T18:36:16.000Z | 2022-03-29T05:16:25.000Z | docs/examples/viz_emwave_animation.py | iamansoni/fury | 2e7971a176c2540e10a9a6da861097583d08cb4a | [
"BSD-3-Clause"
] | 523 | 2018-09-20T16:57:16.000Z | 2022-03-31T18:52:41.000Z | docs/examples/viz_emwave_animation.py | iamansoni/fury | 2e7971a176c2540e10a9a6da861097583d08cb4a | [
"BSD-3-Clause"
] | 150 | 2018-10-10T07:21:27.000Z | 2022-03-29T08:33:17.000Z | """
===============================================
Electromagnetic Wave Propagation Animation
===============================================
A linearly polarized sinusoidal electromagnetic wave, propagating in the
direction +x through a homogeneous, isotropic, dissipationless medium,
such as vacuum. The electric field (blue arrows) oscillates in the
±z-direction, and the orthogonal magnetic field (red arrows) oscillates in
phase with the electric field, but in the ±y-direction.
Function of the sinusoid used in the animation = sin(k*x - w*t + d)
Where, k:wavenumber, x:abscissa, w:angular frequency, t:time, d:phase angle
Importing necessary modules
"""
from fury import window, actor, utils, ui
import numpy as np
import itertools
###############################################################################
# function that updates and returns the coordinates of the waves which are
# changing with time
def update_coordinates(wavenumber, ang_frq, time, phase_angle):
x = np.linspace(-3, 3, npoints)
y = np.sin(wavenumber*x - ang_frq*time + phase_angle)
z = np.array([0 for i in range(npoints)])
return x, y, z
###############################################################################
# Variable(s) and their description-
# npoints: For high quality rendering, keep the number of npoints high
# but kindly note that higher values for npoints will slow down the
# rendering process (default = 800)
# wavelength : wavelength of the wave (default = 2)
# wavenumber : 2*pi/wavelength
# time: time (default time i.e. time at beginning of the animation = 0)
# incre_time: value by which time is incremented for each call of
# timer_callback (default = 0.1)
# angular_frq: angular frequency (default = 0.1)
# phase_angle: phase angle (default = 0.002)
npoints = 800                      # samples per wave; higher is smoother but slower
wavelength = 2
wavenumber = 2*np.pi/wavelength    # spatial angular frequency k
time = 0                           # animation clock, advanced by timer_callback
incre_time = 0.1                   # time step added per timer tick
angular_frq = 0.1                  # temporal angular frequency w
phase_angle = 0.002                # phase offset d
###############################################################################
# Creating a scene object and configuring the camera's position
scene = window.Scene()
scene.set_camera(position=(-6, 5, -10), focal_point=(0.0, 0.0, 0.0),
                 view_up=(0.0, 0.0, 0.0))

showm = window.ShowManager(scene,
                           size=(800, 600), reset_camera=True,
                           order_transparent=True)

showm.initialize()

###############################################################################
# Creating a yellow colored arrow to show the direction of propagation of
# electromagnetic wave

centers = np.array([[3, 0, 0]])
directions = np.array([[-1, 0, 0]])  # the arrow points in the -x direction
heights = np.array([6.4])
arrow_actor = actor.arrow(centers, directions, window.colors.yellow, heights,
                          resolution=20, tip_length=0.06, tip_radius=0.012,
                          shaft_radius=0.005)
scene.add(arrow_actor)

###############################################################################
# Creating point actor that renders the magnetic field (red line,
# oscillating in y at time=0)

x = np.linspace(-3, 3, npoints)
y = np.sin(wavenumber*x - angular_frq*time + phase_angle)
z = np.array([0 for i in range(npoints)])

pts = np.array([(a, b, c) for (a, b, c) in zip(x, y, z)])
pts = [pts]
colors = window.colors.red
wave_actor1 = actor.line(pts, colors, linewidth=3)
scene.add(wave_actor1)

vertices = utils.vertices_from_actor(wave_actor1)
vcolors = utils.colors_from_actor(wave_actor1, 'colors')
# NOTE(review): true division yields a float here, and np.repeat below is
# given that float as its repeat count — confirm this is accepted by the
# NumPy version in use.
no_vertices_per_point = len(vertices)/npoints
initial_vertices = vertices.copy() - \
    np.repeat(pts, no_vertices_per_point, axis=0)

###############################################################################
# Creating point actor that renders the electric field (blue line,
# oscillating in z at time=0)

xx = np.linspace(-3, 3, npoints)
yy = np.array([0 for i in range(npoints)])
zz = np.sin(wavenumber*xx - angular_frq*time + phase_angle)

pts2 = np.array([(a, b, c) for (a, b, c) in zip(xx, yy, zz)])
pts2 = [pts2]
colors2 = window.colors.blue
wave_actor2 = actor.line(pts2, colors2, linewidth=3)
scene.add(wave_actor2)

vertices2 = utils.vertices_from_actor(wave_actor2)
vcolors2 = utils.colors_from_actor(wave_actor2, 'colors')
no_vertices_per_point2 = len(vertices2)/npoints
initial_vertices2 = vertices2.copy() - \
    np.repeat(pts2, no_vertices_per_point2, axis=0)

###############################################################################
# Initializing text box to display the title of the animation

tb = ui.TextBlock2D(bold=True, position=(160, 90))
tb.message = "Electromagnetic Wave"
scene.add(tb)

###############################################################################
# end is used to decide when to end the animation (number of timer ticks)

end = 300

###############################################################################
# Initializing counter

counter = itertools.count()
###############################################################################
# Coordinates to be plotted are changed everytime timer_callback is called by
# using the update_coordinates function. The wave is rendered here.
def timer_callback(_obj, _event):
    """Advance the animation one step: recompute both waves and redraw."""
    # NOTE(review): 'time_incre' below looks like a typo for 'incre_time';
    # it is harmless inside a global statement but worth confirming.
    global pts, pts2, time, time_incre, angular_frq, phase_angle, wavenumber
    time += incre_time
    cnt = next(counter)
    # NOTE(review): update_coordinates is declared as
    # (wavenumber, ang_frq, time, phase_angle) but is called here with
    # phase_angle and time swapped — confirm whether this ordering is
    # intentional (it changes how the phase evolves over time).
    x, y, z = update_coordinates(wavenumber, angular_frq, phase_angle, time)
    pts = np.array([(a, b, c) for (a, b, c) in zip(x, y, z)])
    vertices[:] = initial_vertices + \
        np.repeat(pts, no_vertices_per_point, axis=0)
    utils.update_actor(wave_actor1)
    # Second wave: the returned y/z are swapped on unpacking so this line
    # oscillates in z (the electric field) instead of y.
    xx, zz, yy = update_coordinates(wavenumber, angular_frq, phase_angle, time)
    pts2 = np.array([(a, b, c) for (a, b, c) in zip(xx, yy, zz)])
    vertices2[:] = initial_vertices2 + \
        np.repeat(pts2, no_vertices_per_point2, axis=0)
    utils.update_actor(wave_actor2)
    showm.render()
    # to end the animation
    if cnt == end:
        showm.exit()
###############################################################################
# Run every 25 milliseconds
# Drive timer_callback roughly every 25 milliseconds.
showm.add_timer_callback(True, 25, timer_callback)

interactive = False
if interactive:
    showm.start()

# A snapshot of the scene is saved even in non-interactive runs.
window.record(showm.scene, size=(800, 600), out_path="viz_emwave.png")
| 34.925714 | 79 | 0.59375 | 786 | 6,112 | 4.507634 | 0.301527 | 0.006774 | 0.006774 | 0.006774 | 0.248095 | 0.186847 | 0.154107 | 0.154107 | 0.117979 | 0.078465 | 0 | 0.026153 | 0.155432 | 6,112 | 174 | 80 | 35.126437 | 0.659822 | 0.310046 | 0 | 0.146341 | 0 | 0 | 0.013872 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.02439 | false | 0 | 0.036585 | 0 | 0.073171 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
da2f601feb319bbef64c8038bd332c6cea544cb4 | 3,243 | py | Python | report_templates.py | averlarque/l1-zabbix-reporter | 5d8ea4d432b7b518f954d806a86fe5bcafca3f9d | [
"MIT"
] | 1 | 2017-03-27T02:59:09.000Z | 2017-03-27T02:59:09.000Z | report_templates.py | averlarque/l1-zabbix-reporter | 5d8ea4d432b7b518f954d806a86fe5bcafca3f9d | [
"MIT"
] | 1 | 2018-01-16T04:56:16.000Z | 2018-01-16T04:56:16.000Z | report_templates.py | averlarque/l1-zabbix-reporter | 5d8ea4d432b7b518f954d806a86fe5bcafca3f9d | [
"MIT"
] | 2 | 2016-11-24T07:06:51.000Z | 2019-11-16T15:12:54.000Z | from report_generator import *
class PeriodReport:
    """
    Parent class for time period reports.

    __init__ builds the report title and data eagerly; generate_report()
    must then be called to write the report to disk.
    """

    def __init__(self, since, till, report_format='count', report_type='txt'):
        # Define time limits (objects expected to support strftime —
        # presumably datetimes; confirm against callers).
        self.since = since
        self.till = till
        self.report_type = report_type
        self.report_format = report_format
        # Generate a title for a report
        self.report_name = self.get_report_name(self.report_format + '_report_all')
        # choose_report_class is overridden by child classes, so the
        # generator picked here is subclass-specific.
        self.report_class = self.choose_report_class()
        # Generating the data for the report
        self.report_data = self.report_class.generate_report_data()
        # For further generating a report the self.generate_report() should be called

    def get_report_name(self, slug):
        """Append a '(HH.MM_ddmmyy-HH.MM_ddmmyy)' time range to *slug*."""
        time_format = '%H.%M_%d%m%y'
        since = self.since.strftime(time_format)
        till = self.till.strftime(time_format)
        time_alias = since + '-' + till
        report_name = slug + '(' + time_alias + ')'
        return report_name

    def choose_report_class(self):
        """Pick the report generator matching self.report_format.

        Unknown formats fall back to the count report.
        """
        if self.report_format == 'count':
            report_class = CountPeriodReport(self.since, self.till)
        elif self.report_format == 'event':
            report_class = EventPeriodReport(self.since, self.till)
        else:
            report_class = CountPeriodReport(self.since, self.till)
        return report_class

    def generate_report(self):
        """
        Main reporting function; writes the report via the chosen
        generator.  Unknown report types fall back to txt.

        :return: None
        """
        if self.report_type == 'txt':
            self.report_class.create_txt_report(self.report_data, self.report_name)
        elif self.report_type == 'html':
            self.report_class.create_html_report(self.report_data, self.report_name)
        else:
            self.report_class.create_txt_report(self.report_data, self.report_name)
class ProjectPeriodReport(PeriodReport):
    """Period report restricted to a single project."""

    def __init__(self, since, till, project, report_format='count', report_type='txt'):
        self.project = project
        super().__init__(since, till, report_format=report_format, report_type=report_type)
        # Replace the generic title assigned by the parent with a
        # project-specific one.
        slug = self.report_format + '_' + self.project + '_project_report'
        self.report_name = self.get_report_name(slug)

    def choose_report_class(self):
        """Pick the project-scoped generator matching self.report_format."""
        if self.report_format == 'event':
            return ProjectEventPeriodReport(self.since, self.till, self.project)
        # 'count' is both the explicit option and the fallback.
        return ProjectCountPeriodReport(self.since, self.till, self.project)
class ItemPeriodReport(PeriodReport):
    """Period report scoped to a single monitored item."""

    def __init__(self, since, till, item, report_format='count', report_type='txt'):
        self.item = item
        super().__init__(since, till, report_format=report_format, report_type=report_type)
        # Redefines report name according to the sibling class alias
        self.report_name = self.get_report_name(self.report_format + '_' + self.item + '_item_report')

    def choose_report_class(self):
        """Pick the item-scoped generator matching self.report_format.

        Unknown formats fall back to the count report, mirroring the
        parent and ProjectPeriodReport.
        """
        if self.report_format == 'count':
            report_class = ItemCountPeriodReport(self.since, self.till, self.item)
        elif self.report_format == 'event':
            # FIX: the previous version also did
            #     self.report_name = 'event_' + self.report_name
            # here, double-prefixing the format.  That assignment was
            # inconsistent with the sibling classes and was immediately
            # overwritten by __init__'s own report_name assignment, so it
            # is removed.
            report_class = ItemEventPeriodReport(self.since, self.till, self.item)
        else:
            report_class = ItemCountPeriodReport(self.since, self.till, self.item)
        return report_class
| 38.152941 | 102 | 0.755782 | 442 | 3,243 | 5.266968 | 0.171946 | 0.128866 | 0.055842 | 0.065722 | 0.592354 | 0.583763 | 0.507732 | 0.383591 | 0.383591 | 0.289948 | 0 | 0 | 0.136602 | 3,243 | 84 | 103 | 38.607143 | 0.831429 | 0.127351 | 0 | 0.440678 | 0 | 0 | 0.043556 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.135593 | false | 0 | 0.016949 | 0 | 0.271186 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
da32e41f6a6f935279a78a5bb3c83c1544bb8fec | 494 | py | Python | CodeWars/Python/EvenTimesLast.py | BobbyRobillard/CodingChallenges | 71d5ca0b7f7c470c547d858dde7a799ce7d0d1a0 | [
"MIT"
] | null | null | null | CodeWars/Python/EvenTimesLast.py | BobbyRobillard/CodingChallenges | 71d5ca0b7f7c470c547d858dde7a799ce7d0d1a0 | [
"MIT"
] | null | null | null | CodeWars/Python/EvenTimesLast.py | BobbyRobillard/CodingChallenges | 71d5ca0b7f7c470c547d858dde7a799ce7d0d1a0 | [
"MIT"
] | null | null | null | # First Correct Solution #
def even_last(numbers):
    """Sum of even-indexed entries multiplied by the last entry; 0 for []."""
    if not numbers:
        return 0
    return sum(numbers[::2]) * numbers[-1]
# REFACTORED Solution #
def even_last(numbers):
    """Sum of even-indexed entries multiplied by the last entry; 0 for []."""
    if not numbers:
        return 0
    total = 0
    for value in numbers[::2]:
        total += value
    return total * numbers[-1]
# EXAMPLE AND TESTING #
# Demonstrates even_last on [2,3,4,5]: (2 + 4) * 5 == 30.
print("\nInput: {0}\nEven Times Last: {1}".format("[2,3,4,5]", even_last([2, 3, 4, 5])))
assert even_last([2, 3, 4, 5]) == 30  # Simple Unit Tests
| 24.7 | 88 | 0.595142 | 77 | 494 | 3.766234 | 0.467532 | 0.110345 | 0.031034 | 0.041379 | 0.303448 | 0.303448 | 0 | 0 | 0 | 0 | 0 | 0.063492 | 0.234818 | 494 | 19 | 89 | 26 | 0.703704 | 0.168016 | 0 | 0.181818 | 0 | 0 | 0.1067 | 0 | 0 | 0 | 0 | 0 | 0.090909 | 1 | 0.181818 | false | 0 | 0 | 0.181818 | 0.363636 | 0.090909 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 1 |
da35f10f0968905caacfa6b0d419287aa50e3f84 | 603 | py | Python | scatter.py | Deego88/pands-project | 89f0baad690b0772502d5d336c9fd56ad5470cdc | [
"MIT"
] | null | null | null | scatter.py | Deego88/pands-project | 89f0baad690b0772502d5d336c9fd56ad5470cdc | [
"MIT"
] | null | null | null | scatter.py | Deego88/pands-project | 89f0baad690b0772502d5d336c9fd56ad5470cdc | [
"MIT"
] | null | null | null | # Third output required for project- A scatter plot of the output of the variables
import seaborn as sns
import pandas as pd
import matplotlib.pyplot as plt
# First output required for project- Read iris CSV file to into DataFrame
# Load the Iris measurements; assumes iris.csv sits in the working directory.
iris = pd.read_csv("iris.csv")
sns.set_style("darkgrid")
# Pairwise scatter matrix, one marker per species.
sns.pairplot(iris, hue="species", height=2, markers=["o", "s", "D"])  # hue distinguished by species
plt.show()
# Fit a linear regression line to the scatter plots
sns.pairplot(iris, kind="reg")
plt.show()
#References
#1.seaborn.pairplot ,https://seaborn.pydata.org/generated/seaborn.pairplot.html | 35.470588 | 99 | 0.739635 | 94 | 603 | 4.723404 | 0.606383 | 0.063063 | 0.076577 | 0.108108 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.003899 | 0.149254 | 603 | 17 | 100 | 35.470588 | 0.861598 | 0.529022 | 0 | 0.222222 | 0 | 0 | 0.110687 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.333333 | 0 | 0.333333 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 1 |
da36cd3ef7bb152a5c978fa2a7968dad8b22592a | 28 | py | Python | image_predictor/__init__.py | shakedlokits/pyArt | ffac65e91b7f97f0fa95f4ee7bfffcff4f6249aa | [
"Unlicense"
] | 3 | 2017-03-26T16:42:18.000Z | 2021-12-30T06:28:34.000Z | image_predictor/__init__.py | shakedlokits/pyArt | ffac65e91b7f97f0fa95f4ee7bfffcff4f6249aa | [
"Unlicense"
] | null | null | null | image_predictor/__init__.py | shakedlokits/pyArt | ffac65e91b7f97f0fa95f4ee7bfffcff4f6249aa | [
"Unlicense"
] | null | null | null | __author__ = 'shakedlokits'
| 14 | 27 | 0.785714 | 2 | 28 | 9 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.107143 | 28 | 1 | 28 | 28 | 0.72 | 0 | 0 | 0 | 0 | 0 | 0.428571 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0 | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 3 |
da378e0eafeab07a79465181d9fc51e82389ac0a | 6,674 | py | Python | officevideo/officevideo.py | introp-software/xblock-officevideo | 6e475df782a4b0a2d2d9f7e2e5b9bae441b56024 | [
"MIT"
] | 4 | 2020-02-09T09:39:06.000Z | 2021-12-30T09:50:57.000Z | officevideo/officevideo.py | acidburn0zzz/xblock-officevideo | 6e475df782a4b0a2d2d9f7e2e5b9bae441b56024 | [
"MIT"
] | null | null | null | officevideo/officevideo.py | acidburn0zzz/xblock-officevideo | 6e475df782a4b0a2d2d9f7e2e5b9bae441b56024 | [
"MIT"
] | 8 | 2019-11-02T21:34:20.000Z | 2021-12-30T09:50:59.000Z | """ Copyright (c) Microsoft Corporation. All Rights Reserved. """
""" Licensed under the MIT license. See LICENSE file on the project webpage for details. """
import textwrap
import pkg_resources
import urllib2
import mimetypes
import urlparse, requests, json
import xml.etree.ElementTree as ET
from xblock.core import XBlock
from xblock.fragment import Fragment
from xblock.fields import Scope, String
from django.conf import settings
from django.contrib.auth.models import User
from social.apps.django_app.utils import load_strategy
import logging
LOG = logging.getLogger(__name__)
import time
import re
from urlparse import parse_qs, urlsplit, urlunsplit
from urllib import urlencode
"""test url: https://wwedudemo17.sharepoint.com/portals/hub/_layouts/15/PointPublishing.aspx?app=video&p=p&chid=4fe89746-6fd9-4a2b-9a42-ea41c5853a53&vid=70113d75-9a34-494a-972d-dc498c12168f """
"""
<iframe width=640 height=360
src='https://wwedudemo17.sharepoint.com/portals/hub/_layouts/15/VideoEmbedHost.aspx?chId=4fe89746%2D6fd9%2D4a2b%2D9a42%2Dea41c5853a53&vId=70113d75%2D9a34%2D494a%2D972d%2Ddc498c12168f&width=640&height=360&autoPlay=false&showInfo=true' allowfullscreen></iframe>
"""
# Fallback sample video used until the course author supplies their own URL.
DEFAULT_VIDEO_URL = ('https://www.youtube.com/embed/uXsJ_9lQubc')
class OfficeVideoXBlock(XBlock):
    """XBlock that embeds an Office 365 Video (SharePoint PointPublishing) player.

    Course authors paste either a full ``<iframe>`` embed code or a plain video
    URL in Studio; students are shown the resulting iframe. When only a URL is
    stored, the official embed code is fetched on demand from the SharePoint
    VideoService API (see ``get_officevideo_embed_code``).
    """
    # Skeleton iframe used as the default embed and as the fallback wrapper.
    EMBED_CODE_TEMPLATE = textwrap.dedent("""
        <iframe
            src="{}"
            width="640"
            height="360"
            allowfullscreen>
        </iframe>
    """)
    display_name = String(
        display_name="Display Name",
        help="This name appears in the horizontal navigation at the top of the page.",
        scope=Scope.settings,
        default="OfficeVideo",
    )
    video_url = String(
        display_name="Video URL",
        help="Navigate to the video in your browser and ensure that it is accessible to your intended audience. Copy its URL or embed code and paste it into this field.",
        scope=Scope.settings,
        default=EMBED_CODE_TEMPLATE.format(DEFAULT_VIDEO_URL)
    )
    output_code = String(
        display_name="Output Iframe Embed Code",
        help="Copy the embed code into this field.",
        scope=Scope.settings,
        default=EMBED_CODE_TEMPLATE.format(DEFAULT_VIDEO_URL)
    )
    message = String(
        display_name="video display status message",
        help="Message to help students in case of errors.",
        scope=Scope.settings,
        default=""
    )
    message_display_state = String(
        display_name="Whether to display the status message",
        help="Determines whether to display the message to help students in case of errors.",
        scope=Scope.settings,
        default="block"
    )

    def resource_string(self, path):
        """Handy helper for getting resources from our kit."""
        data = pkg_resources.resource_string(__name__, path)
        return data.decode("utf8")

    def student_view(self, context=None):
        """
        The primary view of the OfficeVideoXBlock, shown to students
        when viewing courses.
        """
        embed_code = self.output_code
        if embed_code == '':
            # No stored embed code: resolve one from the configured URL.
            embed_code = self.get_officevideo_embed_code(officevideo_url=self.video_url)
        html = self.resource_string("static/html/officevideo.html")
        frag = Fragment(html.format(embed_code=embed_code, message=self.message, message_display_state=self.message_display_state))
        frag.add_css(self.resource_string("static/css/officevideo.css"))
        frag.add_javascript(self.resource_string("static/js/src/officevideo.js"))
        frag.initialize_js('OfficeVideoXBlock')
        return frag

    def studio_view(self, context=None):
        """
        The primary view of the OfficeVideoXBlock, shown to teachers
        when editing the block in Studio.
        """
        html = self.resource_string("static/html/officevideo_edit.html")
        frag = Fragment(html.format(self=self))
        frag.add_css(self.resource_string("static/css/officevideo.css"))
        frag.add_javascript(self.resource_string("static/js/src/officevideo_edit.js"))
        frag.initialize_js('OfficeVideoXBlock')
        return frag

    @XBlock.json_handler
    def studio_submit(self, submissions, suffix=''):  # pylint: disable=unused-argument
        """
        Change the settings for this XBlock given by the Studio user.

        Returns {'result': 'success'} on success, or {'result': 'error'}
        when the submissions payload is not a dict.
        """
        if not isinstance(submissions, dict):
            LOG.error("submissions object from Studio is not a dict - %r", submissions)
            return {
                'result': 'error'
            }
        self.display_name = submissions['display_name']
        self.video_url = submissions['video_url']
        # Check whether the user entered a full embed code rather than a plain URL.
        embed_code_regex = '<iframe '
        matched = re.match(embed_code_regex, self.video_url, re.IGNORECASE)
        if matched is not None:
            self.output_code = self.video_url
        else:
            # Force student_view to re-resolve the embed code from the URL.
            self.output_code = ''
        self.message = ""
        self.message_display_state = "block"
        return {'result': 'success'}

    def get_officevideo_embed_code(self, officevideo_url):
        """
        Fetch the official embed code for the configured video from the
        SharePoint VideoService REST API, authenticating with the current
        user's Azure AD OAuth2 token.

        Falls back to a plain HTML link to the video when anything fails
        (missing social-auth record, expired token, malformed URL, API error).
        """
        embed_code = ''
        try:
            django_user_social = User.objects.get(id=self.xmodule_runtime.user_id).social_auth.get(provider='azuread-oauth2')
            # Refresh the access token if it has expired, then re-read the record.
            if int(django_user_social.extra_data['expires_on']) < int(time.time()):
                django_user_social.refresh_token(load_strategy())
                django_user_social = User.objects.get(id=self.xmodule_runtime.user_id).social_auth.get(provider='azuread-oauth2')
            url = self.video_url
            parsed = urlparse.urlparse(url)
            query_params = urlparse.parse_qs(parsed.query)
            resp = requests.get("https://" + parsed.netloc + "/portals/hub/_api/VideoService/Channels('" + query_params['chid'][0] + "')/Videos('" + query_params['vid'][0] + "')/GetVideoEmbedCode",
                                headers={'Authorization': 'Bearer ' + django_user_social.tokens,
                                         'Content-Type': 'application/json;odata=verbose'})
            # Use the public .content accessor (was the private ._content).
            root = ET.fromstring(resp.content)
            embed_code = unicode(root.text, "utf-8")
        except Exception:  # was a bare except; don't swallow SystemExit/KeyboardInterrupt
            # Best effort: never break page rendering; degrade to a plain link.
            embed_code = '<a target="_blank" href="'+ officevideo_url +'">Office 365 Video</a>'
        return embed_code

    @staticmethod
    def workbench_scenarios():
        """A canned scenario for display in the workbench."""
        return [
            ("OfficeVideoXBlock",
             """<vertical_demo>
                <officevideo/>
                <officevideo/>
                </vertical_demo>
             """),
        ]
| 38.578035 | 284 | 0.657177 | 791 | 6,674 | 5.384324 | 0.353982 | 0.042263 | 0.025358 | 0.033811 | 0.251702 | 0.239493 | 0.239493 | 0.179385 | 0.156844 | 0.156844 | 0 | 0.026491 | 0.23644 | 6,674 | 172 | 285 | 38.802326 | 0.809262 | 0.067726 | 0 | 0.125 | 0 | 0.008333 | 0.234288 | 0.045556 | 0 | 0 | 0 | 0 | 0 | 1 | 0.05 | false | 0 | 0.141667 | 0 | 0.308333 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
da37fa1cf8aa9ced7ef291cee98575d2cbc3ace4 | 4,594 | py | Python | stn/task.py | anenriquez/STNU | a02a13730cc0f31521e01e186c158533479090f5 | [
"Unlicense"
] | null | null | null | stn/task.py | anenriquez/STNU | a02a13730cc0f31521e01e186c158533479090f5 | [
"Unlicense"
] | null | null | null | stn/task.py | anenriquez/STNU | a02a13730cc0f31521e01e186c158533479090f5 | [
"Unlicense"
] | null | null | null | from stn.utils.as_dict import AsDictMixin
class Edge(AsDictMixin):
    """An STN edge whose duration is modelled as a normal distribution
    N(mean, standard_dev); all statistics are rounded to three decimals."""

    def __init__(self, name, mean, variance, **kwargs):
        self.name = name
        self.mean = round(mean, 3)
        self.variance = round(variance, 3)
        self.standard_dev = round(variance ** 0.5, 3)

    def __str__(self):
        return "{}: N({}, {})".format(self.name, self.mean, self.standard_dev)

    def __sub__(self, other):
        # Difference of two independent random variables:
        # means subtract, variances add.
        return self.mean - other.mean, self.variance + other.variance

    def __add__(self, other):
        # Sum of two independent random variables:
        # means add and variances add.
        return self.mean + other.mean, self.variance + other.variance
class Timepoint(AsDictMixin):
    """A named timepoint of a task, with times relative to a zero timepoint (ztp).

    r_earliest_time (float): earliest time relative to a ztp
    r_latest_time (float): latest time relative to a ztp
    """

    def __init__(self, name, r_earliest_time, r_latest_time, **kwargs):
        self.name = name
        # Times are rounded to three decimals on assignment.
        self.r_earliest_time = round(r_earliest_time, 3)
        self.r_latest_time = round(r_latest_time, 3)

    def __str__(self):
        return "{}: [{}, {}]".format(self.name, self.r_earliest_time, self.r_latest_time)
class Task(AsDictMixin):
    """A task composed of timepoints and probabilistic edges, plus the action
    ids of its pickup and delivery actions."""

    def __init__(self, task_id, timepoints, edges, pickup_action_id, delivery_action_id):
        """ Constructor for the Task object

        Args:
            task_id (UUID): An instance of an UUID object
            timepoints (list): list of timepoints (Timepoints)
            edges (list): list of edges (Edges)
            pickup_action_id (UUID): Action id of the pickup action
            delivery_action_id (UUID): Action id of the delivery action
        """
        self.task_id = task_id
        self.timepoints = timepoints
        self.edges = edges
        self.pickup_action_id = pickup_action_id
        self.delivery_action_id = delivery_action_id

    def __str__(self):
        to_print = ""
        to_print += "{} \n".format(self.task_id)
        to_print += "Timepoints: \n"
        for timepoint in self.timepoints:
            to_print += str(timepoint) + "\t"
        to_print += "\n Edges: \n"
        for edge in self.edges:
            to_print += str(edge) + "\t"
        to_print += "\n Pickup action:" + str(self.pickup_action_id)
        to_print += "\n Delivery action:" + str(self.delivery_action_id)
        return to_print

    def get_timepoint(self, timepoint_name):
        """Return the first timepoint named *timepoint_name*, or None if absent."""
        return next((tp for tp in self.timepoints if tp.name == timepoint_name), None)

    def get_edge(self, edge_name):
        """Return the first edge named *edge_name*, or None if absent."""
        return next((edge for edge in self.edges if edge.name == edge_name), None)

    def update_timepoint(self, timepoint_name, r_earliest_time, r_latest_time=float('inf')):
        """Update every timepoint named *timepoint_name* in place, or append a
        new Timepoint when none exists.

        Note: in-place updates assign the raw values, whereas a newly appended
        Timepoint rounds them in its constructor (pre-existing behavior).
        """
        in_list = False
        for timepoint in self.timepoints:
            if timepoint.name == timepoint_name:
                in_list = True
                timepoint.r_earliest_time = r_earliest_time
                timepoint.r_latest_time = r_latest_time
        if not in_list:
            self.timepoints.append(Timepoint(timepoint_name, r_earliest_time, r_latest_time))

    def update_edge(self, edge_name, mean, variance):
        """Update every edge named *edge_name* in place (rounding like the Edge
        constructor), or append a new Edge when none exists."""
        in_list = False
        for edge in self.edges:
            if edge.name == edge_name:
                in_list = True
                edge.mean = round(mean, 3)
                edge.variance = round(variance, 3)
                edge.standard_dev = round(variance ** 0.5, 3)
        if not in_list:
            self.edges.append(Edge(name=edge_name, mean=mean, variance=variance))

    def to_dict(self):
        """Serialize to a plain dict; timepoints and edges become dicts too."""
        dict_repr = super().to_dict()
        dict_repr.update(timepoints=[t.to_dict() for t in self.timepoints])
        dict_repr.update(edges=[e.to_dict() for e in self.edges])
        return dict_repr

    @classmethod
    def to_attrs(cls, dict_repr):
        """Convert a dict representation back into constructor attributes,
        rebuilding Timepoint and Edge instances."""
        attrs = super().to_attrs(dict_repr)
        attrs.update(timepoints=[Timepoint.from_dict(t) for t in attrs.get("timepoints")])
        attrs.update(edges=[Edge.from_dict(e) for e in attrs.get("edges")])
        return attrs
| 34.80303 | 94 | 0.60579 | 581 | 4,594 | 4.538726 | 0.137694 | 0.039818 | 0.044369 | 0.021236 | 0.372014 | 0.28176 | 0.265074 | 0.212363 | 0.174441 | 0.174441 | 0 | 0.003706 | 0.295168 | 4,594 | 131 | 95 | 35.068702 | 0.810686 | 0.110797 | 0 | 0.360825 | 0 | 0 | 0.028557 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.14433 | false | 0 | 0.010309 | 0 | 0.278351 | 0.154639 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
da382dde5d81096600a758eea608666b31d3c7b7 | 2,959 | py | Python | src/bxgateway/messages/btc/data_btc_message.py | blockchain-development-resources/bxgateway | 761b5085f9c7c6527f0b9aaae06d2f70f3786db2 | [
"MIT"
] | 1 | 2021-11-26T07:49:24.000Z | 2021-11-26T07:49:24.000Z | src/bxgateway/messages/btc/data_btc_message.py | beepool/bxgateway | 761b5085f9c7c6527f0b9aaae06d2f70f3786db2 | [
"MIT"
] | null | null | null | src/bxgateway/messages/btc/data_btc_message.py | beepool/bxgateway | 761b5085f9c7c6527f0b9aaae06d2f70f3786db2 | [
"MIT"
] | 1 | 2021-09-06T02:10:08.000Z | 2021-09-06T02:10:08.000Z | import struct
from bxgateway.btc_constants import BTC_HDR_COMMON_OFF, BTC_SHA_HASH_LEN
from bxgateway.messages.btc.btc_message import BtcMessage
from bxgateway.messages.btc.btc_message_type import BtcMessageType
from bxgateway.messages.btc.btc_messages_util import btc_varint_to_int, pack_int_to_btc_varint
from bxgateway.utils.btc.btc_object_hash import BtcObjectHash
class DataBtcMessage(BtcMessage):
    """Base class for bitcoin messages that carry a list of locator hashes
    followed by a stop hash (shared by "getheaders" and "getblocks").

    Payload layout after the common header:
    <version: 4-byte little-endian> <varint hash count>
    <count * 32-byte hashes> <32-byte hash_stop>.
    """
    def __init__(self, magic=None, version=None, hashes=None, hash_stop=None, command=None, buf=None):
        if hashes is None:
            hashes = []
        if buf is None:
            # Serialize a new message: header space + up to 9 varint bytes
            # + 32 bytes per hash + 32 bytes for hash_stop.
            buf = bytearray(BTC_HDR_COMMON_OFF + 9 + (len(hashes) + 1) * 32)
            self.buf = buf
            off = BTC_HDR_COMMON_OFF
            struct.pack_into("<I", buf, off, version)
            off += 4
            off += pack_int_to_btc_varint(len(hashes), buf, off)
            for hash_val in hashes:
                # Each hash is written in its big-endian byte representation.
                buf[off:off + 32] = hash_val.get_big_endian()
                off += 32
            buf[off:off + 32] = hash_stop.get_big_endian()
            off += 32
            # Let the base class fill in the common header around the payload.
            BtcMessage.__init__(self, magic, command, off - BTC_HDR_COMMON_OFF, buf)
        else:
            # Wrap an already-serialized buffer; header fields parse lazily.
            self.buf = buf
            self._memoryview = memoryview(buf)
            self._magic = self._command = self._payload_len = self._checksum = None
            self._payload = None
        # Payload fields are decoded lazily on first access.
        self._version = self._hash_count = self._hashes = self._hash_stop = None
    def version(self):
        # Lazily decode the 4-byte little-endian version field.
        if self._version is None:
            self._version, = struct.unpack_from("<I", self.buf, BTC_HDR_COMMON_OFF)
        return self._version
    def hash_count(self):
        # Lazily decode the varint hash count; the consumed-size value is unused here.
        if self._hash_count is None:
            off = BTC_HDR_COMMON_OFF + 4
            self._hash_count, size = btc_varint_to_int(self.buf, off)
        return self._hash_count
    def __iter__(self):
        """Yield each locator hash as a BtcObjectHash (does not include hash_stop)."""
        off = BTC_HDR_COMMON_OFF + 4  # For the version field.
        b_count, size = btc_varint_to_int(self.buf, off)
        off += size
        for i in range(b_count):
            yield BtcObjectHash(buf=self.buf, offset=off, length=BTC_SHA_HASH_LEN)
            off += 32
    def hash_stop(self):
        # hash_stop occupies the final 32 bytes of the payload.
        return BtcObjectHash(buf=self.buf, offset=BTC_HDR_COMMON_OFF + self.payload_len() - 32, length=BTC_SHA_HASH_LEN)
class GetHeadersBtcMessage(DataBtcMessage):
    """The "getheaders" request: asks a peer for block headers following the
    given locator hashes, up to hash_stop."""
    MESSAGE_TYPE = BtcMessageType.GET_HEADERS

    def __init__(self, magic=None, version=None, hashes=None, hash_stop=None, buf=None):
        locator_hashes = [] if hashes is None else hashes
        super(GetHeadersBtcMessage, self).__init__(
            magic, version, locator_hashes, hash_stop, self.MESSAGE_TYPE, buf)
class GetBlocksBtcMessage(DataBtcMessage):
    """The "getblocks" request: asks a peer for block inventories following the
    given locator hashes, up to hash_stop."""
    MESSAGE_TYPE = BtcMessageType.GET_BLOCKS

    def __init__(self, magic=None, version=None, hashes=None, hash_stop=None, buf=None):
        locator_hashes = [] if hashes is None else hashes
        super(GetBlocksBtcMessage, self).__init__(
            magic, version, locator_hashes, hash_stop, self.MESSAGE_TYPE, buf)
| 36.9875 | 120 | 0.657993 | 396 | 2,959 | 4.580808 | 0.184343 | 0.026461 | 0.052922 | 0.066152 | 0.481257 | 0.292172 | 0.233738 | 0.233738 | 0.216648 | 0.180265 | 0 | 0.00857 | 0.25076 | 2,959 | 79 | 121 | 37.455696 | 0.809653 | 0.007435 | 0 | 0.254237 | 0 | 0 | 0.001363 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.118644 | false | 0 | 0.101695 | 0.016949 | 0.355932 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
da39df4b22e6c016cc25f0ba477072a025a6794f | 8,188 | py | Python | alexber/utils/_ymlparsers_extra.py | AndreyRub/AlexBerUtils | b2d79c98c083533534470b62632a36dfd730be48 | [
"BSD-2-Clause"
] | null | null | null | alexber/utils/_ymlparsers_extra.py | AndreyRub/AlexBerUtils | b2d79c98c083533534470b62632a36dfd730be48 | [
"BSD-2-Clause"
] | 8 | 2019-12-15T05:13:27.000Z | 2021-02-16T20:03:40.000Z | alexber/utils/_ymlparsers_extra.py | AndreyRub/AlexBerUtils | b2d79c98c083533534470b62632a36dfd730be48 | [
"BSD-2-Clause"
] | 2 | 2019-12-12T03:52:37.000Z | 2021-05-21T21:14:34.000Z | """
This module adopts its behavior dependent on availability of Python packages.
This module optionally depends on ymlparseser module.
Method format_template() is used in emails module.
Note: This module will work if you have only standard Python package. You just can't change delimiters values.
Note: API and implementation of this module is unstable and can change without prior notice.
"""
import warnings
def format_template(template, **kwargs):
    """
    Render *template* (Jinja2-style ``{{my_variable}}`` placeholders) using
    ``str.format()``.

    Note: API of this method is unstable and can change without prior notice.

    :param template: str, typically with {{my_variable}}; None passes through.
    :param jinja2ctx: Jinja2 Environment consulted for the variable delimiters.
        If not provided, HiYaPyCo.jinja2ctx is used (see ymlparsers.initConfig());
        failing that, jinja2's defaults apply.
    :param jinja2Lock: Lock used to read the delimiters from jinja2ctx atomically.
        If not provided, HiYaPyCo.jinja2Lock is used (see ymlparsers.initConfig()).
    :return: formatted str (None for None input)
    """
    if template is None:
        return None
    format_string = _convert_template_to_string_format(template, **kwargs)
    # The original template is also passed positionally (so a literal {0}
    # field would resolve to it); extra kwargs such as jinja2ctx/jinja2Lock
    # are simply ignored by str.format().
    return format_string.format(template, **kwargs)
# Feature detection: probe for the optional ymlparsers module (which pulls in
# HiYaPyCo/jinja2); the yaml deprecation warnings it may emit are silenced.
try:
    with warnings.catch_warnings():
        warnings.filterwarnings("ignore", message=r'.*?yaml*?', module=r'.*?ymlparsers.*?')
        from . ymlparsers import HiYaPyCo
    _isHiYaPyCoAvailable = True
except ImportError:
    _isHiYaPyCoAvailable = False
# Probe whether jinja2 ships its delimiter defaults; newer jinja2 exposes them
# in jinja2.defaults, older versions in jinja2.environment. The throwaway
# names are deleted afterwards so they don't leak into the module namespace.
_a1 = None
_a2 = None
try:
    try:
        from jinja2.defaults import VARIABLE_START_STRING as _a1, VARIABLE_END_STRING as _a2
        _isJinja2DefaultAvailable = True
    except ImportError:
        try:
            from jinja2.environment import VARIABLE_START_STRING as _a1, VARIABLE_END_STRING as _a2
            _isJinja2DefaultAvailable = True
        except ImportError:
            _isJinja2DefaultAvailable = False
finally:
    del _a1
    del _a2
# Module-level delimiters; populated by _init_globals() below.
_VARIABLE_START_STRING = None
_VARIABLE_END_STRING = None
def _init_globals():
    """
    Resolve the module-level variable delimiters.

    This method is called during module import and is idempotent.
    """
    global _VARIABLE_START_STRING, _VARIABLE_END_STRING
    if not _isJinja2DefaultAvailable:
        # jinja2 is unavailable: fall back to its historical default delimiters.
        _VARIABLE_START_STRING = '{{'
        _VARIABLE_END_STRING = '}}'
        return
    try:
        from jinja2.defaults import VARIABLE_START_STRING as start_del, VARIABLE_END_STRING as end_del
    except ImportError:
        from jinja2.environment import VARIABLE_START_STRING as start_del, VARIABLE_END_STRING as end_del
    if start_del is None or end_del is None:
        raise ImportError('VARIABLE_START_STRING or VARIABLE_END_STRING are not defined')
    _VARIABLE_START_STRING = start_del
    _VARIABLE_END_STRING = end_del


_init_globals()
def _normalize_var_name(text, start_del, end_del):
    """
    Search&replace '.' with '_' inside every delimited variable name so the
    names are usable as str.format() fields.

    :param text: str to normalize
    :param start_del: delimiter that indicates start of variable name, typically {{
    :param end_del: delimiter that indicates end of variable name, typically }}
    :return: normalized text
    """
    if start_del is None or start_del not in text or end_del not in text:
        return text
    first_ind = 0
    len_end_del = len(end_del)
    while True:
        first_ind = text.find(start_del, first_ind)
        if first_ind < 0:
            break
        last_ind = text.find(end_del, first_ind)
        if last_ind < 0:
            # Unmatched start delimiter (no closing delimiter after it):
            # stop here instead of corrupting the text / looping forever.
            break
        var = text[first_ind:last_ind + len_end_del]
        var = var.replace('.', '_')
        # Splice the rewritten variable token back into place.
        text = text[:first_ind] + var + text[last_ind + len_end_del:]
        first_ind = last_ind + len_end_del
    return text
def __convert_template_to_string_format(template, **kwargs):
    """
    Rewrite Jinja2-style delimiters in *template* into str.format() braces.

    :param template: str, typically with {{my_variable}}; None passes through.
    :param default_start: delimiter that opens a variable token (typically {{).
    :param default_end: delimiter that closes a variable token (typically }}).
    :return: template str with {my_variable} placeholders
    """
    if template is None:
        return None
    default_start = kwargs.pop('default_start', None)
    default_end = kwargs.pop('default_end', None)
    template = _normalize_var_name(template, default_start, default_end)
    # First strip the single space Jinja2 allows just inside each delimiter,
    # then swap the delimiters themselves for single braces.
    replacements = (
        (f'{default_start} ', f'{default_start}'),
        (f'{default_start}', '{'),
        (f' {default_end}', f'{default_end}'),
        (f'{default_end}', '}'),
    )
    for old, new in replacements:
        template = template.replace(old, new)
    return template
def _convert_template_to_string_format(template, **kwargs):
    """
    This is utility method that make template usable with string format.
    if both jinja2ctx and jinja2Lock are provided, than they are used to determine various delimiters
    (jinja2Lock is used to read the values from jinja2ctx atomically).
    if both jinja2ctx and jinja2Lock are not provided, than
    If ymlparsers is usable (it's 3rd party dependencies are available, one if each is jinja2)
    than it's jinja2ctx (Jinja2's Environment) will be consulted for the various delimiters.
    Otherwise, if jinja2 is available than we will use it's defaults for constricting Jinja2's Environment
    for the various delimiters.
    Otherwise, some sensible defaults (default values from some version of Jinja2) will be used.
    You can't provide jinja2Lock without providing jinja2ctx (you can't provide your jinja2Lock for HiYaPyCo.jinja2ctx).
    You can provide jinja2ctx without jinja2Lock. Than you will give up atomicity for determining various delimiters.
    :param template: str, typically with {{my_variable}}
    :param jinja2ctx: Jinja2 Environment that is consulted what is delimiter for variable's names.
                     if is not provided, HiYaPyCo.jinja2ctx is used. See ymlparsers.initConfig().
                     if is not provided, than defaults are used (see jinja2.defaults).
    :param jinja2Lock: Lock to be used to atomically get variable_start_string and variable_end_string from jinja2ctx.
                     if is not provided, HiYaPyCo.jinja2Lock is used.. See ymlparsers.initConfig().
    :return: template: str with {my_variable}
    """
    if template is None:
        return None
    jinja2ctx = kwargs.pop('jinja2ctx', None)
    jinja2Lock = kwargs.pop('jinja2Lock', None)
    # A caller-supplied lock only makes sense for a caller-supplied environment.
    if _isHiYaPyCoAvailable and jinja2ctx is None and jinja2Lock is not None:
        raise ValueError("You can't provide your jinja2Lock for HiYaPyCo.jinja2ctx")
    if _isHiYaPyCoAvailable and jinja2ctx is None:
        jinja2ctx = HiYaPyCo.jinja2ctx
        jinja2Lock = HiYaPyCo.jinja2Lock #we should use HiYaPyCo.jinja2Lock for HiYaPyCo.jinja2ctx
    #default_start, default_end
    if jinja2ctx is None:
        # No environment available: use the module-level delimiters resolved
        # at import time (jinja2 defaults or the hard-coded fallback).
        if jinja2Lock is None:
            default_start = _VARIABLE_START_STRING
            default_end = _VARIABLE_END_STRING
        else:
            with jinja2Lock:
                default_start = _VARIABLE_START_STRING
                default_end = _VARIABLE_END_STRING
    else:
        # Sanity check on the HiYaPyCo pairing before touching the environment.
        if _isHiYaPyCoAvailable and HiYaPyCo.jinja2ctx is not None and HiYaPyCo.jinja2Lock is None:
            raise ValueError('HiYaPyCo.jinja2ctx is not None, but HiYaPyCo.jinja2Lock is None')
        if jinja2Lock is None:
            # jinja2ctx was provided, but jinja2Lock wasn't, it is ok
            # (maybe jinja2ctx is local variable?)
            default_start = jinja2ctx.variable_start_string
            default_end = jinja2ctx.variable_end_string
        else:
            with jinja2Lock:
                default_start = jinja2ctx.variable_start_string
                default_end = jinja2ctx.variable_end_string
    ret = __convert_template_to_string_format(template, default_start=default_start, default_end=default_end)
    return ret
| 38.261682 | 120 | 0.69553 | 1,052 | 8,188 | 5.230989 | 0.173954 | 0.035435 | 0.05179 | 0.016355 | 0.491914 | 0.463747 | 0.396329 | 0.379793 | 0.343812 | 0.318735 | 0 | 0.014947 | 0.240107 | 8,188 | 213 | 121 | 38.441315 | 0.869495 | 0.447606 | 0 | 0.365385 | 0 | 0 | 0.080886 | 0.004895 | 0 | 0 | 0 | 0 | 0 | 1 | 0.048077 | false | 0 | 0.105769 | 0 | 0.230769 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
da3bd0652c70b407476c6f160e8a128d0c51ee92 | 32 | py | Python | pyglview/__init__.py | weltonrodrigo/python_glview | 6680601aea5ae67ca8c5ccbf03847e6abc6f270c | [
"MIT"
] | 6 | 2019-04-29T05:44:51.000Z | 2022-01-28T15:31:57.000Z | pyglview/__init__.py | weltonrodrigo/python_glview | 6680601aea5ae67ca8c5ccbf03847e6abc6f270c | [
"MIT"
] | 1 | 2021-06-12T01:49:10.000Z | 2021-06-12T01:49:10.000Z | pyglview/__init__.py | weltonrodrigo/python_glview | 6680601aea5ae67ca8c5ccbf03847e6abc6f270c | [
"MIT"
] | 2 | 2019-12-16T20:34:59.000Z | 2020-08-24T14:57:50.000Z | from pyglview.pyglview import *
| 16 | 31 | 0.8125 | 4 | 32 | 6.5 | 0.75 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.125 | 32 | 1 | 32 | 32 | 0.928571 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 0 | 0 | 0 | 5 |
da3c5141c6711e22ea8ee4bbbf6d8f4d99c372b5 | 163 | py | Python | unused_stuff/save_image.py | JDJGInc/JDJGBotSupreme | fd8a5679f05cb90ebec8dbfc297445f9773ebe5f | [
"MIT"
] | 4 | 2020-07-10T04:02:23.000Z | 2021-02-13T16:38:54.000Z | unused_stuff/save_image.py | JDJGInc/JDJGBotSupreme | fd8a5679f05cb90ebec8dbfc297445f9773ebe5f | [
"MIT"
] | 3 | 2021-07-13T15:38:39.000Z | 2022-02-15T15:17:17.000Z | unused_stuff/save_image.py | johndpope/JDJGBotSupreme | 64fde0e169811e1866eb29174ac5dd8e052d830a | [
"MIT"
] | 2 | 2020-08-01T11:15:09.000Z | 2022-02-15T11:46:22.000Z | async with aiohttp.ClientSession() as cs:
async with cs.get(url) as r:
image=await r.read()
f = open("reverse.png","wb")
f.write(image)
f.close() | 27.166667 | 41 | 0.625767 | 27 | 163 | 3.777778 | 0.703704 | 0.176471 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.208589 | 163 | 6 | 42 | 27.166667 | 0.790698 | 0 | 0 | 0 | 0 | 0 | 0.079268 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2 |
da3cdf32ec5c491bbbd379b8659c9adccf3080ca | 2,653 | py | Python | models/house_water_drain.py | susundberg/python-freecad-3dparts | 248e6f5eb4ce3d1921b3d4875e9c1d112f7b7498 | [
"MIT"
] | null | null | null | models/house_water_drain.py | susundberg/python-freecad-3dparts | 248e6f5eb4ce3d1921b3d4875e9c1d112f7b7498 | [
"MIT"
] | null | null | null | models/house_water_drain.py | susundberg/python-freecad-3dparts | 248e6f5eb4ce3d1921b3d4875e9c1d112f7b7498 | [
"MIT"
] | null | null | null | import supalib
# Parametric model of a house water-drain bracket built with the supalib CSG
# helpers and exported as mesh files for 3D printing.
# NOTE(review): units are presumably millimetres — confirm against supalib.
TOLE=0.2
OUTSIZE=60.0
SIZE_CONST=25.0
SIZE_DROP=20.0
ANGLE_DROP=45.0
BASE_THICK=5.0
BASE_WIDE=20.0
# Pipe radius includes the printing tolerance.
PIPE_RAD=OUTSIZE/2.0 + TOLE
# Cylinders used as cutting / bounding volumes around the pipe.
hole = supalib.create_cyl( radius=PIPE_RAD , size_z = OUTSIZE, place=(0, PIPE_RAD + 1.0, -OUTSIZE/2.0) )
outer_hole = supalib.create_cyl( radius=PIPE_RAD + 5.0 , size_z = OUTSIZE, place=(0, PIPE_RAD + 5.0, -OUTSIZE/2.0) )
# The "drop": two mirrored triangles joined, rotated, and cut to fit the pipe.
tr1 = supalib.create_triangle( SIZE_DROP, BASE_THICK, BASE_WIDE/2.0 )
tr2 = supalib.create_triangle( SIZE_DROP, BASE_THICK, BASE_WIDE/2.0,rotate=(1,0,0,180),place=(0,+BASE_THICK,0) )
drop = supalib.create_union( (tr1, tr2) )
drop = supalib.relocate( drop, rotate=(0,1,0,90) )
drop = supalib.create_cut( drop, hole )
drop = supalib.relocate( drop, rotate=(1,0,0,30) )
drop = supalib.relocate( drop, place=(0,0,SIZE_CONST + SIZE_DROP ) )
drop = supalib.relocate( drop, place=(0,-9,-1) )
# The base plate: boxed, trimmed to the outer cylinder, merged with the drop,
# and drilled through for the pipe.
base = supalib.create_box( (BASE_WIDE,BASE_THICK,SIZE_CONST + 4.0), place = ( -BASE_WIDE/2.0,0.0,0.0) )
base = supalib.create_intersection( ( base, outer_hole ) )
base = supalib.create_union( ( base, drop ) )
base = supalib.create_cut( base, hole )
base.Label="house_drain"
# A thin ring (difference of two concentric cylinders) that fits over the pipe.
holder_rad = PIPE_RAD + 0.5 + TOLE
HOLDER_SIZE=5.0
outer_hole2 = supalib.create_cyl( radius=holder_rad , size_z = HOLDER_SIZE, place=(0, 0, 0) )
outer_hole3 = supalib.create_cyl( radius=holder_rad + 1.0 , size_z = HOLDER_SIZE, place=(0, 0, 0) )
outer_holder = supalib.create_cut( outer_hole3, outer_hole2 )
outer_holder = supalib.relocate( outer_holder, place=(0,+holder_rad,0) )
outer_holder.Label="house_holder"
# A small wedge piece placed on the opposite side of the pipe.
thight = supalib.create_box( (BASE_WIDE,BASE_THICK,10), place = ( -BASE_WIDE/2.0,0.0,0.0) )
thight = supalib.create_cut( thight, hole )
thight = supalib.create_intersection( ( thight, outer_hole ) )
thight = supalib.relocate( thight, rotate=(0,0,1,180), place=(0,2*holder_rad,0) )
thight.Label = "house_wedge"
# Export every part as a mesh file ("creta_mesh_from" is supalib's actual API name).
parts = [ thight, outer_holder, base ]
for p in parts:
    supalib.creta_mesh_from( p, save_to="/home/pauli/", version=3 )
# --- Leftover scratch code from an earlier tool-holder model, kept commented out ---
#hole_app = supalib.create_box( (0.5,0.25 + TOLE,5.0) , place=(offset - 0.25, 5.0 - rad_size/2.0 - 2*TOLE, 2.5 ) )
#offset += rad_size + RADS[loop+1] + 2.0
#holes.append(hole)
#hole_adds.append( hole_app )
#holes = supalib.create_union( holes )
#hole_adds = supalib.create_union( hole_adds )
#box_bound = supalib.create_box( (offset, 10.0, 10 ) )
#box_bound = supalib.create_fillet( box_bound )
#box_bound = supalib.create_cut( box_bound, holes )
#box_bound = supalib.create_union( (box_bound,hole_adds) )
#box_bound.Label="Tool_holder"
#mesh = supalib.creta_mesh_from( box_bound, save_to="/home/pauli/", version=1 )
supalib.finish()
| 35.851351 | 119 | 0.710516 | 449 | 2,653 | 3.973274 | 0.167038 | 0.167601 | 0.013453 | 0.049327 | 0.33296 | 0.275785 | 0.20852 | 0.106502 | 0.106502 | 0.053812 | 0 | 0.058185 | 0.131926 | 2,653 | 73 | 120 | 36.342466 | 0.716457 | 0.223897 | 0 | 0 | 0 | 0 | 0.022483 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.025 | 0 | 0.025 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
da3d14bdecf4a0d019902eff7b7a5c16a38e1ceb | 2,434 | py | Python | privatebin.py | iomintz/python-snippets | 982861c173bf4bcd5d908514a9e8b1914a580a5d | [
"CC0-1.0"
] | 2 | 2020-04-10T07:29:56.000Z | 2020-05-27T03:45:21.000Z | privatebin.py | LyricLy/python-snippets | 9d868b7bbccd793ea1dc513f51290963584a1dee | [
"CC0-1.0"
] | null | null | null | privatebin.py | LyricLy/python-snippets | 9d868b7bbccd793ea1dc513f51290963584a1dee | [
"CC0-1.0"
] | 2 | 2018-11-24T08:16:59.000Z | 2019-02-24T04:41:30.000Z | #!/usr/bin/env python3
# encoding: utf-8
"""
privatebin.py: uploads text to privatebin
using code from <https://github.com/r4sas/PBinCLI/blob/master/pbincli/actions.py>,
© 2017–2018 R4SAS <r4sas@i2pmail.org>
using code from <https://github.com/khazhyk/dango.py/blob/master/dango/zerobin.py>,
© 2017 khazhyk
"""
import asyncio
import base64
import json
import os
import sys
import zlib
import aiohttp
from sjcl import SJCL
def encrypt(text):
    """Encrypt *text* for privatebin: compress it, then SJCL-encrypt the result
    in GCM mode under a freshly generated urlsafe-base64 256-bit key.

    Returns (encrypted_data, key)."""
    key = base64.urlsafe_b64encode(os.urandom(32))
    payload = compress(text.encode('utf-8'))
    return SJCL().encrypt(payload, key, mode='gcm'), key
def compress(s: bytes):
    """Deflate *s* (raw stream, no zlib header) and base64-encode it after a
    JS-style binary-string round trip (each byte mapped through chr() and then
    UTF-8 encoded), matching what the privatebin JavaScript client expects."""
    deflater = zlib.compressobj(wbits=-zlib.MAX_WBITS)
    raw = deflater.compress(s) + deflater.flush()
    binary_string = ''.join(chr(byte) for byte in raw)
    return base64.b64encode(binary_string.encode('utf-8'))
def make_payload(text):
    """Encrypt *text* and build the privatebin POST form.

    Returns (payload_dict, key)."""
    cipher, key = encrypt(text)
    # TODO: should be implemented in upstream
    for field in ('salt', 'iv', 'ct'):
        cipher[field] = cipher[field].decode()
    payload = {
        'expire': 'never',
        'formatter': 'plaintext',
        'burnafterreading': '0',
        'opendiscussion': '0',
        'data': json.dumps(cipher, ensure_ascii=False, indent=None,
                           default=lambda x: x.decode('utf-8')),
    }
    return payload, key
# Module-level lock that serializes uploads (one request to privatebin at a time).
# NOTE(review): created at import time, outside a running event loop — fine on
# current asyncio, but confirm on older Python versions where Lock() bound a loop.
lock = asyncio.Lock()
# Raised by upload() when privatebin does not accept the paste.
class PrivateBinException(Exception): pass
async def upload(text, loop=None):
    """Upload *text* to privatebin.net and return the shareable URL.

    Encryption of the payload runs in an executor; the request is retried once
    after a 10 s pause if privatebin reports rate limiting (status == 1).

    :param text: the paste contents
    :param loop: optional event loop (defaults to the running one)
    :raises PrivateBinException: when privatebin never accepts the paste
    """
    loop = loop or asyncio.get_event_loop()
    result = None
    python_version = '.'.join(map(str, sys.version_info[:3]))
    # `async with` (instead of the previous bare acquire()/release()) guarantees
    # the lock is released even when the request or encryption raises.
    async with lock:
        payload, key = await loop.run_in_executor(None, make_payload, text)
        async with aiohttp.ClientSession(headers={
            'User-Agent': 'privatebin.py/0.0.2 aiohttp/%s python/%s' % (aiohttp.__version__, python_version),
            'X-Requested-With': 'JSONHttpRequest'
        }) as session:
            for _attempt in range(2):
                async with session.post('https://privatebin.net/', data=payload) as resp:
                    resp_json = await resp.json()
                    if resp_json['status'] == 0:
                        result = url(resp_json['id'], key)
                        break
                    elif resp_json['status'] == 1:  # rate limited
                        await asyncio.sleep(10)
    if result is None:
        raise PrivateBinException('Failed to upload to privatebin')
    else:
        return result
def url(paste_id, key):
    """Build the shareable privatebin URL: the paste id goes in the query
    string and the decryption *key* (bytes) in the URL fragment."""
    decoded_key = key.decode('utf-8')
    return f'https://privatebin.net/?{paste_id}#{decoded_key}'
if __name__ == '__main__':
    # Read the paste text from stdin, upload it, and print the resulting URL.
    import contextlib
    # Bug fix: `closing` was referenced unqualified (NameError at runtime) and
    # the event loop was created twice; qualify the name and create it once.
    with contextlib.closing(asyncio.get_event_loop()) as loop:
        print(loop.run_until_complete(upload(sys.stdin.read())))
| 26.747253 | 107 | 0.711586 | 356 | 2,434 | 4.766854 | 0.457865 | 0.011786 | 0.026517 | 0.033589 | 0.031821 | 0.031821 | 0 | 0 | 0 | 0 | 0 | 0.021408 | 0.136401 | 2,434 | 90 | 108 | 27.044444 | 0.784491 | 0.158998 | 0 | 0 | 0 | 0 | 0.116519 | 0 | 0 | 0 | 0 | 0.011111 | 0 | 1 | 0.067797 | false | 0.016949 | 0.152542 | 0.016949 | 0.322034 | 0.016949 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
da3d5161d87de56a9bc36edca5bba5b60b08bd39 | 6,556 | py | Python | dace/libraries/standard/nodes/gearbox.py | thobauma/dace | 668e4c49e476437e1ea3b272e9dbefca2b92d2e7 | [
"BSD-3-Clause"
] | null | null | null | dace/libraries/standard/nodes/gearbox.py | thobauma/dace | 668e4c49e476437e1ea3b272e9dbefca2b92d2e7 | [
"BSD-3-Clause"
] | null | null | null | dace/libraries/standard/nodes/gearbox.py | thobauma/dace | 668e4c49e476437e1ea3b272e9dbefca2b92d2e7 | [
"BSD-3-Clause"
] | null | null | null | # Copyright 2019-2021 ETH Zurich and the DaCe authors. All rights reserved.
import collections
import copy
import dace
@dace.library.expansion
class ExpandGearbox(dace.transformation.ExpandTransformation):
    """Expansion of the Gearbox library node into a nested SDFG: a single map
    that converts between narrow and wide vector words through a one-element
    transient buffer ("gearbox_buffer")."""
    environments = []
    @staticmethod
    def expansion(node: "Gearbox", parent_state: dace.SDFGState,
                  parent_sdfg: dace.SDFG):
        """Build and return the nested SDFG implementing the gearbox.

        node.validate() supplies the connected edges/descriptors, the
        direction flag (is_pack) and the width ratio (gear_factor).
        """
        (in_edge, in_desc, out_edge, out_desc, is_pack,
         gear_factor) = node.validate(parent_sdfg, parent_state)
        # The wide (vector) type sits on the output when packing and on the
        # input when unpacking.
        if is_pack:
            vtype = out_desc.dtype
        else:
            vtype = in_desc.dtype
        sdfg = dace.SDFG("gearbox")
        # Mirror the outer connectors as non-transient data inside the nested SDFG.
        in_desc_inner = copy.deepcopy(in_desc)
        in_desc_inner.transient = False
        sdfg.add_datadesc(in_edge.dst_conn, in_desc_inner)
        out_desc_inner = copy.deepcopy(out_desc)
        out_desc_inner.transient = False
        sdfg.add_datadesc(out_edge.src_conn, out_desc_inner)
        # One wide element of scratch space shared across map iterations.
        sdfg.add_array("gearbox_buffer", (1, ),
                       vtype,
                       storage=in_desc.storage,
                       transient=True)
        state = sdfg.add_state("gearbox")
        buffer_read = state.add_read("gearbox_buffer")
        buffer_write = state.add_write("gearbox_buffer")
        input_read = state.add_read(in_edge.dst_conn)
        output_write = state.add_write(out_edge.src_conn)
        # _gearbox_i walks the node.size wide elements; _gearbox_w walks the
        # gear_factor sub-words within each wide element.
        iteration_space = {
            "_gearbox_i": f"0:{node.size}",
            "_gearbox_w": f"0:{gear_factor}"
        }
        entry, exit = state.add_map("gearbox",
                                    iteration_space,
                                    schedule=node.schedule)
        # Packing: accumulate gear_factor narrow inputs into the wide buffer and
        # emit it on the last sub-word. Unpacking: load the wide input on the
        # first sub-word and emit one narrow element per iteration.
        tasklet = state.add_tasklet(
            "gearbox", {
                "val_in",
                "buffer_in"
            }, {
                "val_out",
                "buffer_out"
            }, f"""\
wide = buffer_in
wide[_gearbox_w] = val_in
if _gearbox_w == {gear_factor} - 1:
    val_out = wide
buffer_out = wide""" if is_pack else """\
wide = val_in if _gearbox_w == 0 else buffer_in
val_out = wide[_gearbox_w]
buffer_out = wide""")
        # dynamic=not is_pack: when unpacking, the input is only read on the
        # first sub-word iteration.
        state.add_memlet_path(input_read,
                              entry,
                              tasklet,
                              dst_conn="val_in",
                              memlet=dace.Memlet(f"{in_edge.dst_conn}[0]",
                                                 dynamic=not is_pack))
        state.add_memlet_path(buffer_read,
                              entry,
                              tasklet,
                              dst_conn="buffer_in",
                              memlet=dace.Memlet(f"gearbox_buffer[0]"))
        # dynamic=is_pack: when packing, the output is only written on the
        # last sub-word iteration.
        state.add_memlet_path(tasklet,
                              exit,
                              output_write,
                              src_conn="val_out",
                              memlet=dace.Memlet(f"{out_edge.src_conn}[0]",
                                                 dynamic=is_pack))
        state.add_memlet_path(tasklet,
                              exit,
                              buffer_write,
                              src_conn="buffer_out",
                              memlet=dace.Memlet(f"gearbox_buffer[0]"))
        return sdfg
@dace.library.node
class Gearbox(dace.sdfg.nodes.LibraryNode):
    """
    Provides a library node that converts from a stream of type
    vector(vector(dtype, w0)) to a stream of type vector(dtype, w1), or vice
    versa. This is useful for achieving efficient memory reads on Xilinx FPGAs,
    where modules accessing memories should always read or write 512-bit
    vectors, which then potentially need to be narrowed down to the vector width
    of the computational kernel.

    The node expects to have a single input and a single output, where one end
    is of type vector(vector(dtype, w0)), and the other is of type
    vector(dtype, w1).
    """

    implementations = {
        "pure": ExpandGearbox,
    }
    default_implementation = "pure"

    # Properties
    size = dace.properties.SymbolicProperty(
        desc="Number of wide vectors to convert to/from narrow vectors.",
        default=0)

    def __init__(self, size, name=None, schedule=None, **kwargs):
        """
        :param size: Number of wide vectors to convert to/from narrow vectors.
                     For example, if converting n/16 reads (vector size 16) from
                     memory into n/4 elements (vector size 4), this parameter
                     should be set to n/16.
        :param name: Optional node name (defaults to "gearbox").
        :param schedule: Optional schedule type (defaults to FPGA_Device).
        """
        super().__init__(name=name or "gearbox",
                         schedule=schedule or dace.ScheduleType.FPGA_Device,
                         **kwargs)
        self.size = size
        if schedule is not None:
            self.schedule = schedule

    def validate(self, sdfg: dace.SDFG, state: dace.SDFGState):
        """Validate connectivity and stream types of this node.

        :return: Tuple of (input edge, input descriptor, output edge, output
                 descriptor, whether this is a pack operation, gear factor).
        :raises ValueError: If the size or the number of edges is invalid.
        :raises TypeError: If the connected data are not compatible streams.
        """
        try:
            size = dace.symbolic.evaluate(self.size, sdfg.constants)
            if size < 1:
                raise ValueError(f"Invalid size parameter for {self}: {size}")
        except TypeError:
            pass  # Not a constant
        in_edge = state.in_edges(self)
        if len(in_edge) != 1:
            raise ValueError(
                f"Expected only one input edge, found {len(in_edge)} edges.")
        out_edge = state.out_edges(self)
        if len(out_edge) != 1:
            # Fixed message (was a copy-paste of the input-edge error).
            raise ValueError(
                f"Expected only one output edge, found {len(out_edge)} edges.")
        in_edge = in_edge[0]
        in_desc = sdfg.arrays[in_edge.data.data]
        if not isinstance(in_desc, dace.data.Stream):
            raise TypeError(
                f"Expected input to be a stream, got {type(in_desc)}.")
        out_edge = out_edge[0]
        out_desc = sdfg.arrays[out_edge.data.data]
        if not isinstance(out_desc, dace.data.Stream):
            # Fixed message (was a copy-paste of the input-stream error).
            raise TypeError(
                f"Expected output to be a stream, got {type(out_desc)}.")
        # The type of one side must be a vector of the other
        if (isinstance(in_desc.dtype, dace.vector)
                and in_desc.dtype.base_type == out_desc.dtype):
            is_pack = False  # Is unpack
            gear_factor = in_desc.dtype.veclen
        elif (isinstance(out_desc.dtype, dace.vector)
              and out_desc.dtype.base_type == in_desc.dtype):
            is_pack = True
            gear_factor = out_desc.dtype.veclen
        else:
            raise TypeError(
                f"Cannot gearbox between {in_desc.dtype} and {out_desc.dtype}.")
        return (in_edge, in_desc, out_edge, out_desc, is_pack, gear_factor)
| 39.257485 | 80 | 0.558115 | 783 | 6,556 | 4.466156 | 0.240102 | 0.027452 | 0.020589 | 0.020589 | 0.278524 | 0.212182 | 0.157278 | 0.117815 | 0.117815 | 0.117815 | 0 | 0.008975 | 0.354179 | 6,556 | 166 | 81 | 39.493976 | 0.816958 | 0.145363 | 0 | 0.130769 | 0 | 0 | 0.157703 | 0.007813 | 0 | 0 | 0 | 0 | 0 | 1 | 0.023077 | false | 0.007692 | 0.023077 | 0 | 0.107692 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
da3fd113ae5463775113a2aa795b9fc22645ae0c | 5,662 | py | Python | reprlearn/data/samplers/kshot_sampler.py | cocoaaa/ReprLearn | 58dc682aa62dbd59201ccc55b9b26480ff3d6773 | [
"MIT"
] | null | null | null | reprlearn/data/samplers/kshot_sampler.py | cocoaaa/ReprLearn | 58dc682aa62dbd59201ccc55b9b26480ff3d6773 | [
"MIT"
] | null | null | null | reprlearn/data/samplers/kshot_sampler.py | cocoaaa/ReprLearn | 58dc682aa62dbd59201ccc55b9b26480ff3d6773 | [
"MIT"
] | null | null | null | from reprlearn.data.datasets.base import ImageDataset
from collections import defaultdict
from typing import Iterable, Optional, Callable, List, Dict, Tuple
import numpy as np
# ===============
# Returns a list of datapoints from the dataset so that
# the list contains the same number of datapoints per class (if possible)
# ===============
class KShotSampler():
    """Samples an equal number of datapoints per class from a labelled dataset.

    Given a dataset of labelled images, collects the same number of datapoints
    for every class present in the dataset's targets (when possible).
    """

    def __init__(self, shuffle: bool = True) -> None:
        """
        Args
        ----
        shuffle : bool (default True)
            whether to shuffle dataset indices before collecting datapoints.
            NOTE: currently unused; shuffling is controlled per call instead.
        """
        pass

    def get_sample_inds_per_class(self,
                                  dset: "ImageDataset",
                                  num_per_class: int,
                                  shuffle: bool = True,
                                  verify: bool = True,
                                  ):
        """Return indices sampling `num_per_class` datapoints from each class.

        Args
        ----
        dset : ImageDataset
            labelled source dataset; must expose `targets`, `__len__` and
            `__getitem__`
        num_per_class : int
            number of datapoints to collect per class
        shuffle : bool (default True)
            if True, shuffle the dataset indices before collecting
        verify : bool (default True)
            if True, raise ValueError when any class ends up with fewer than
            `num_per_class` indices

        Returns
        -------
        Dict[class_label, List[int]] : indices of datapoints to sample

        Raises
        ------
        ValueError : if the dataset cannot provide `num_per_class` datapoints
            for every class
        """
        unique_classes = np.unique(dset.targets)
        n_ways = len(unique_classes)
        if num_per_class * n_ways > len(dset.targets):
            raise ValueError(
                f"Cannot sample {num_per_class} datapoints for each of "
                f"{n_ways} classes from {len(dset.targets)} datapoints")
        inds = list(range(len(dset)))
        if shuffle:
            np.random.shuffle(inds)  # shuffle in-place
        inds_per_class = {c: [] for c in unique_classes}
        done_for_class = {c: False for c in unique_classes}
        for i in inds:
            c = dset.targets[i]
            if not done_for_class[c]:
                inds_per_class[c].append(i)
                if len(inds_per_class[c]) == num_per_class:
                    # Done collecting datapoints for this class.
                    done_for_class[c] = True
            # Stop early once every class has enough datapoints.
            # (Was `np.alltrue`, which is removed in NumPy >= 2.0.)
            if all(done_for_class.values()):
                break
        print("Done collecting datapts for each class...")
        if verify:
            for c in unique_classes:
                if len(inds_per_class[c]) != num_per_class:
                    raise ValueError(
                        f"Class {c} only has {len(inds_per_class[c])} of the "
                        f"requested {num_per_class} datapoints")
        return inds_per_class

    def sample(self,
               dset: "ImageDataset",
               num_per_class: int,
               shuffle: bool = True,
               collate_fn: Optional[Callable] = None
               ) -> List[Tuple]:
        """Return a flat collection with `num_per_class` datapoints per class.

        Args
        ----
        dset : ImageDataset
            source dataset to sample datapoints from
        num_per_class : int
            k in k-shot
        shuffle : bool
            if True, shuffle the dataset indices before collecting
        collate_fn : Callable, optional
            like the collate function of torch's DataLoader; applied to the
            list of datapoints to turn it into a desired 'batch' form

        Returns
        -------
        (Batch or List[datapts]) : the collection of sampled datapoints
        """
        inds_per_class = self.get_sample_inds_per_class(dset, num_per_class,
                                                        shuffle)
        sample_inds = np.stack(
            [np.fromiter(ilist, dtype=int) for ilist in inds_per_class.values()]
        ).flatten()
        # Shuffle so that images of the same class are not loaded all in a
        # row, one class after another.
        np.random.shuffle(sample_inds)
        # Indexing dset applies its image transform, if any is specified.
        sample = [dset[i] for i in sample_inds]
        if collate_fn is not None:
            sample = collate_fn(sample)
        return sample

    def get_support_and_query(
            self,
            dset: "ImageDataset",
            num_per_class: int,
            shuffle: bool = True,
            collate_fn: Optional[Callable] = None
    ) -> Dict:
        """Sample disjoint support and query sets, `num_per_class` each.

        Collects 2 * num_per_class indices per class; the first half of each
        class goes to the support set and the second half to the query set.

        Returns
        -------
        Dict : {'support': ..., 'query': ...}, each a list of datapoints (or
            the result of `collate_fn` applied to that list, if given)
        """
        inds_per_class = self.get_sample_inds_per_class(dset, 2 * num_per_class,
                                                        shuffle)
        support_inds = []
        query_inds = []
        for clabel, cinds in inds_per_class.items():
            cids = np.fromiter(cinds, dtype=int)
            support_inds.append(cids[:num_per_class])
            query_inds.append(cids[num_per_class:])
        # BUG FIX: flatten to 1-D so each entry is a single dataset index.
        # Previously the 2-D (class x k) arrays were iterated row-wise,
        # passing whole rows to dset[...].
        support_inds = np.concatenate(support_inds)
        query_inds = np.concatenate(query_inds)
        # Shuffle so that images of the same class are not loaded in a row.
        np.random.shuffle(support_inds)
        support_sample = [dset[i] for i in support_inds]
        if collate_fn is not None:
            support_sample = collate_fn(support_sample)
        # Similarly for the query sample.
        np.random.shuffle(query_inds)
        query_sample = [dset[i] for i in query_inds]
        if collate_fn is not None:
            query_sample = collate_fn(query_sample)
        return {'support': support_sample,
                'query': query_sample}
| 39.048276 | 112 | 0.598905 | 733 | 5,662 | 4.472033 | 0.210096 | 0.075656 | 0.04759 | 0.019829 | 0.397804 | 0.350519 | 0.316657 | 0.316657 | 0.274558 | 0.274558 | 0 | 0.000261 | 0.32409 | 5,662 | 144 | 113 | 39.319444 | 0.856284 | 0.347051 | 0 | 0.202532 | 0 | 0 | 0.01575 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.050633 | false | 0.012658 | 0.050633 | 0 | 0.151899 | 0.012658 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
da40b32e1d6bf126d545d746d9c0416f4eb38e0a | 7,117 | py | Python | phaseprep/workflows/preprocess_phase_wf.py | ostanley/phaseprep | 6e721ea43755f10eb8569b1f4d4461efa3d85a1a | [
"Apache-2.0"
] | 1 | 2019-10-11T17:04:25.000Z | 2019-10-11T17:04:25.000Z | phaseprep/workflows/preprocess_phase_wf.py | ostanley/phaseprep | 6e721ea43755f10eb8569b1f4d4461efa3d85a1a | [
"Apache-2.0"
] | 2 | 2019-10-16T13:13:52.000Z | 2019-12-10T19:38:39.000Z | phaseprep/workflows/preprocess_phase_wf.py | ostanley/phaseprep | 6e721ea43755f10eb8569b1f4d4461efa3d85a1a | [
"Apache-2.0"
] | 2 | 2019-11-18T19:21:44.000Z | 2021-10-19T18:01:03.000Z | import nipype.pipeline.engine as pe
import nipype.interfaces.fsl as fsl
import nipype.interfaces.afni as afni
import phaseprep.interfaces as pp
import nipype.interfaces.utility as ul
def findscalingarg(in_file, bit_depth=12):
    """Build an fslmaths argument string rescaling integer phase to radians.

    :param in_file: path to a NIfTI phase image
    :param bit_depth: acquisition bit depth of the phase data (default 12)
    :return: '-mul <m> -sub <s>' argument string for fsl.maths.MathsCommand,
             or (implicitly) None when the image's scale slope is 1.0
    """
    import nibabel as nb
    import numpy as np
    img = nb.load(in_file)
    # NOTE(review): when slope == 1.0 this function falls through and returns
    # None, which downstream nodes then receive as their `args` input —
    # confirm this is intentional and not a missing return for the unscaled
    # case.
    if img.dataobj.slope != 1.0:
        print('Removing rescale before conversion')
        # Undo the NIfTI scale slope and map the stored integer range
        # (2**bit_depth levels) onto radians.
        mul = np.pi/(2**(bit_depth-1)*img.dataobj.slope)
        sub = np.pi*((img.dataobj.slope+1)/(2**(bit_depth-1)*img.dataobj.slope))
        return '-mul %s -sub %s' % (mul, sub)
def create_preprocess_phase_wf():
    """Create the phase preprocessing workflow with the following steps:

    1) Convert data to float
    2) Determine scaling required for radians
    3) Apply radian scaling
    4) Convert to real and imaginary
    5) Apply magnitude motion correction parameters
    6) Correct geometry changes (AFNI issue)
    7) Convert back to phase
    8) Unwrap and detrend data
    9) Mask data using magnitude mask
    10) Calculate noise from data

    Returns
    -------
    nipype Workflow named 'preprocphase' with `inputspec` and `outputspec`
    identity nodes as its external interface.
    """
    preprocphase = pe.Workflow(name="preprocphase")
    # BUG FIX: the workflow used to be re-created right before .connect(),
    # which silently discarded this config setting. Only one Workflow object
    # is created now, so the setting is preserved on the returned workflow.
    preprocphase.config['execution']['remove_unnecessary_outputs'] = False

    # define inputs
    inputspec = pe.Node(ul.IdentityInterface(fields=['input_phase',  # raw phase data
                                                     'input_mag',  # raw mag data
                                                     'motion_par',  # afni transform concatenated from magnitude data
                                                     'mask_file',  # bet mask from magnitude data
                                                     'rest',  # volumes of rest in block design
                                                     'task',  # volumes of task in block design
                                                     ]),
                        name='inputspec')

    # 1) Convert data to float
    img2float = pe.MapNode(interface=fsl.ImageMaths(out_data_type='float', op_string='', suffix='_dtype'),
                           iterfield=['in_file'],
                           name='img2float')

    # 2) Determine radian scaling required
    findscaling = pe.MapNode(interface=ul.Function(input_names=['in_file'],
                                                   output_names=['scaling_arg'],
                                                   function=findscalingarg),
                             name='findscaling', iterfield=['in_file'])

    # 3) Apply radian scaling
    convert2rad = pe.MapNode(interface=fsl.maths.MathsCommand(),
                             name='convert2rad', iterfield=['in_file', 'args'])

    # 4) Convert to real and imaginary (2 step process)
    # modified from fslcomplex to fslmaths in Sep 2020, bonus also preserves geometry info
    convert2real = pe.MapNode(interface=fsl.maths.MultiImageMaths(op_string=' -cos -mul %s'),
                              name='convert2real', iterfield=['in_file', 'operand_files'])
    convert2imag = pe.MapNode(interface=fsl.maths.MultiImageMaths(op_string=' -sin -mul %s'),
                              name='convert2imag', iterfield=['in_file', 'operand_files'])

    # 5) Apply magnitude motion correction parameters
    mocoreal = pe.MapNode(interface=afni.Allineate(), name='mocoreal',
                          iterfield=['in_file', 'in_matrix'])
    mocoreal.inputs.outputtype = 'NIFTI_GZ'
    mocoreal.inputs.out_file = 'mocophase.nii.gz'
    mocoreal.inputs.num_threads = 2
    mocoimag = mocoreal.clone('mocoimag')

    # 6) Correct geometry changes (AFNI issue)
    cpgeommocoreal = pe.MapNode(interface=fsl.CopyGeom(), name='cpgeommoco',
                                iterfield=['dest_file', 'in_file'])
    cpgeommocoimag = cpgeommocoreal.clone('cpgeommocoimag')

    # 7) Convert back to phase: custom interface using atan2 to avoid sign ambiguity
    convert2phase = pe.MapNode(interface=pp.Convert2Phase(), name='convert2phase',
                               iterfield=['real_image', 'imaginary_image'])

    # 8) Remove first volume, unwrap and detrend phase data
    prepphase = pe.MapNode(interface=pp.PreprocessPhase(), name='prepphase', iterfield=['phase'])

    # 9) Mask data using magnitude mask
    maskfunc = pe.MapNode(interface=fsl.ImageMaths(suffix='_bet',
                                                   op_string='-mas'),
                          iterfield=['in_file'],
                          name='maskfunc')

    # 10) Calculate noise from data
    calcSNR = pe.MapNode(interface=pp.RestAverage(), name='calcSNR', iterfield=['func', 'rest', 'task'])

    # outputspec
    outputspec = pe.Node(ul.IdentityInterface(fields=['proc_phase', 'uw_phase', 'delta_phase', 'std_phase']),
                         name='outputspec')

    preprocphase.connect([(inputspec, img2float, [('input_phase', 'in_file')]),  # 1
                          (inputspec, findscaling, [('input_phase', 'in_file')]),  # 2
                          (findscaling, convert2rad, [('scaling_arg', 'args')]),
                          (img2float, convert2rad, [('out_file', 'in_file')]),
                          (convert2rad, convert2real, [('out_file', 'in_file')]),
                          (convert2rad, convert2imag, [('out_file', 'in_file')]),
                          (inputspec, convert2real, [('input_mag', 'operand_files')]),
                          (inputspec, convert2imag, [('input_mag', 'operand_files')]),
                          (inputspec, mocoreal, [('motion_par', 'in_matrix')]),  # 5 real
                          (convert2real, mocoreal, [('out_file', 'in_file')]),
                          (mocoreal, cpgeommocoreal, [('out_file', 'dest_file')]),  # 6 real
                          (img2float, cpgeommocoreal, [('out_file', 'in_file')]),
                          (inputspec, mocoimag, [('motion_par', 'in_matrix')]),  # 5 imag
                          (convert2imag, mocoimag, [('out_file', 'in_file')]),
                          (mocoimag, cpgeommocoimag, [('out_file', 'dest_file')]),  # 6 imag
                          (img2float, cpgeommocoimag, [('out_file', 'in_file')]),
                          (cpgeommocoimag, convert2phase, [('out_file', 'imaginary_image')]),  # 7
                          (cpgeommocoreal, convert2phase, [('out_file', 'real_image')]),
                          (convert2phase, prepphase, [('phase_image', 'phase')]),  # 8
                          (prepphase, maskfunc, [('detrended_phase', 'in_file')]),  # 9
                          (inputspec, maskfunc, [('mask_file', 'in_file2')]),
                          (maskfunc, outputspec, [('out_file', 'proc_phase')]),
                          (prepphase, outputspec, [('uw_phase', 'uw_phase')]),
                          (prepphase, outputspec, [('delta_phase', 'delta_phase')]),
                          (inputspec, calcSNR, [('rest', 'rest'),  # 10
                                                ('task', 'task')]),
                          (prepphase, calcSNR, [('detrended_phase', 'func')]),
                          (calcSNR, outputspec, [('noise', 'std_phase')])
                          ])
    return preprocphase
if __name__ == "__main__":
    # Build the workflow when the module is run directly (e.g. to inspect
    # or visualize the graph).
    workflow = create_preprocess_phase_wf()
| 53.511278 | 153 | 0.559646 | 701 | 7,117 | 5.53067 | 0.28388 | 0.032499 | 0.05107 | 0.023472 | 0.269796 | 0.128966 | 0.038174 | 0.025277 | 0 | 0 | 0 | 0.015925 | 0.311789 | 7,117 | 132 | 154 | 53.916667 | 0.775623 | 0.159618 | 0 | 0.023256 | 0 | 0 | 0.179292 | 0.004406 | 0 | 0 | 0 | 0 | 0 | 1 | 0.023256 | false | 0 | 0.081395 | 0 | 0.127907 | 0.011628 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
da426e6fadffb074142a9d08e0b642ab357e46fc | 2,514 | py | Python | zorg/buildbot/builders/AnnotatedBuilder.py | DalavanCloud/zorg | d55f03740e589d504dbfe2d5dc9fbc5d551f31fb | [
"Apache-2.0"
] | 1 | 2019-02-10T03:05:05.000Z | 2019-02-10T03:05:05.000Z | zorg/buildbot/builders/AnnotatedBuilder.py | DalavanCloud/llvm-zorg | 14d347a312d5a19bec421f553a3c1cbe1735b273 | [
"Apache-2.0"
] | null | null | null | zorg/buildbot/builders/AnnotatedBuilder.py | DalavanCloud/llvm-zorg | 14d347a312d5a19bec421f553a3c1cbe1735b273 | [
"Apache-2.0"
] | null | null | null | import os
import buildbot
from buildbot.process.properties import WithProperties
from buildbot.steps.shell import SetProperty, ShellCommand
from buildbot.steps.source import SVN
from zorg.buildbot.commands.AnnotatedCommand import AnnotatedCommand
from zorg.buildbot.process.factory import LLVMBuildFactory
def getAnnotatedBuildFactory(
    script,
    clean=False,
    depends_on_projects=None,
    env=None,
    timeout=1200):
    """
    Create a build factory driven by AnnotatedCommand.

    The actual build logic lives in a version-controlled annotated script,
    so builds can be updated without restarting the buildmaster.
    """
    factory = LLVMBuildFactory(
        depends_on_projects=depends_on_projects,
        llvm_srcdir='llvm.src')

    if clean:
        factory.addStep(SetProperty(property='clean', command='echo 1'))

    # The "clean" property normally requests a clean build, but
    # AnnotatedCommand looks at "clobber" instead and turns it into
    # BUILDBOT_CLOBBER=1 in the script environment. Mirror any truthy
    # "clean" property into "clobber" so the script gets the request.
    factory.addStep(SetProperty(
        property='clobber',
        command='echo 1',
        doStepIf=lambda step: step.build.getProperty('clean', False)))

    # Start from a conservative default environment; caller-provided
    # entries override the defaults.
    environment = {
        'TERM': 'dumb'  # Be cautious and disable color output from all tools.
    }
    if env is not None:
        environment.update(env)

    # Keep the annotated scripts checkout up to date next to the build dir.
    annotated_dir = "annotated"
    factory.addStep(SVN(name='update-annotate-scripts',
                        mode='update',
                        svnurl='http://llvm.org/svn/llvm-project/zorg/trunk/'
                               'zorg/buildbot/builders/annotated',
                        workdir=annotated_dir,
                        alwaysUseLatest=True))

    # Explicitly use '/' as separator, because it works on *nix and Windows.
    relative_script = "../%s/%s" % (annotated_dir, script)
    factory.addStep(AnnotatedCommand(name="annotate",
                                     description="annotate",
                                     timeout=timeout,
                                     haltOnFailure=True,
                                     command=WithProperties(
                                         "python %(script)s --jobs=%(jobs:-)s",
                                         script=lambda _: relative_script),
                                     env=environment))
    return factory
| 36.970588 | 78 | 0.622514 | 285 | 2,514 | 5.431579 | 0.477193 | 0.020672 | 0.032946 | 0.034884 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.00395 | 0.295147 | 2,514 | 67 | 79 | 37.522388 | 0.869639 | 0.280827 | 0 | 0 | 0 | 0 | 0.122817 | 0.030986 | 0 | 0 | 0 | 0 | 0 | 1 | 0.022727 | false | 0 | 0.159091 | 0 | 0.204545 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
da43c8aa3780e6b294ec06869cbcdecb77cd3961 | 196 | py | Python | ex001 a ex114/ex008.py | kesia-barros/exercicios-python | 12a019e61c4b29fa29803f394b15d0af304c2ff0 | [
"MIT"
] | null | null | null | ex001 a ex114/ex008.py | kesia-barros/exercicios-python | 12a019e61c4b29fa29803f394b15d0af304c2ff0 | [
"MIT"
] | null | null | null | ex001 a ex114/ex008.py | kesia-barros/exercicios-python | 12a019e61c4b29fa29803f394b15d0af304c2ff0 | [
"MIT"
# Read a length in metres and report its centimetre and millimetre equivalents.
metros = float(input("Digite os metros a serem convertidos: "))
centimetros = metros * 100
milimetros = metros * 1000
print(f"{metros} metros tem {centimetros:.0f} centimetros!")
print(f"{metros} metros tem {milimetros:.0f} milimetros!")
da44b530cb372e0a45fbccf1c2f6a0b2f6789174 | 511 | py | Python | oscar/shared/economy.py | Xaxetrov/OSCAR | f3a264e2bc7d4253756b11b0dbaa58c4f9ea82a6 | [
"Apache-2.0"
] | 5 | 2017-10-11T18:08:13.000Z | 2018-06-11T09:23:03.000Z | oscar/shared/economy.py | Xaxetrov/OSCAR | f3a264e2bc7d4253756b11b0dbaa58c4f9ea82a6 | [
"Apache-2.0"
] | 2 | 2018-04-18T16:25:20.000Z | 2019-04-26T14:49:52.000Z | oscar/shared/economy.py | Xaxetrov/OSCAR | f3a264e2bc7d4253756b11b0dbaa58c4f9ea82a6 | [
"Apache-2.0"
class Economy:
    """Stores the agent's economic state: known structures and SCV count."""

    def __init__(self):
        self.supply_depots = []     # locations of known supply depots
        self.command_centers = []   # locations of known command centers
        self.scv = 8                # current number of SCVs

    def add_supply_depot(self, obs, shared, location):
        """Record a new supply depot after resolving its minimap position."""
        location.compute_minimap_loc(obs, shared)
        self.supply_depots.append(location)

    def add_command_center(self, obs, shared, location):
        """Record a new command center after resolving its minimap position."""
        location.compute_minimap_loc(obs, shared)
        self.command_centers.append(location)

    def add_scv(self):
        """Increase the SCV count by one."""
        self.scv = self.scv + 1
da45407cbca582a2a771bd09b1b4379b9b0b026a | 10,818 | py | Python | tcga_encoder/analyses/old/survival_from_z_space3.py | tedmeeds/tcga_encoder | 805f9a5bcc422a43faea45baa0996c88d346e3b4 | [
"MIT"
] | 2 | 2017-12-19T15:32:46.000Z | 2018-01-12T11:24:24.000Z | tcga_encoder/analyses/old/survival_from_z_space3.py | tedmeeds/tcga_encoder | 805f9a5bcc422a43faea45baa0996c88d346e3b4 | [
"MIT"
] | null | null | null | tcga_encoder/analyses/old/survival_from_z_space3.py | tedmeeds/tcga_encoder | 805f9a5bcc422a43faea45baa0996c88d346e3b4 | [
"MIT"
] | null | null | null | from tcga_encoder.utils.helpers import *
from tcga_encoder.data.data import *
from tcga_encoder.definitions.tcga import *
#from tcga_encoder.definitions.nn import *
from tcga_encoder.definitions.locations import *
#from tcga_encoder.algorithms import *
import seaborn as sns
from sklearn.manifold import TSNE, locally_linear_embedding
import lifelines
from lifelines import CoxPHFitter
from lifelines.datasets import load_regression_dataset
from lifelines.utils import k_fold_cross_validation
from lifelines import KaplanMeierFitter
from lifelines.statistics import logrank_test, multivariate_logrank_test
def main( data_location, results_location ):
  """Rank latent (Z-space) dimensions by survival concordance, per tissue.

  Loads the VAE latent means and TCGA clinical data, then for every tissue
  and every latent dimension computes the concordance index between the
  latent value and survival time, compares it against a null distribution of
  random orderings to get an empirical p-value, saves the resulting tables
  as CSV, and plots Kaplan-Meier curves for significant dimensions.

  NOTE: this module is Python 2 (print statements).

  :param data_location: path of the data directory, relative to HOME_DIR
  :param results_location: path of the results directory, relative to HOME_DIR
  :return: (concordance_z_random, concordance_z_values, concordance_z_p_values)
  """
  # --- resolve paths and output directories ---
  data_path = os.path.join( HOME_DIR ,data_location ) #, "data.h5" )
  results_path = os.path.join( HOME_DIR, results_location )
  data_filename = os.path.join( data_path, "data.h5")
  fill_filename = os.path.join( results_path, "full_vae_fill.h5" )
  save_dir = os.path.join( results_path, "survival_concordance" )
  check_and_mkdir(save_dir)
  survival_curves_dir = os.path.join( save_dir, "sig_curves" )
  check_and_mkdir(survival_curves_dir)
  print "HOME_DIR: ", HOME_DIR
  print "data_filename: ", data_filename
  print "fill_filename: ", fill_filename
  print "LOADING stores"
  data_store = pd.HDFStore( data_filename, "r" )
  fill_store = pd.HDFStore( fill_filename, "r" )
  # --- assemble the latent space over train and validation patients ---
  Z_train = fill_store["/Z/TRAIN/Z/mu"]
  Z_val = fill_store["/Z/VAL/Z/mu"]
  Z = np.vstack( (Z_train.values, Z_val.values) )
  n_z = Z.shape[1]
  #pdb.set_trace()
  z_names = ["z_%d"%z_idx for z_idx in range(Z.shape[1])]
  Z = pd.DataFrame( Z, index = np.hstack( (Z_train.index.values, Z_val.index.values)), columns = z_names )
  barcodes = np.union1d( Z_train.index.values, Z_val.index.values )
  Z=Z.loc[barcodes]
  tissues = data_store["/CLINICAL/TISSUE"].loc[barcodes]
  # Overall Survival (OS): the event call is derived from the "vital status"
  # parameter. The time_to_event is in days, equal to days_to_death if the
  # patient is deceased; if the patient is still living, the time variable is
  # max(days_to_last_known_alive, days_to_last_followup). This pair of
  # clinical parameters is called _EVENT and _TIME_TO_EVENT on the cancer
  # browser.
  ALL_SURVIVAL = data_store["/CLINICAL/data"][["patient.days_to_last_followup","patient.days_to_death","patient.days_to_birth"]]
  # Re-key the survival table by "<tissue>_<barcode>" to match Z's index.
  # (Assumes the clinical index yields (tissue, barcode) pairs — TODO confirm.)
  tissue_barcodes = np.array( ALL_SURVIVAL.index.tolist(), dtype=str )
  surv_barcodes = np.array([ x+"_"+y for x,y in tissue_barcodes])
  NEW_SURVIVAL = pd.DataFrame( ALL_SURVIVAL.values, index =surv_barcodes, columns = ALL_SURVIVAL.columns )
  NEW_SURVIVAL = NEW_SURVIVAL.loc[barcodes]
  #clinical = data_store["/CLINICAL/data"].loc[barcodes]
  # Age is stored as negative days-to-birth; Times sums the (mutually
  # exclusive) followup/death columns; Events marks observed deaths.
  Age = NEW_SURVIVAL[ "patient.days_to_birth" ].values.astype(int)
  Times = NEW_SURVIVAL[ "patient.days_to_last_followup" ].fillna(0).values.astype(int)+NEW_SURVIVAL[ "patient.days_to_death" ].fillna(0).values.astype(int)
  Events = (1-np.isnan( NEW_SURVIVAL[ "patient.days_to_death" ].astype(float)) ).astype(int)
  # Keep patients with a plausible (negative) days_to_birth entry.
  ok_age_query = Age<-10
  ok_age = pp.find(ok_age_query )
  tissues = tissues[ ok_age_query ]
  #pdb.set_trace()
  Age=-Age[ok_age]
  Times = Times[ok_age]
  Events = Events[ok_age]
  barcodes = barcodes[ok_age]
  NEW_SURVIVAL = NEW_SURVIVAL.loc[barcodes]
  # Drop patients with negative followup times (data entry errors).
  #ok_followup_query = NEW_SURVIVAL[ "patient.days_to_last_followup" ].fillna(0).values>=0
  #ok_followup = pp.find( ok_followup_query )
  bad_followup_query = NEW_SURVIVAL[ "patient.days_to_last_followup" ].fillna(0).values.astype(int)<0
  bad_followup = pp.find( bad_followup_query )
  ok_followup_query = 1-bad_followup_query
  ok_followup = pp.find( ok_followup_query )
  # NOTE(review): bad_death is computed but never used to filter — confirm
  # whether negative days_to_death entries should also be excluded.
  bad_death_query = NEW_SURVIVAL[ "patient.days_to_death" ].fillna(0).values.astype(int)<0
  bad_death = pp.find( bad_death_query )
  #pdb.set_trace()
  Age=Age[ok_followup]
  Times = Times[ok_followup]
  Events = Events[ok_followup]
  barcodes = barcodes[ok_followup]
  NEW_SURVIVAL = NEW_SURVIVAL.loc[barcodes]
  # --- attach survival columns and tissue labels to the latent frame ---
  Z = Z.loc[barcodes]
  Z["E"] = Events
  Z["T"] = Times
  Z["Age"] = np.log(Age)
  tissues = data_store["/CLINICAL/TISSUE"].loc[barcodes]
  tissue_names = tissues.columns
  tissue_idx = np.argmax( tissues.values, 1 )
  Z["Tissue"] = tissue_idx
  n_tissues = len(tissue_names)
  # --- result containers: observed vs. random-null concordances ---
  n_random = 100
  random_names = ["r_%d"%(trial_idx) for trial_idx in range(n_random)]
  alpha=0.02          # significance threshold on the empirical p-value
  nbr_to_plot = 5
  concordance_values = {}   # NOTE(review): unused
  concordance_random = {}   # NOTE(review): unused
  concordance_z_values = pd.DataFrame( np.nan*np.ones((n_tissues,n_z) ), index = tissue_names, columns=z_names )
  concordance_z_random = pd.DataFrame( np.nan*np.ones((n_tissues,n_random) ), index = tissue_names, columns=random_names )
  concordance_z_values_xval = pd.DataFrame( np.nan*np.ones((n_tissues,n_z) ), index = tissue_names, columns=z_names )
  concordance_I_values = pd.DataFrame( np.nan*np.ones((n_tissues,n_z) ), index = tissue_names, columns=z_names )
  concordance_I_random = pd.DataFrame( np.nan*np.ones((n_tissues,n_random) ), index = tissue_names, columns=random_names )
  concordance_z_p_values = pd.DataFrame( np.ones( (n_tissues,n_z) ), \
                                         index = tissue_names, \
                                         columns = z_names )
  # cf = CoxPHFitter()
  # scores = k_fold_cross_validation(cf, Z, 'T', event_col='E', k=5)
  # pdb.set_trace()
  split_nbr = 2   # number of groups for the Kaplan-Meier split
  for t_idx in range(n_tissues):
    t_ids = tissue_idx == t_idx
    tissue_name = tissue_names[t_idx]
    if tissue_name == "gbm":
      print "skipping gbm"
      continue
    print "working %s"%(tissue_name)
    bcs = barcodes[t_ids]
    Z_tissue = Z.loc[ bcs ]
    events = Z_tissue["E"]
    times = Z_tissue["T"]
    Z_values = Z_tissue[z_names].values
    n_tissue = len(bcs)
    # Observed concordance of each latent dimension for this tissue.
    print " using z_values"
    for z_idx in range(n_z):
      z = Z_values[:,z_idx]
      z_data = Z_tissue[ ["z_%d"%(z_idx), "E","T"] ]
      I = np.argsort(z)
      # NOTE(review): times/events are reordered by I but the scores `z` are
      # not — confirm whether `z[I]` was intended here.
      z_concordance = lifelines.utils.concordance_index(times[I], z, event_observed=events[I])
      # Fold the concordance so that 0.5 is chance and 1.0 is perfect in
      # either direction of the latent axis.
      z_concordance = max( z_concordance, 1.0-z_concordance )
      concordance_z_values["z_%d"%(z_idx)].loc[tissue_name] = z_concordance
    # Null distribution: concordance of random orderings.
    print " using random"
    for r_idx in range(n_random):
      #z = Z_values[:,z_idx]
      z = np.random.randn(n_tissue)
      I = np.argsort(z) #np.random.permutation(n_tissue)
      z_concordance = lifelines.utils.concordance_index(times[I], z, event_observed=events[I])
      z_concordance = max( z_concordance, 1.0-z_concordance )
      concordance_z_random["r_%d"%(r_idx)].loc[tissue_name] = z_concordance
    # Empirical p-value of each dimension against the random null
    # (add-one smoothed).
    v = concordance_z_values.loc[tissue_name].values
    r = concordance_z_random.loc[tissue_name].values
    concordance_z_p_values.loc[tissue_name] = (1.0 + (v[:,np.newaxis]>r).sum(1))/(1.0+len(r))
    # NOTE(review): this treats a LARGER p as more significant (fraction of
    # null draws beaten); the `< alpha` test below therefore selects the
    # dimensions that rarely beat the null — confirm the intended direction.
    conc=concordance_z_p_values.loc[tissue_name]
    sig = (concordance_z_p_values.loc[tissue_name] < alpha ).astype(int)
    z_sig_names = sig[ sig==1 ].index.values
    # Kaplan-Meier curves for every significant dimension: split patients
    # (ordered by the latent value) at the point holding half the events.
    for z_name in z_sig_names:
      z_idx = int( z_name.split("_")[1] )
      z = Z_values[:,z_idx]
      #z_data = Z_tissue[ ["z_%d"%(z_idx), "E","T"] ]
      I = np.argsort(z)
      cum_events = events[I].cumsum()
      I_splits = [] #[[],[]] #np.array_split( I, split_nbr )
      I_splits.append( pp.find( cum_events <= events.sum()/2.0 ) )
      I_splits.append( pp.find( cum_events > events.sum()/2.0 ) )
      #groups = np.zeros(n_tissue)
      # k = 1
      # for splits in I_splits[1:]:
      #  groups[splits] = k; k+=1
      results = logrank_test(times[I_splits[0]], times[I_splits[-1]], events[ I_splits[0] ], events[ I_splits[-1] ] )
      p_value = results.p_value
      #results2 = logrank_test(times[I_splits[0]]/365.0, times[I_splits[-1]]/365.0, events[ I_splits[0] ], events[ I_splits[-1] ] )
      #pdb.set_trace()
      c = conc[ z_name ]
      f = pp.figure()
      ax= f.add_subplot(111)
      kmf = KaplanMeierFitter()
      k=0
      for splits in I_splits:
        kmf.fit(times[splits], event_observed=events[splits], label="q=%d/%d"%(k+1,split_nbr) )
        ax=kmf.plot(ax=ax,at_risk_counts=False,show_censors=True,ci_show=False)
        k+=1
      pp.ylim(0,1)
      pp.title( "%s %s p-value = %0.4f concordance = %0.3f "%( tissue_name, z_name, p_value, c ) )
      # Saved twice (tissue_z and z_tissue name orderings) so the curves can
      # be browsed sorted either way.
      pp.savefig( survival_curves_dir + "/%s_%s_p%0.5f_c%0.3f.png"%(tissue_name, z_name, p_value, c), format="png", dpi=300)
      pp.savefig( survival_curves_dir + "/%s_%s_p%0.5f_c%0.3f.png"%(z_name, tissue_name, p_value, c), format="png", dpi=300)
      #pdb.set_trace()
  # gbm was skipped above, so drop its all-NaN/placeholder rows.
  concordance_z_random.drop("gbm",inplace=True)
  concordance_z_values.drop("gbm",inplace=True)
  concordance_z_p_values.drop("gbm",inplace=True)
  # concordance_z_p_values = pd.DataFrame( np.ones( concordance_z_values.values.shape), \
  #                                        index = concordance_z_values.index, \
  #                                        columns = concordance_z_values.columns )
  # for tissue in concordance_z_random.index.values:
  #   v = concordance_z_values.loc[tissue].values
  #   r = concordance_z_random.loc[tissue].values
  #   concordance_z_p_values.loc[tissue] = (1.0 + (v[:,np.newaxis]>r).sum(1))/(1.0+len(r))
  # --- persist results ---
  concordance_z_p_values.to_csv( save_dir + "/concordance_z_p_values.csv" )
  concordance_z_random.to_csv( save_dir + "/concordance_z_random.csv" )
  concordance_z_values.to_csv( save_dir + "/concordance_z_values.csv" )
  #pdb.set_trace()
  # --- summary histograms: observed vs. null concordances and p-values ---
  f = pp.figure()
  ax_z = f.add_subplot(221)
  ax_log_z = f.add_subplot(223)
  ax_p = f.add_subplot(222)
  ax_log_p = f.add_subplot(224)
  bins_conc=np.linspace(0.5,1,21)
  bins_p=np.linspace(0.0,1,21)
  ax_z.hist( concordance_z_values.values.flatten(), bins=bins_conc, normed=True, histtype="step", lw=2, log=False)
  ax_z.hist( concordance_z_random.values.flatten(), bins=bins_conc, normed=True, histtype="step", lw=2, log=False)
  ax_log_z.hist( concordance_z_values.values.flatten(), bins=bins_conc, normed=True, histtype="step", lw=2, log=True)
  ax_log_z.hist( concordance_z_random.values.flatten(), bins=bins_conc, normed=True, histtype="step", lw=2, log=True)
  ax_p.hist( concordance_z_p_values.values.flatten(), bins=bins_p, normed=True, histtype="step", lw=2, log=False)
  ax_log_p.hist( concordance_z_p_values.values.flatten(), bins=bins_p, normed=True, histtype="step", lw=2, log=True)
  pp.savefig( save_dir + "/p_values.png", format="png", dpi=300)
  return concordance_z_random, concordance_z_values, concordance_z_p_values
  #, concordance_z_p_values_xval
if __name__ == "__main__":
    # Expect exactly two CLI arguments: the input data directory and the
    # directory where results should be written.  Exit with a usage
    # message instead of an opaque IndexError when they are missing.
    if len(sys.argv) < 3:
        sys.exit("Usage: %s <data_location> <results_location>" % sys.argv[0])
    data_location = sys.argv[1]
    results_location = sys.argv[2]
    concordance_z_random, concordance_z_values, concordance_z_p_values = main( data_location, results_location )
| 41.930233 | 376 | 0.68839 | 1,667 | 10,818 | 4.170366 | 0.15117 | 0.070771 | 0.038838 | 0.038262 | 0.50561 | 0.44275 | 0.384494 | 0.321922 | 0.281789 | 0.270282 | 0 | 0.014281 | 0.177944 | 10,818 | 258 | 377 | 41.930233 | 0.767458 | 0.155574 | 0 | 0.106509 | 0 | 0 | 0.079213 | 0.037135 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0 | 0.071006 | null | null | 0.047337 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
da46e9da88a69412b02b4465644d5259231c811b | 274 | py | Python | genaperiodic.py | gmayankcse15/Poller-and-Deferrable-Server | 9c4c5cae670c6e97959570592135b5c22bdfa4f7 | [
"MIT"
] | null | null | null | genaperiodic.py | gmayankcse15/Poller-and-Deferrable-Server | 9c4c5cae670c6e97959570592135b5c22bdfa4f7 | [
"MIT"
] | null | null | null | genaperiodic.py | gmayankcse15/Poller-and-Deferrable-Server | 9c4c5cae670c6e97959570592135b5c22bdfa4f7 | [
"MIT"
] | null | null | null | import numpy as np
# Generate 3 aperiodic jobs: exponential inter-arrival times (mean 3)
# and exponential execution times (mean 1).
arr_time = np.random.exponential(3, 3)
print(arr_time)
Exec_Time = np.random.exponential(1, 3)
print(Exec_Time)
for i in range(0, 3):
    # The original used a Python 2 print *statement* here while using the
    # print() function above -- a SyntaxError on Python 3.  Formatting via
    # str.format also matches the documented sample output "A(0.3, 1.1)".
    print("A({}, {})".format(round(arr_time[i], 1), round(Exec_Time[i], 1)))
'''
Output
A(0.3, 1.1)
A(2.5, 2.0)
A(0.0, 4.7)
''' | 13.7 | 63 | 0.627737 | 58 | 274 | 2.862069 | 0.413793 | 0.126506 | 0.144578 | 0.277108 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.084746 | 0.138686 | 274 | 20 | 64 | 13.7 | 0.618644 | 0 | 0 | 0 | 0 | 0 | 0.0181 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0 | 0.142857 | null | null | 0.428571 | 0 | 0 | 0 | null | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 3 |
da4804a69488cffc401e2be47069232bee7d172a | 4,828 | py | Python | train.py | yazar1993/TextBoxes-mxnet | 89fbf4151473ab4575a032871683e76978deec0a | [
"MIT"
] | 1 | 2019-02-04T19:03:27.000Z | 2019-02-04T19:03:27.000Z | train.py | yazar1993/TextBoxes-mxnet | 89fbf4151473ab4575a032871683e76978deec0a | [
"MIT"
] | null | null | null | train.py | yazar1993/TextBoxes-mxnet | 89fbf4151473ab4575a032871683e76978deec0a | [
"MIT"
] | null | null | null | import time
from matplotlib import pyplot as plt
import numpy as np
import mxnet as mx
from mxnet import autograd, gluon
import gluoncv as gcv
from gluoncv.utils import download, viz
from model import model_zoo
import argparse
def get_dataloader(net, train_dataset, data_shape, batch_size, num_workers):
    """Build a DataLoader of SSD training batches for ``net``.

    Runs one dummy forward pass in train mode to obtain the network's
    anchor boxes, then wraps ``train_dataset`` with the default SSD
    training transform so each batch yields (image, cls_targets,
    box_targets) stacked tensors.
    """
    from gluoncv.data.batchify import Tuple, Stack, Pad
    from gluoncv.data.transforms.presets.ssd import SSDDefaultTrainTransform

    width = height = data_shape
    # Dummy forward pass: only the anchors output is needed.
    with autograd.train_mode():
        _, _, anchors = net(mx.nd.zeros((1, 3, height, width)))
    transform = SSDDefaultTrainTransform(width, height, anchors)
    # Stack image, cls_targets and box_targets into batch tensors.
    batchify_fn = Tuple(Stack(), Stack(), Stack())
    loader = gluon.data.DataLoader(
        train_dataset.transform(transform),
        batch_size,
        True,
        batchify_fn=batchify_fn,
        last_batch='rollover',
        num_workers=num_workers,
    )
    return loader
parser = argparse.ArgumentParser()
parser.add_argument('--images_root',type=str,help='root folder of images')
parser.add_argument('--LSTpath', type=str, help= 'path to LST file')
parser.add_argument('--batch_size', default = 16, type=int)
parser.add_argument('--num_epochs', default = 100, type=int)
parser.add_argument('--lr', type=float, default = 0.001, help='learning rate')
parser.add_argument('--wd', type=float, default = 0.0005)
parser.add_argument('--momentum',type=float,default = 0.9)
parser.add_argument('--netName', type=str, help='name of network to train')
parser.add_argument('--gpu_ind', type=str, help='comma seperated gpu indicies', default = '0')
parser.add_argument('--finetune_model',type=str, help='path to model to finetune from', default = '')
args = parser.parse_args()
images_root = args.images_root
LSTpath = args.LSTpath
classes = ['text']
batch_size = args.batch_size
num_epochs = args.num_epochs
lr = args.lr
wd = args.wd
momentum = args.momentum
netName = args.netName
gpu_ind=args.gpu_ind
path_to_model = args.finetune_model
# load dataset from Lst file
dataset = gcv.data.LstDetection(LSTpath, root=images_root)
print(dataset)
image= dataset[0][0]
label = dataset[0][1]
print('label:', label)
# display image and label
ax = viz.plot_bbox(image, bboxes=label[:, :4], labels=label[:, 4:5], class_names=classes)
plt.savefig('labeled_image.jpg')
# Initialize the model.
# BUG FIX: the original tested `finetune_model`, which is never defined
# (the parsed --finetune_model argument is bound to `path_to_model` above),
# so this branch raised NameError at runtime.
net, input_size = model_zoo.get_model(netName, pretrained=False, classes=classes)
if path_to_model == '':
    # Train from scratch: randomly initialize all parameters.
    net.initialize()
    net.reset_class(classes)
else:
    # Fine-tune: load previously trained weights, then rebind the output
    # heads to the current class list.
    net.load_parameters(path_to_model)
    net.reset_class(classes)
print(net)
train_data = get_dataloader(net, dataset, input_size, batch_size, 0)
#############################################################################################
# Try to use GPU(s) for training; fall back to CPU on any device failure.
try:
    gpu_ind = gpu_ind.split(',')
    ctx = []
    for cur_gpu in gpu_ind:
        cur_gpu = int(cur_gpu)
        # Allocating a tiny array probes that the device is actually usable.
        a = mx.nd.zeros((1,), ctx=mx.gpu(cur_gpu))
        ctx.append(mx.gpu(cur_gpu))
    print('gpu mode is used')
except Exception:
    # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit still
    # propagate; any GPU allocation/parsing error falls back to CPU.
    print('cpu mode is used')
    ctx = [mx.cpu()]
#############################################################################################
# Start training: SGD with multibox (classification + box-regression) loss.
net.collect_params().reset_ctx(ctx)
trainer = gluon.Trainer(
    net.collect_params(), 'sgd',
    {'learning_rate': lr, 'wd': wd, 'momentum': momentum})
mbox_loss = gcv.loss.SSDMultiBoxLoss()
# Running averages of the two loss components, for progress logging only.
ce_metric = mx.metric.Loss('CrossEntropy')
smoothl1_metric = mx.metric.Loss('SmoothL1')
for epoch in range(0, num_epochs):
    ce_metric.reset()
    smoothl1_metric.reset()
    # NOTE(review): `tic` is set but never used; `btic` times each batch.
    tic = time.time()
    btic = time.time()
    net.hybridize(static_alloc=True, static_shape=True)
    for i, batch in enumerate(train_data):
        # Split each batch tensor across the available devices.
        data = gluon.utils.split_and_load(batch[0], ctx_list=ctx, batch_axis=0)
        cls_targets = gluon.utils.split_and_load(batch[1], ctx_list=ctx, batch_axis=0)
        box_targets = gluon.utils.split_and_load(batch[2], ctx_list=ctx, batch_axis=0)
        with autograd.record():
            cls_preds = []
            box_preds = []
            # Forward pass per device slice; losses are computed jointly.
            for x in data:
                cls_pred, box_pred, _ = net(x)
                cls_preds.append(cls_pred)
                box_preds.append(box_pred)
            sum_loss, cls_loss, box_loss = mbox_loss(
                cls_preds, box_preds, cls_targets, box_targets)
        autograd.backward(sum_loss)
        # Loss is already batch-normalized, so step with a scale of 1.
        trainer.step(1)
        ce_metric.update(0, [l * batch_size for l in cls_loss])
        smoothl1_metric.update(0, [l * batch_size for l in box_loss])
        name1, loss1 = ce_metric.get()
        name2, loss2 = smoothl1_metric.get()
        # Log throughput and current loss averages every 20 batches.
        if i % 20 == 0:
            print('[Epoch {}][Batch {}], Speed: {:.3f} samples/sec, {}={:.3f}, {}={:.3f}'.format(
                epoch, i, batch_size/(time.time()-btic), name1, loss1, name2, loss2))
        btic = time.time()
# Persist the final weights after all epochs complete.
net.save_parameters(netName + '_icdar2013.params')
| 37.426357 | 101 | 0.665493 | 673 | 4,828 | 4.579495 | 0.283804 | 0.029202 | 0.055159 | 0.016548 | 0.095717 | 0.069111 | 0.040883 | 0.018819 | 0.018819 | 0 | 0 | 0.015178 | 0.167564 | 4,828 | 128 | 102 | 37.71875 | 0.75168 | 0.029619 | 0 | 0.037383 | 0 | 0 | 0.095948 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.009346 | false | 0 | 0.102804 | 0 | 0.121495 | 0.056075 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
da48925dd8d85e25b1591c7ad7324c1b91372e21 | 484 | py | Python | aws/build_saint_features.py | fabien-vavrand/kaggle-riiid | 3302955980e0d4bb2dbc72bcd369000b0724f1e7 | [
"MIT"
] | null | null | null | aws/build_saint_features.py | fabien-vavrand/kaggle-riiid | 3302955980e0d4bb2dbc72bcd369000b0724f1e7 | [
"MIT"
] | null | null | null | aws/build_saint_features.py | fabien-vavrand/kaggle-riiid | 3302955980e0d4bb2dbc72bcd369000b0724f1e7 | [
"MIT"
] | null | null | null | from doppel import DoppelProject
from riiid.utils import configure_console_logging
from riiid.config import SRC_PATH
from riiid.aws.config import CONTEXT, PACKAGES
configure_console_logging()
# Launch the saint-features build as a remote Doppel project: the local
# source tree at SRC_PATH is shipped and `python -m riiid.aws.build_saint_features`
# is run on a single instance.
project = DoppelProject(
    name='riiid-saint-features',
    path=SRC_PATH,
    entry_point='-m riiid.aws.build_saint_features',
    packages=PACKAGES,
    python='3.7.6',
    n_instances=1,
    min_memory=128,
    # Fixed hash seed keeps Python hashing deterministic across runs.
    env_vars={'PYTHONHASHSEED': '1'},
    context=CONTEXT
)
project.start()
| 22 | 52 | 0.743802 | 64 | 484 | 5.4375 | 0.578125 | 0.077586 | 0.132184 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.019512 | 0.152893 | 484 | 21 | 53 | 23.047619 | 0.829268 | 0 | 0 | 0 | 0 | 0 | 0.150826 | 0.061983 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.235294 | 0 | 0.235294 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
da48982c5d0c6163ec2e9654c124d812f977e516 | 14,174 | py | Python | effects/card_draw.py | MrCoft/EngiMod | 65c90bd9231ac388d8af7849a1835914f1eefc78 | [
"MIT"
] | null | null | null | effects/card_draw.py | MrCoft/EngiMod | 65c90bd9231ac388d8af7849a1835914f1eefc78 | [
"MIT"
] | null | null | null | effects/card_draw.py | MrCoft/EngiMod | 65c90bd9231ac388d8af7849a1835914f1eefc78 | [
"MIT"
] | null | null | null | from engi_mod import *
Java(
path = "fruitymod.actions.ChooseCardActionBase",
base = "AbstractGameAction",
code = """
protected AbstractPlayer p;
private String text;
private boolean isDraw = false; // NOTE: to trigger events and disable on "No Draw"
private boolean putsIntoHand = true; // NOTE: to make decisions if hand is full
private boolean random = false; // NOTE: skip selection
public ChooseCardActionBase(final AbstractCreature source, int amount, String text, boolean isDraw, boolean putsIntoHand, boolean random) {
this.isDraw = isDraw;
if (isDraw && AbstractDungeon.player.hasPower("No Draw")) {
// NOTE: "put into hand" is not draw
AbstractDungeon.player.getPower("No Draw").flash();
setValues(AbstractDungeon.player, source, amount);
isDone = true;
duration = 0.0f;
actionType = ActionType.WAIT;
return;
}
if (isDraw)
putsIntoHand = true;
p = AbstractDungeon.player;
this.text = text;
this.putsIntoHand = putsIntoHand;
this.random = random;
setValues(null, source, amount);
actionType = ActionType.CARD_MANIPULATION;
if (isDraw)
actionType = ActionType.DRAW;
duration = Settings.ACTION_DUR_FASTER;
}
private CardGroup cards;
private HashMap<AbstractCard,AbstractCard> handMap;
@Override
public void update() {
if (AbstractDungeon.getCurrRoom().isBattleEnding()) {
isDone = true;
return;
}
if (duration == Settings.ACTION_DUR_FASTER) {
cards = new CardGroup(CardGroup.CardGroupType.UNSPECIFIED);
handMap = new HashMap<AbstractCard,AbstractCard>();
CardGroup[] groups = new CardGroup[]{p.drawPile, p.hand, p.discardPile, p.exhaustPile};
for (CardGroup group : groups) {
for (AbstractCard card : group.group) {
if (cardFilter(card)) {
if (group == p.hand) {
AbstractCard copy = card.makeStatEquivalentCopy();
handMap.put(copy, card);
card = copy;
}
cards.addToTop(card);
card.stopGlowing();
card.unhover();
card.unfadeOut();
}
}
if (group == p.drawPile)
cards.shuffle();
}
init();
if (cards.isEmpty()) {
isDone = true;
finish();
return;
}
if (random) {
cards.shuffle(); // NOTE: random order even if instant
if (cards.size() > amount)
cards.group.subList(amount, cards.size()).clear();
isDone = true;
for (AbstractCard card : cards.group) {
card = handMap.getOrDefault(card, card);
cardChosen(card);
if (isDraw)
SpireUtils.drawTriggers(card);
}
finish();
return;
}
if (cards.size() <= amount && (!putsIntoHand || (putsIntoHand && cards.size() <= 10 - p.hand.size()))) {
isDone = true;
for (AbstractCard card : cards.group) {
card = handMap.getOrDefault(card, card);
cardChosen(card);
if (isDraw)
SpireUtils.drawTriggers(card);
}
finish();
return;
} else {
AbstractDungeon.gridSelectScreen.open(cards, amount, text, false, false, false, false);
tickDuration();
return;
}
}
if (!AbstractDungeon.gridSelectScreen.selectedCards.isEmpty()) {
for (AbstractCard card : AbstractDungeon.gridSelectScreen.selectedCards) {
card = handMap.getOrDefault(card, card);
cardChosen(card);
if (isDraw)
SpireUtils.drawTriggers(card);
}
for (AbstractCard card : cards.group) {
card = handMap.getOrDefault(card, card);
card.unhover();
card.untip(); // NOTE: after duplicating a card, the original is drawn still showing the tooltip
}
AbstractDungeon.gridSelectScreen.selectedCards.clear();
finish();
}
tickDuration();
}
public boolean cardFilter(AbstractCard card) {
return true;
}
public void cardChosen(AbstractCard card) {}
protected CardDraw cardDraw;
public void init() {
cardDraw = new CardDraw();
}
public void finish() {
p.hand.refreshHandLayout();
cardDraw.msg();
}
"""
)
Java(
# NOTE: PRIORITY
# to upgrade and draw cards, you look through the deck,
# picking unupgraded cards first
# NOTE: SHUFFLE
# if there are less than `amount` unupgraded cards,
# you draw upgraded cards off the top
# this special seeking only sees to the bottom of the deck
# which is shuffled when empty, so drawing with an empty deck is worse
# than with a full one, as it has more prioritized cards to find
# NOTE: FULL HAND
# when the hand is full, the extra cards aren't drawn,
# but they can be selected, so the full amount is upgraded
# even if it isn't drawn
# NOTE: REPEATED UPGRADES
# if being upgradable gives a card priority, repeatedly upgradable cards
# could be found as the first prioritized card repeatedly
# e.g. Battle Trance
path = "fruitymod.actions.DrawCardActionBase",
base = "AbstractGameAction",
code = """
private boolean shuffleCheck;
private static final Logger logger;
public DrawCardActionBase(final AbstractCreature source, final int amount, final boolean endTurnDraw) {
this.shuffleCheck = false;
if (endTurnDraw) {
AbstractDungeon.topLevelEffects.add(new PlayerTurnEffect());
}
else if (AbstractDungeon.player.hasPower("No Draw")) {
AbstractDungeon.player.getPower("No Draw").flash();
this.setValues(AbstractDungeon.player, source, amount);
this.duration = 0.0f;
this.actionType = ActionType.WAIT;
return;
}
this.setValues(AbstractDungeon.player, source, amount);
this.actionType = ActionType.DRAW;
if (Settings.FAST_MODE) {
this.duration = Settings.ACTION_DUR_XFAST;
}
else {
this.duration = Settings.ACTION_DUR_FASTER;
}
}
public DrawCardActionBase(final AbstractCreature source, final int amount) {
this(source, amount, false);
}
@Override
public void update() {
if (this.actionType == ActionType.WAIT) {
this.isDone = true;
finish();
return;
}
if (this.amount <= 0) {
this.isDone = true;
return;
}
final int deckSize = AbstractDungeon.player.drawPile.size();
final int discardSize = AbstractDungeon.player.discardPile.size();
if (SoulGroup.isActive()) {
return;
}
if (deckSize + discardSize == 0) {
this.isDone = true;
return;
}
if (AbstractDungeon.player.hand.size() == 10) {
finish();
AbstractDungeon.player.createHandIsFullDialog();
this.isDone = true;
return;
}
if (!this.shuffleCheck) {
if (this.amount > deckSize) {
final int tmp = this.amount - deckSize;
AbstractDungeon.actionManager.addToTop(new DrawCardActionBase(AbstractDungeon.player, tmp));
AbstractDungeon.actionManager.addToTop(new EmptyDeckShuffleAction());
if (deckSize != 0) {
AbstractDungeon.actionManager.addToTop(new DrawCardActionBase(AbstractDungeon.player, deckSize));
}
this.amount = 0;
this.isDone = true;
}
this.shuffleCheck = true;
}
this.duration -= Gdx.graphics.getDeltaTime();
if (this.amount != 0 && this.duration < 0.0f) {
if (Settings.FAST_MODE) {
this.duration = Settings.ACTION_DUR_XFAST;
}
else {
this.duration = Settings.ACTION_DUR_FASTER;
}
--this.amount;
if (!AbstractDungeon.player.drawPile.isEmpty()) {
AbstractCard card = findCard();
AbstractDungeon.player.drawPile.group.remove(card);
AbstractDungeon.player.drawPile.addToTop(card);
onSelect(card);
AbstractDungeon.player.draw();
AbstractDungeon.player.hand.refreshHandLayout();
onDraw(card);
}
else {
DrawCardActionBase.logger.warn("Player attempted to draw from an empty drawpile mid-DrawAction?MASTER DECK: " + AbstractDungeon.player.masterDeck.getCardNames());
this.isDone = true;
}
if (this.amount == 0) {
this.isDone = true;
finish();
}
}
}
public AbstractCard findCard() {
if (AbstractDungeon.player.drawPile.isEmpty()) {
return null;
}
for (int i = 0; i < AbstractDungeon.player.drawPile.size(); ++i) {
AbstractCard card = AbstractDungeon.player.drawPile.getNCardFromTop(i);
if (cardPriority(card)) {
return card;
}
}
return AbstractDungeon.player.drawPile.getTopCard();
}
public void finish() {
for (int i = 0; i < amount; ++i) {
AbstractCard card = findCard();
if (card == null)
return;
onSelect(card);
}
}
public boolean cardPriority(AbstractCard card) { return false; }
public void onSelect(AbstractCard card) {}
public void onDraw(AbstractCard card) {}
static {
logger = LogManager.getLogger(DrawCardActionBase.class.getName());
}
"""
)
Java(
path = "fruitymod.actions.DrawSpecificCardAction",
base = "AbstractGameAction",
code = """
public DrawCardActionBase drawAction;
public AbstractCard card;
public DrawSpecificCardAction(final AbstractCreature source) {
if (AbstractDungeon.player.hasPower("No Draw")) {
AbstractDungeon.player.getPower("No Draw").flash();
this.setValues(AbstractDungeon.player, source, amount);
this.duration = 0.0f;
this.actionType = ActionType.WAIT;
return;
}
this.setValues(AbstractDungeon.player, source, 1);
this.actionType = ActionType.DRAW;
if (Settings.FAST_MODE) {
this.duration = Settings.ACTION_DUR_XFAST;
}
else {
this.duration = Settings.ACTION_DUR_FASTER;
}
}
@Override
public void update() {
if (this.actionType == ActionType.WAIT) {
drawAction.onSelect(card);
this.isDone = true;
return;
}
if (AbstractDungeon.player.hand.size() == 10) {
drawAction.onSelect(card);
AbstractDungeon.player.createHandIsFullDialog();
this.isDone = true;
return;
}
this.duration -= Gdx.graphics.getDeltaTime();
if (this.amount != 0 && this.duration < 0.0f) {
AbstractDungeon.player.drawPile.group.remove(card);
AbstractDungeon.player.drawPile.addToTop(card);
drawAction.onSelect(card);
AbstractDungeon.player.draw();
AbstractDungeon.player.hand.refreshHandLayout();
drawAction.onDraw(card);
this.isDone = true;
}
}
"""
)
Java(
path = "fruitymod.CardDraw",
code = """
private boolean failed = false;
public AbstractCard get(AbstractCard card) {
if (AbstractDungeon.player.hand.size() == 10) {
failed = true;
return null;
}
card.unfadeOut();
card.unhover();
card.fadingOut = false;
return card;
}
public void msg() {
if (failed)
AbstractDungeon.player.createHandIsFullDialog();
}
"""
)
# problem - because of animations, shuffling is solved by parting the problem into "draw deck / shuffle / draw rest"
# find the cards first, then push their specific
# continue doing it until empty, then push reshuffle, then a revive?
# i hate this code so much
| 38.832877 | 182 | 0.506632 | 1,146 | 14,174 | 6.247819 | 0.22164 | 0.105587 | 0.019553 | 0.027933 | 0.382682 | 0.334358 | 0.326257 | 0.272346 | 0.25419 | 0.217039 | 0 | 0.003319 | 0.404826 | 14,174 | 364 | 183 | 38.93956 | 0.845424 | 0.0678 | 0 | 0.439628 | 0 | 0.01548 | 0.975286 | 0.243348 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 0.003096 | 0 | 0.021672 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 2 |
da49b2e919a71a34082302973c4047cd68a8918b | 2,205 | py | Python | data_structures.py | davecom/MazeSolvingGUI | 291e0dfb146d7743ecb108413f5e7422e0719019 | [
"Apache-2.0"
] | 6 | 2020-06-26T00:45:35.000Z | 2022-01-15T19:37:36.000Z | data_structures.py | davecom/MazeSolvingGUI | 291e0dfb146d7743ecb108413f5e7422e0719019 | [
"Apache-2.0"
] | null | null | null | data_structures.py | davecom/MazeSolvingGUI | 291e0dfb146d7743ecb108413f5e7422e0719019 | [
"Apache-2.0"
] | 3 | 2021-05-03T16:48:29.000Z | 2021-11-19T20:21:29.000Z | # data_structures.py
# Copyright 2020 David Kopec
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations
from typing import TypeVar, Generic, List, Deque, Optional
T = TypeVar('T')
class Stack(Generic[T]):
    """A minimal LIFO stack backed by a Python list.

    The end of the underlying list is the top of the stack, so push and
    pop are both O(1).
    """

    def __init__(self) -> None:
        self.container: List[T] = []

    @property
    def empty(self) -> bool:
        """True exactly when no items remain on the stack."""
        return len(self.container) == 0

    def push(self, item: T) -> None:
        """Place *item* on top of the stack."""
        self.container.append(item)

    def pop(self) -> T:
        """Remove and return the most recently pushed item (LIFO)."""
        return self.container.pop()

    def __repr__(self) -> str:
        return repr(self.container)
class Node(Generic[T]):
    """A search-tree node: a *state* plus parent link and path-cost data.

    ``cost`` is the path cost so far (g) and ``heuristic`` the estimated
    remaining cost (h); ordering uses f = g + h so nodes can be stored in
    a priority queue.
    """

    def __init__(self, state: T, parent: Optional[Node], cost: float = 0.0, heuristic: float = 0.0) -> None:
        self.state: T = state
        self.parent: Optional[Node] = parent
        self.cost: float = cost
        self.heuristic: float = heuristic

    def __lt__(self, other: Node) -> bool:
        # Compare total estimated cost f = g + h.
        mine = self.cost + self.heuristic
        theirs = other.cost + other.heuristic
        return mine < theirs
def node_to_path(node: Node[T]) -> List[T]:
    """Return the states from the root down to *node*.

    Follows parent links from *node* back to the root, then returns the
    collected states in root-first order.
    """
    reversed_states: List[T] = []
    current: Optional[Node[T]] = node
    while current is not None:
        reversed_states.append(current.state)
        current = current.parent
    return list(reversed(reversed_states))
class Queue(Generic[T]):
    """A minimal FIFO queue backed by a deque (O(1) at both ends)."""
    def __init__(self) -> None:
        self.container: Deque[T] = Deque()
    @property
    def empty(self) -> bool:
        """True exactly when no items remain in the queue."""
        return not self.container # an empty deque is falsy
    def push(self, item: T) -> None:
        """Add *item* to the back of the queue."""
        self.container.append(item)
    def pop(self) -> T:
        """Remove and return the item at the front of the queue (FIFO)."""
        return self.container.popleft() # FIFO
def __repr__(self) -> str:
return repr(self.container) | 29.4 | 108 | 0.65805 | 307 | 2,205 | 4.625407 | 0.371336 | 0.091549 | 0.047887 | 0.03169 | 0.320423 | 0.307042 | 0.307042 | 0.307042 | 0.204225 | 0.204225 | 0 | 0.007092 | 0.232653 | 2,205 | 75 | 109 | 29.4 | 0.832151 | 0.303855 | 0 | 0.428571 | 0 | 0 | 0.00066 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.309524 | false | 0 | 0.047619 | 0.166667 | 0.619048 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 2 |
da4a7e68c0832aca421b9ec0a6a9d00a1f584040 | 1,933 | py | Python | src/sentry/incidents/endpoints/organization_alert_rule_trigger_details.py | kinghuang/sentry | 5c22673994a62f54a782d1c595852986ccc51ae9 | [
"BSD-3-Clause"
] | 1 | 2019-10-17T17:46:16.000Z | 2019-10-17T17:46:16.000Z | src/sentry/incidents/endpoints/organization_alert_rule_trigger_details.py | kinghuang/sentry | 5c22673994a62f54a782d1c595852986ccc51ae9 | [
"BSD-3-Clause"
] | null | null | null | src/sentry/incidents/endpoints/organization_alert_rule_trigger_details.py | kinghuang/sentry | 5c22673994a62f54a782d1c595852986ccc51ae9 | [
"BSD-3-Clause"
] | null | null | null | from __future__ import absolute_import
from rest_framework import status
from rest_framework.response import Response
from sentry.api.serializers import serialize
from sentry.api.serializers.models.alert_rule_trigger import DetailedAlertRuleTriggerSerializer
from sentry.incidents.endpoints.bases import OrganizationAlertRuleTriggerEndpoint
from sentry.incidents.endpoints.serializers import AlertRuleTriggerSerializer
from sentry.incidents.logic import AlreadyDeletedError, delete_alert_rule_trigger
class OrganizationAlertRuleTriggerDetailsEndpoint(OrganizationAlertRuleTriggerEndpoint):
    def get(self, request, organization, alert_rule, alert_rule_trigger):
        """
        Fetch an alert rule trigger.
        ``````````````````
        :auth: required
        """
        serialized = serialize(
            alert_rule_trigger, request.user, DetailedAlertRuleTriggerSerializer()
        )
        return Response(serialized)

    def put(self, request, organization, alert_rule, alert_rule_trigger):
        """Update an alert rule trigger from the request payload."""
        serializer = AlertRuleTriggerSerializer(
            context={
                "organization": organization,
                "alert_rule": alert_rule,
                "access": request.access,
            },
            instance=alert_rule_trigger,
            data=request.data,
            partial=True,
        )
        # Invalid payloads short-circuit with a 400 and the field errors.
        if not serializer.is_valid():
            return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
        trigger = serializer.save()
        return Response(serialize(trigger, request.user), status=status.HTTP_200_OK)

    def delete(self, request, organization, alert_rule, alert_rule_trigger):
        """Delete an alert rule trigger; 400 if it was already deleted."""
        try:
            delete_alert_rule_trigger(alert_rule_trigger)
        except AlreadyDeletedError:
            return Response(
                "This trigger has already been deleted", status=status.HTTP_400_BAD_REQUEST
            )
        return Response(status=status.HTTP_204_NO_CONTENT)
| 39.44898 | 96 | 0.702535 | 188 | 1,933 | 6.989362 | 0.351064 | 0.10274 | 0.121766 | 0.079148 | 0.17656 | 0.153729 | 0.109589 | 0.109589 | 0 | 0 | 0 | 0.008 | 0.224004 | 1,933 | 48 | 97 | 40.270833 | 0.868 | 0.032592 | 0 | 0 | 0 | 0 | 0.035519 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.085714 | false | 0 | 0.228571 | 0 | 0.485714 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
da4b5460725c138c9033a42b82f19db166cfb84d | 1,935 | py | Python | tests/test_one_of_schema.py | bwind/sticky-marshmallow | e3e7c215fe9b221164c17121197ffca1b396e81a | [
"MIT"
] | 2 | 2019-11-28T11:25:47.000Z | 2019-12-05T09:53:31.000Z | tests/test_one_of_schema.py | bwind/sticky-marshmallow | e3e7c215fe9b221164c17121197ffca1b396e81a | [
"MIT"
] | null | null | null | tests/test_one_of_schema.py | bwind/sticky-marshmallow | e3e7c215fe9b221164c17121197ffca1b396e81a | [
"MIT"
] | 1 | 2019-11-29T15:32:14.000Z | 2019-11-29T15:32:14.000Z | from dataclasses import dataclass
from typing import List
import bson
from sticky_marshmallow import Repository
from marshmallow_oneofschema import OneOfSchema
from marshmallow import fields, post_load, Schema
from tests.db import connect
@dataclass
class Foo:
    # Base record: document id (None before first save) plus a shared field.
    id: str
    foo: str
@dataclass
class A(Foo):
    bar: str
@dataclass
class B(Foo):
    baz: str
class BaseSchema(Schema):
    # Marshmallow fields shared by every Foo subtype schema.
    id = fields.Str(allow_none=True)
    foo = fields.Str()
class ASchema(BaseSchema):
    bar = fields.Str()
    @post_load
    def make_object(self, data, **kwargs):
        # Deserialize straight into an A dataclass instance.
        return A(**data)
class BSchema(BaseSchema):
    baz = fields.Str()
class FooSchema(OneOfSchema):
    # Polymorphic schema: the type key selects the concrete sub-schema.
    type_schemas = {"a": ASchema, "b": BSchema}
    def get_obj_type(self, obj):
        # Map an instance to its type key, e.g. A -> "a", B -> "b".
        return obj.__class__.__name__.lower()
class FooRepository(Repository):
    class Meta:
        schema = FooSchema
@dataclass
class Master:
    # Aggregate holding a list of Foo records.
    foos: List[Foo]
class MasterSchema(Schema):
    foos = fields.Nested(FooSchema, many=True)
class MasterRepository(Repository):
    class Meta:
        schema = MasterSchema
class TestOneOfSchema:
    # Integration tests for persisting polymorphic (OneOfSchema) documents.
    def setup(self):
        # Fresh connection and empty collections before each test.
        connect()
        FooRepository().delete_many()
        MasterRepository().delete_many()
    def teardown(self):
        FooRepository().delete_many()
        MasterRepository().delete_many()
    def test_collection_name(self):
        # Collection name derives from the base type name ("foo").
        assert FooRepository().collection.name == "foo"
    def test_saves_reference(self):
        a = A(id=None, foo="x", bar="y")
        master = Master(foos=[a])
        MasterRepository().save(master)
        # Nested docs are stored as ObjectId references, not embedded dicts,
        # and the referenced Foo document itself is persisted.
        assert isinstance(
            MasterRepository().collection.find_one()["foos"][0], bson.ObjectId
        )
        assert FooRepository().collection.find_one()
def test_dereferences(self):
a = A(id=None, foo="x", bar="y")
master = Master(foos=[a])
MasterRepository().save(master)
MasterRepository().get()
| 19.744898 | 78 | 0.657364 | 222 | 1,935 | 5.603604 | 0.324324 | 0.045016 | 0.027331 | 0.040193 | 0.184887 | 0.184887 | 0.184887 | 0.101286 | 0.101286 | 0.101286 | 0 | 0.00067 | 0.228941 | 1,935 | 97 | 79 | 19.948454 | 0.83311 | 0 | 0 | 0.246154 | 0 | 0 | 0.006718 | 0 | 0 | 0 | 0 | 0 | 0.046154 | 1 | 0.107692 | false | 0 | 0.107692 | 0.030769 | 0.630769 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 |