hexsha
string
size
int64
ext
string
lang
string
max_stars_repo_path
string
max_stars_repo_name
string
max_stars_repo_head_hexsha
string
max_stars_repo_licenses
list
max_stars_count
int64
max_stars_repo_stars_event_min_datetime
string
max_stars_repo_stars_event_max_datetime
string
max_issues_repo_path
string
max_issues_repo_name
string
max_issues_repo_head_hexsha
string
max_issues_repo_licenses
list
max_issues_count
int64
max_issues_repo_issues_event_min_datetime
string
max_issues_repo_issues_event_max_datetime
string
max_forks_repo_path
string
max_forks_repo_name
string
max_forks_repo_head_hexsha
string
max_forks_repo_licenses
list
max_forks_count
int64
max_forks_repo_forks_event_min_datetime
string
max_forks_repo_forks_event_max_datetime
string
content
string
avg_line_length
float64
max_line_length
int64
alphanum_fraction
float64
qsc_code_num_words_quality_signal
int64
qsc_code_num_chars_quality_signal
float64
qsc_code_mean_word_length_quality_signal
float64
qsc_code_frac_words_unique_quality_signal
float64
qsc_code_frac_chars_top_2grams_quality_signal
float64
qsc_code_frac_chars_top_3grams_quality_signal
float64
qsc_code_frac_chars_top_4grams_quality_signal
float64
qsc_code_frac_chars_dupe_5grams_quality_signal
float64
qsc_code_frac_chars_dupe_6grams_quality_signal
float64
qsc_code_frac_chars_dupe_7grams_quality_signal
float64
qsc_code_frac_chars_dupe_8grams_quality_signal
float64
qsc_code_frac_chars_dupe_9grams_quality_signal
float64
qsc_code_frac_chars_dupe_10grams_quality_signal
float64
qsc_code_frac_chars_replacement_symbols_quality_signal
float64
qsc_code_frac_chars_digital_quality_signal
float64
qsc_code_frac_chars_whitespace_quality_signal
float64
qsc_code_size_file_byte_quality_signal
float64
qsc_code_num_lines_quality_signal
float64
qsc_code_num_chars_line_max_quality_signal
float64
qsc_code_num_chars_line_mean_quality_signal
float64
qsc_code_frac_chars_alphabet_quality_signal
float64
qsc_code_frac_chars_comments_quality_signal
float64
qsc_code_cate_xml_start_quality_signal
float64
qsc_code_frac_lines_dupe_lines_quality_signal
float64
qsc_code_cate_autogen_quality_signal
float64
qsc_code_frac_lines_long_string_quality_signal
float64
qsc_code_frac_chars_string_length_quality_signal
float64
qsc_code_frac_chars_long_word_length_quality_signal
float64
qsc_code_frac_lines_string_concat_quality_signal
float64
qsc_code_cate_encoded_data_quality_signal
float64
qsc_code_frac_chars_hex_words_quality_signal
float64
qsc_code_frac_lines_prompt_comments_quality_signal
float64
qsc_code_frac_lines_assert_quality_signal
float64
qsc_codepython_cate_ast_quality_signal
float64
qsc_codepython_frac_lines_func_ratio_quality_signal
float64
qsc_codepython_cate_var_zero_quality_signal
bool
qsc_codepython_frac_lines_pass_quality_signal
float64
qsc_codepython_frac_lines_import_quality_signal
float64
qsc_codepython_frac_lines_simplefunc_quality_signal
float64
qsc_codepython_score_lines_no_logic_quality_signal
float64
qsc_codepython_frac_lines_print_quality_signal
float64
qsc_code_num_words
int64
qsc_code_num_chars
int64
qsc_code_mean_word_length
int64
qsc_code_frac_words_unique
null
qsc_code_frac_chars_top_2grams
int64
qsc_code_frac_chars_top_3grams
int64
qsc_code_frac_chars_top_4grams
int64
qsc_code_frac_chars_dupe_5grams
int64
qsc_code_frac_chars_dupe_6grams
int64
qsc_code_frac_chars_dupe_7grams
int64
qsc_code_frac_chars_dupe_8grams
int64
qsc_code_frac_chars_dupe_9grams
int64
qsc_code_frac_chars_dupe_10grams
int64
qsc_code_frac_chars_replacement_symbols
int64
qsc_code_frac_chars_digital
int64
qsc_code_frac_chars_whitespace
int64
qsc_code_size_file_byte
int64
qsc_code_num_lines
int64
qsc_code_num_chars_line_max
int64
qsc_code_num_chars_line_mean
int64
qsc_code_frac_chars_alphabet
int64
qsc_code_frac_chars_comments
int64
qsc_code_cate_xml_start
int64
qsc_code_frac_lines_dupe_lines
int64
qsc_code_cate_autogen
int64
qsc_code_frac_lines_long_string
int64
qsc_code_frac_chars_string_length
int64
qsc_code_frac_chars_long_word_length
int64
qsc_code_frac_lines_string_concat
null
qsc_code_cate_encoded_data
int64
qsc_code_frac_chars_hex_words
int64
qsc_code_frac_lines_prompt_comments
int64
qsc_code_frac_lines_assert
int64
qsc_codepython_cate_ast
int64
qsc_codepython_frac_lines_func_ratio
int64
qsc_codepython_cate_var_zero
int64
qsc_codepython_frac_lines_pass
int64
qsc_codepython_frac_lines_import
int64
qsc_codepython_frac_lines_simplefunc
int64
qsc_codepython_score_lines_no_logic
int64
qsc_codepython_frac_lines_print
int64
effective
string
hits
int64
6d937f6a37d6c9c86d31b14c4ba6b2dbc1bfd9e0
199
py
Python
sqlcon.py
nuSapb/python-mssql-sync
66ae26a29f7412a036284f80525088b8452d4931
[ "MIT" ]
null
null
null
sqlcon.py
nuSapb/python-mssql-sync
66ae26a29f7412a036284f80525088b8452d4931
[ "MIT" ]
null
null
null
sqlcon.py
nuSapb/python-mssql-sync
66ae26a29f7412a036284f80525088b8452d4931
[ "MIT" ]
null
null
null
from sqlalchemy import create_engine import config def dbcon (user, pwd, host, dbname, driver): con = create_engine(f'mssql+pyodbc://{user}:{pwd}@{host}/{dbname}?driver={driver}') return con
33.166667
87
0.713568
28
199
5
0.642857
0.171429
0.157143
0.242857
0.328571
0
0
0
0
0
0
0
0.130653
199
6
88
33.166667
0.809249
0
0
0
0
0
0.295
0.295
0
0
0
0
0
1
0.2
false
0
0.4
0
0.8
0
1
0
0
null
0
0
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
0
0
0
4
6dbeff2571f5310f706416833ffaaa17c43b24ce
928
py
Python
saleor/product/migrations/0159_auto_20220209_1501.py
victor-abz/saleor
f8e2b49703d995d4304d5a690dbe9c83631419d0
[ "CC-BY-4.0" ]
1,392
2021-10-06T15:54:28.000Z
2022-03-31T20:50:55.000Z
saleor/product/migrations/0159_auto_20220209_1501.py
victor-abz/saleor
f8e2b49703d995d4304d5a690dbe9c83631419d0
[ "CC-BY-4.0" ]
888
2021-10-06T10:48:54.000Z
2022-03-31T11:00:30.000Z
saleor/product/migrations/0159_auto_20220209_1501.py
victor-abz/saleor
f8e2b49703d995d4304d5a690dbe9c83631419d0
[ "CC-BY-4.0" ]
538
2021-10-07T16:21:27.000Z
2022-03-31T22:58:57.000Z
# Generated by Django 3.2.12 on 2022-02-09 15:01 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ("product", "0158_auto_20220120_1633"), ] operations = [ migrations.AddField( model_name="product", name="created", field=models.DateTimeField(db_index=True, null=True), ), migrations.AlterField( model_name="product", name="updated_at", field=models.DateTimeField(db_index=True, null=True), ), migrations.AddField( model_name="productvariant", name="created", field=models.DateTimeField(db_index=True, null=True), ), migrations.AddField( model_name="productvariant", name="updated_at", field=models.DateTimeField(db_index=True, null=True), ), ]
27.294118
65
0.577586
91
928
5.747253
0.428571
0.068834
0.183556
0.198853
0.596558
0.596558
0.596558
0.596558
0.596558
0.596558
0
0.049844
0.30819
928
33
66
28.121212
0.764798
0.049569
0
0.703704
1
0
0.120455
0.026136
0
0
0
0
0
1
0
false
0
0.037037
0
0.148148
0
0
0
0
null
0
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
1
1
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
4
6dd19f14eac838f796e7d62ff2f498ce01ae2cae
64
py
Python
tests/__init__.py
metinsay/docluster
6b2474a4272b0e76002482a567e0679016d78188
[ "MIT" ]
1
2020-05-19T16:57:05.000Z
2020-05-19T16:57:05.000Z
tests/__init__.py
metinsay/docluster
6b2474a4272b0e76002482a567e0679016d78188
[ "MIT" ]
1
2017-08-14T15:33:10.000Z
2017-08-14T15:33:10.000Z
tests/__init__.py
metinsay/docluster
6b2474a4272b0e76002482a567e0679016d78188
[ "MIT" ]
2
2020-03-10T20:29:44.000Z
2021-04-21T21:54:58.000Z
# -*- coding: utf-8 -*- """Unit test package for docluster."""
16
38
0.578125
8
64
4.625
1
0
0
0
0
0
0
0
0
0
0
0.018868
0.171875
64
3
39
21.333333
0.679245
0.859375
0
null
0
null
0
0
null
0
0
0
null
1
null
true
0
0
null
null
null
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
1
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
0
0
0
0
0
4
09d027c70829798221fc3494c62c392016feee0d
275
py
Python
fractal/core/specifications/account_id_specification.py
douwevandermeij/fractal
66b04892b4d6fd8ee6a0c07b6e230f4321165085
[ "MIT" ]
2
2021-08-12T05:19:08.000Z
2022-01-29T16:22:37.000Z
fractal/core/specifications/account_id_specification.py
douwevandermeij/fractal
66b04892b4d6fd8ee6a0c07b6e230f4321165085
[ "MIT" ]
null
null
null
fractal/core/specifications/account_id_specification.py
douwevandermeij/fractal
66b04892b4d6fd8ee6a0c07b6e230f4321165085
[ "MIT" ]
null
null
null
from typing import Any from fractal.core.specifications.generic.operators import EqualsSpecification class AccountIdSpecification(EqualsSpecification): def __init__(self, account_id: Any): super(AccountIdSpecification, self).__init__("account_id", account_id)
30.555556
78
0.807273
29
275
7.275862
0.62069
0.127962
0
0
0
0
0
0
0
0
0
0
0.116364
275
8
79
34.375
0.868313
0
0
0
0
0
0.036364
0
0
0
0
0
0
1
0.2
false
0
0.4
0
0.8
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
1
0
0
4
09e2aeeef35e52a2ed2c928a88745f9647e9f32b
191
py
Python
src/pylenium/webelement/pylenium_element.py
symonk/pytest-pylenium
f01e786dd4be5eee03be4377ae8a83f4158246c4
[ "Apache-2.0" ]
10
2020-01-10T21:51:04.000Z
2020-06-12T06:59:27.000Z
src/pylenium/webelement/pylenium_element.py
JosephWardDotTech/pylenium
0603e5bd8c9642cef65039bc48f7a4ddbecfc8a7
[ "Apache-2.0" ]
14
2019-03-05T12:48:31.000Z
2019-11-25T22:55:36.000Z
src/pylenium/webelement/pylenium_element.py
JosephWardDotTech/pylenium
0603e5bd8c9642cef65039bc48f7a4ddbecfc8a7
[ "Apache-2.0" ]
2
2020-01-10T21:50:53.000Z
2021-06-28T23:07:41.000Z
from selenium.webdriver.remote.webelement import WebElement class PyleniumWebElement(WebElement): def __init__(self, parent, id_, w3c=False): super().__init__(parent, id_, w3c)
27.285714
59
0.748691
22
191
6.045455
0.727273
0.120301
0.165414
0
0
0
0
0
0
0
0
0.01227
0.146597
191
6
60
31.833333
0.803681
0
0
0
0
0
0
0
0
0
0
0
0
1
0.25
false
0
0.25
0
0.75
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
0
1
0
0
4
09fa995c9c3ff275efc0573256178a07f6879ac0
328
py
Python
BigGan/pytorch_pretrained_biggan/__init__.py
jybai/data-copying
da0dac491c7b0bb239629ff0cfaeecd342edf3cb
[ "MIT" ]
5
2020-08-05T18:07:54.000Z
2021-01-12T23:48:47.000Z
BigGan/pytorch_pretrained_biggan/__init__.py
jybai/data-copying
da0dac491c7b0bb239629ff0cfaeecd342edf3cb
[ "MIT" ]
1
2022-01-12T05:26:48.000Z
2022-01-12T05:26:48.000Z
BigGan/pytorch_pretrained_biggan/__init__.py
jybai/data-copying
da0dac491c7b0bb239629ff0cfaeecd342edf3cb
[ "MIT" ]
1
2021-03-02T03:25:24.000Z
2021-03-02T03:25:24.000Z
from .config import BigGANConfig from .model import BigGAN from .file_utils import PYTORCH_PRETRAINED_BIGGAN_CACHE, cached_path from .utils import (truncated_noise_sample, save_as_images, convert_to_images, display_in_terminal, one_hot_from_int, one_hot_from_names, get_imagenet_mapping)
46.857143
79
0.768293
44
328
5.25
0.704545
0.095238
0.08658
0
0
0
0
0
0
0
0
0
0.192073
328
6
80
54.666667
0.871698
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
0.666667
0
0.666667
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
4
61ee7b536827af3c6edae6f50b180f7c31466409
152
py
Python
lanes_basic.py
Rahul-Dwivedi-07/Autonomous-Vehicle-self-Driving-Car-Complete
5c62f4a4c0bdddbb6b0a6f058e86f3547b2f3d7b
[ "Apache-2.0" ]
null
null
null
lanes_basic.py
Rahul-Dwivedi-07/Autonomous-Vehicle-self-Driving-Car-Complete
5c62f4a4c0bdddbb6b0a6f058e86f3547b2f3d7b
[ "Apache-2.0" ]
null
null
null
lanes_basic.py
Rahul-Dwivedi-07/Autonomous-Vehicle-self-Driving-Car-Complete
5c62f4a4c0bdddbb6b0a6f058e86f3547b2f3d7b
[ "Apache-2.0" ]
null
null
null
import cv2 import numpy as np image = cv2.imread('test_image.jpg') lane_image = np.copy(image) cv2.imshow('result',lane_image) cv2.waitKey(0)
16.888889
37
0.710526
25
152
4.2
0.6
0.228571
0
0
0
0
0
0
0
0
0
0.039063
0.157895
152
8
38
19
0.78125
0
0
0
0
0
0.138889
0
0
0
0
0
0
1
0
false
0
0.333333
0
0.333333
0
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
0
0
0
4
1139ee72bd92aaa489f9b69fff66f1a8d2004b95
146
py
Python
src/davinci_crawling/task/__init__.py
intellstartup/django-davinci-crawling
638739855b63f02e318abf484d5daeab6c861e7a
[ "MIT" ]
null
null
null
src/davinci_crawling/task/__init__.py
intellstartup/django-davinci-crawling
638739855b63f02e318abf484d5daeab6c861e7a
[ "MIT" ]
null
null
null
src/davinci_crawling/task/__init__.py
intellstartup/django-davinci-crawling
638739855b63f02e318abf484d5daeab6c861e7a
[ "MIT" ]
1
2020-03-16T20:25:53.000Z
2020-03-16T20:25:53.000Z
# -*- coding: utf-8 -* # Copyright (c) 2019 BuildGroup Data Services Inc. default_app_config = "davinci_crawling.task.apps.DaVinciCrawlerConfig"
29.2
70
0.760274
18
146
6
1
0
0
0
0
0
0
0
0
0
0
0.03876
0.116438
146
4
71
36.5
0.79845
0.472603
0
0
0
0
0.635135
0.635135
0
0
0
0
0
1
0
false
0
0
0
0
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
1
1
null
0
0
0
0
0
0
0
0
0
0
0
0
0
4
1143a741ffd1c5b39d776e13a9d9ad267c267c33
73
py
Python
setup.py
mobiusklein/mokapot
5148da1c09cfc92f9b776ae8306619429d0656cb
[ "Apache-2.0" ]
14
2019-09-14T19:47:58.000Z
2021-07-25T21:10:50.000Z
setup.py
mobiusklein/mokapot
5148da1c09cfc92f9b776ae8306619429d0656cb
[ "Apache-2.0" ]
28
2020-09-29T19:38:29.000Z
2022-03-30T05:02:56.000Z
setup.py
mobiusklein/mokapot
5148da1c09cfc92f9b776ae8306619429d0656cb
[ "Apache-2.0" ]
6
2020-09-29T19:12:02.000Z
2022-03-18T17:39:16.000Z
""" Setup the mokapot package. """ import setuptools setuptools.setup()
10.428571
26
0.726027
8
73
6.625
0.75
0
0
0
0
0
0
0
0
0
0
0
0.136986
73
6
27
12.166667
0.84127
0.356164
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
0.5
0
0.5
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
0
0
4
1146e9a7e598d0257f98993f135fdd8e98275324
294
py
Python
part1/Test3.py
flyou/PythonLearn
b9d05847e8dfb919c06ae621e59503aec2f8aee0
[ "Apache-2.0" ]
null
null
null
part1/Test3.py
flyou/PythonLearn
b9d05847e8dfb919c06ae621e59503aec2f8aee0
[ "Apache-2.0" ]
null
null
null
part1/Test3.py
flyou/PythonLearn
b9d05847e8dfb919c06ae621e59503aec2f8aee0
[ "Apache-2.0" ]
null
null
null
#!/usr/bin/python # -*- coding: UTF-8 -*- # -------------------------- # User flyou # Date 2017/2/21 21:07 # EMAIL fangjalylong@qq.com # Desc 字符串操作 # -------------------------- src="hello World" print src print src[0] print src[0:5] print src[:2] print src[2:] print src*2 print src+":hahaha"
16.333333
28
0.537415
44
294
3.590909
0.590909
0.35443
0.170886
0.265823
0.221519
0.221519
0.221519
0.221519
0
0
0
0.070866
0.136054
294
17
29
17.294118
0.551181
0.547619
0
0
0
0
0.144
0
0
0
0
0
0
0
null
null
0
0
null
null
0.875
0
0
0
null
1
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
1
0
0
0
0
0
0
1
0
4
fedbec8cd62c44e298052b9ae86559ebae0029ee
42,330
py
Python
acl18/modules.py
tzshi/mh4-parser-acl18
20234703d6a2824fb6f76d45499db0a6ec6e27fb
[ "MIT" ]
4
2018-08-22T13:54:38.000Z
2021-05-16T16:22:36.000Z
acl18/modules.py
tzshi/mh4-parser-acl18
20234703d6a2824fb6f76d45499db0a6ec6e27fb
[ "MIT" ]
null
null
null
acl18/modules.py
tzshi/mh4-parser-acl18
20234703d6a2824fb6f76d45499db0a6ec6e27fb
[ "MIT" ]
1
2019-05-19T16:59:20.000Z
2019-05-19T16:59:20.000Z
#!/usr/bin/env python # encoding: utf-8 from dynet import * from collections import Counter import numpy as np from . import pyximportcpp; pyximportcpp.install() from .calgorithm import parse_proj, parse_ah_dp_mst, parse_ae_dp_mst from .chu_liu_edmonds import chu_liu_edmonds from .mh4 import parse_mh4 from .mh4t import parse_mh4t, parse_mh4t_sh, mh4t_combine_scores from .ec import parse_1ec_o3 from .ahbeamconf import AHBeamConf from .aebeamconf import AEBeamConf from .mh4beamconf import MH4BeamConf from .layers import MultiLayerPerceptron, Dense, Bilinear, identity, BiaffineBatch class UPOSTagger: def __init__(self, parser, id="UPOSTagger", **kwargs): self._parser = parser self.id = id self._activations = {'tanh': tanh, 'sigmoid': logistic, 'relu': rectify, 'tanh3': (lambda x: tanh(cwise_multiply(cwise_multiply(x, x), x)))} self._bilstm_dims = kwargs.get("bilstm_dims", 128) self._utagger_mlp_activation = self._activations[kwargs.get('utagger_mlp_activation', 'relu')] self._utagger_mlp_dims = kwargs.get("utagger_mlp_dims", 128) self._utagger_mlp_layers = kwargs.get("utagger_mlp_layers", 2) self._utagger_mlp_dropout = kwargs.get("utagger_mlp_dropout", 0.0) self._utagger_discrim = kwargs.get("utagger_discrim", False) def init_params(self): self._utagger_mlp = MultiLayerPerceptron([self._bilstm_dims] + [self._utagger_mlp_dims] * self._utagger_mlp_layers, self._utagger_mlp_activation, self._parser._model) self._utagger_final = Dense(self._utagger_mlp_dims, len(self._parser._upos), identity, self._parser._model) def init_cg(self, train=False): if train: self._utagger_mlp.set_dropout(self._utagger_mlp_dropout) else: self._utagger_mlp.set_dropout(0.) 
def sent_loss(self, graph, carriers): ret = [] correct = 0 for node, c in zip(graph.nodes[1:], carriers[1:]): potentials = self._utagger_final(self._utagger_mlp(c.vec)) pred = np.argmax(potentials.value()) answer = self._parser._upos[node.upos] if (pred == answer): correct += 1 if self._utagger_discrim: potential_values = potentials.value() best_wrong = max([(i, val) for i, val in enumerate(potential_values) if i != answer], key=lambda x: x[1]) if best_wrong[1] + 1. > potential_values[answer]: ret.append((potentials[best_wrong[0]] - potentials[answer] + 1.)) else: ret.append(pickneglogsoftmax(potentials, answer)) return correct, ret def predict(self, graph, carriers): for node, c in zip(graph.nodes[1:], carriers[1:]): potentials = self._utagger_final(self._utagger_mlp(c.vec)) pred = np.argmax(potentials.value()) node.upos = self._parser._iupos[pred] return self class XPOSTagger: def __init__(self, parser, id="XPOSTagger", **kwargs): self._parser = parser self.id = id self._activations = {'tanh': tanh, 'sigmoid': logistic, 'relu': rectify, 'tanh3': (lambda x: tanh(cwise_multiply(cwise_multiply(x, x), x)))} self._bilstm_dims = kwargs.get("bilstm_dims", 128) self._xtagger_mlp_activation = self._activations[kwargs.get('xtagger_mlp_activation', 'relu')] self._xtagger_mlp_dims = kwargs.get("xtagger_mlp_dims", 128) self._xtagger_mlp_layers = kwargs.get("xtagger_mlp_layers", 2) self._xtagger_mlp_dropout = kwargs.get("xtagger_mlp_dropout", 0.0) self._xtagger_discrim = kwargs.get("xtagger_discrim", False) def init_params(self): self._xtagger_mlp = MultiLayerPerceptron([self._bilstm_dims] + [self._xtagger_mlp_dims] * self._xtagger_mlp_layers, self._xtagger_mlp_activation, self._parser._model) self._xtagger_final = Dense(self._xtagger_mlp_dims, len(self._parser._xpos), identity, self._parser._model) def init_cg(self, train=False): if train: self._xtagger_mlp.set_dropout(self._xtagger_mlp_dropout) else: self._xtagger_mlp.set_dropout(0.) 
def sent_loss(self, graph, carriers): ret = [] correct = 0 for node, c in zip(graph.nodes[1:], carriers[1:]): potentials = self._xtagger_final(self._xtagger_mlp(c.vec)) pred = np.argmax(potentials.value()) answer = self._parser._xpos[node.xupos] if (pred == answer): correct += 1 if self._xtagger_discrim: potential_values = potentials.value() best_wrong = max([(i, val) for i, val in enumerate(potential_values) if i != answer], key=lambda x: x[1]) if best_wrong[1] + 1. > potential_values[answer]: ret.append(potentials[best_wrong[0]] - potentials[answer] + 1.) else: ret.append(pickneglogsoftmax(potentials, answer)) return correct, ret def predict(self, graph, carriers): for node, c in zip(graph.nodes[1:], carriers[1:]): potentials = self._xtagger_final(self._xtagger_mlp(c.vec)) pred = np.argmax(potentials.value()) node.xpos = self._parser._ixpos[pred].split("|")[1] return self class MSTParser: def __init__(self, parser, id="MSTParser", **kwargs): self._parser = parser self.id = id self._activations = {'tanh': tanh, 'sigmoid': logistic, 'relu': rectify, 'tanh3': (lambda x: tanh(cwise_multiply(cwise_multiply(x, x), x)))} self._bilstm_dims = kwargs.get("bilstm_dims", 128) self._mst_mlp_activation = self._activations[kwargs.get('mst_mlp_activation', 'relu')] self._mst_mlp_dims = kwargs.get("mst_mlp_dims", 128) self._mst_mlp_layers = kwargs.get("mst_mlp_layers", 2) self._mst_mlp_dropout = kwargs.get("mst_mlp_dropout", 0.0) def init_params(self): self._mst_head_mlp = MultiLayerPerceptron([self._bilstm_dims] + [self._mst_mlp_dims] * self._mst_mlp_layers, self._mst_mlp_activation, self._parser._model) self._mst_mod_mlp = MultiLayerPerceptron([self._bilstm_dims] + [self._mst_mlp_dims] * self._mst_mlp_layers, self._mst_mlp_activation, self._parser._model) self._mst_bilinear = Bilinear(self._mst_mlp_dims, self._parser._model) self._mst_head_bias = Dense(self._mst_mlp_dims, 1, identity, self._parser._model) def init_cg(self, train=False): if train: 
self._mst_head_mlp.set_dropout(self._mst_mlp_dropout) self._mst_mod_mlp.set_dropout(self._mst_mlp_dropout) else: self._mst_head_mlp.set_dropout(0.) self._mst_mod_mlp.set_dropout(0.) def _mst_arcs_eval(self, carriers): head_vecs = [self._mst_head_mlp(c.vec) for c in carriers] mod_vecs = [self._mst_mod_mlp(c.vec) for c in carriers] head_vecs = concatenate(head_vecs, 1) mod_vecs = concatenate(mod_vecs, 1) exprs = colwise_add(self._mst_bilinear(head_vecs, mod_vecs), reshape(self._mst_head_bias(head_vecs), (len(carriers),))) scores = exprs.value() exprs = np.array([[exprs[i][j] for j in range(len(carriers))] for i in range(len(carriers))]) return scores, exprs def sent_loss(self, graph, carriers): gold_heads = graph.proj_heads scores, exprs = self._mst_arcs_eval(carriers) # Cost Augmentation for m, h in enumerate(gold_heads): scores[h, m] -= 1. heads = parse_proj(scores) correct = sum([1 for (h, g) in zip(heads[1:], gold_heads[1:]) if h == g]) loss = [exprs[int(h)][int(i)] - exprs[int(g)][int(i)] + 1. 
for i, (h, g) in enumerate(zip(heads, gold_heads)) if h != g] return correct, loss def predict(self, graph, carriers): scores, exprs = self._mst_arcs_eval(carriers) graph.heads = parse_proj(scores) return self class NPMSTParser: def __init__(self, parser, id="NPMSTParser", **kwargs): self._parser = parser self.id = id self._activations = {'tanh': tanh, 'sigmoid': logistic, 'relu': rectify, 'tanh3': (lambda x: tanh(cwise_multiply(cwise_multiply(x, x), x)))} self._bilstm_dims = kwargs.get("bilstm_dims", 128) self._npmst_mlp_activation = self._activations[kwargs.get('npmst_mlp_activation', 'relu')] self._npmst_mlp_dims = kwargs.get("npmst_mlp_dims", 128) self._npmst_mlp_layers = kwargs.get("npmst_mlp_layers", 2) self._npmst_mlp_dropout = kwargs.get("npmst_mlp_dropout", 0.0) def init_params(self): self._npmst_head_mlp = MultiLayerPerceptron([self._bilstm_dims] + [self._npmst_mlp_dims] * self._npmst_mlp_layers, self._npmst_mlp_activation, self._parser._model) self._npmst_mod_mlp = MultiLayerPerceptron([self._bilstm_dims] + [self._npmst_mlp_dims] * self._npmst_mlp_layers, self._npmst_mlp_activation, self._parser._model) self._npmst_bilinear = Bilinear(self._npmst_mlp_dims, self._parser._model) self._npmst_head_bias = Dense(self._npmst_mlp_dims, 1, identity, self._parser._model) def init_cg(self, train=False): if train: self._npmst_head_mlp.set_dropout(self._npmst_mlp_dropout) self._npmst_mod_mlp.set_dropout(self._npmst_mlp_dropout) else: self._npmst_head_mlp.set_dropout(0.) self._npmst_mod_mlp.set_dropout(0.) 
def _npmst_arcs_eval(self, carriers): head_vecs = [self._npmst_head_mlp(c.vec) for c in carriers] mod_vecs = [self._npmst_mod_mlp(c.vec) for c in carriers] head_vecs = concatenate(head_vecs, 1) mod_vecs = concatenate(mod_vecs, 1) exprs = colwise_add(self._npmst_bilinear(head_vecs, mod_vecs), reshape(self._npmst_head_bias(head_vecs), (len(carriers),))) scores = exprs.value() exprs = np.array([[exprs[i][j] for j in range(len(carriers))] for i in range(len(carriers))]) return scores, exprs def sent_loss(self, graph, carriers): gold_heads = graph.heads scores, exprs = self._npmst_arcs_eval(carriers) # Cost Augmentation for m, h in enumerate(gold_heads): scores[h, m] -= 1. heads, tree_score = chu_liu_edmonds(scores.T) correct = sum([1 for (h, g) in zip(heads[1:], gold_heads[1:]) if h == g]) loss = [exprs[int(h)][int(i)] - exprs[int(g)][int(i)] + 1. for i, (h, g) in enumerate(zip(heads, gold_heads)) if h != g] return correct, loss def predict(self, graph, carriers): scores, exprs = self._npmst_arcs_eval(carriers) graph.heads, tree_score = chu_liu_edmonds(scores.T) return self class MH4Parser: def __init__(self, parser, id="MH4Parser", **kwargs): self._parser = parser self.id = id self._activations = {'tanh': tanh, 'sigmoid': logistic, 'relu': rectify, 'tanh3': (lambda x: tanh(cwise_multiply(cwise_multiply(x, x), x)))} self._bilstm_dims = kwargs.get("bilstm_dims", 128) self._mh4_mlp_activation = self._activations[kwargs.get('mh4_mlp_activation', 'relu')] self._mh4_mlp_dims = kwargs.get("mh4_mlp_dims", 128) self._mh4_mlp_layers = kwargs.get("mh4_mlp_layers", 2) self._mh4_mlp_dropout = kwargs.get("mh4_mlp_dropout", 0.0) def init_params(self): self._mh4_head_mlp = MultiLayerPerceptron([self._bilstm_dims] + [self._mh4_mlp_dims] * self._mh4_mlp_layers, self._mh4_mlp_activation, self._parser._model) self._mh4_mod_mlp = MultiLayerPerceptron([self._bilstm_dims] + [self._mh4_mlp_dims] * self._mh4_mlp_layers, self._mh4_mlp_activation, self._parser._model) self._mh4_bilinear = 
Bilinear(self._mh4_mlp_dims, self._parser._model) self._mh4_head_bias = Dense(self._mh4_mlp_dims, 1, identity, self._parser._model) def init_cg(self, train=False): if train: self._mh4_head_mlp.set_dropout(self._mh4_mlp_dropout) self._mh4_mod_mlp.set_dropout(self._mh4_mlp_dropout) else: self._mh4_head_mlp.set_dropout(0.) self._mh4_mod_mlp.set_dropout(0.) def _mh4_arcs_eval(self, carriers): head_vecs = [self._mh4_head_mlp(c.vec) for c in carriers] mod_vecs = [self._mh4_mod_mlp(c.vec) for c in carriers] head_vecs = concatenate(head_vecs, 1) mod_vecs = concatenate(mod_vecs, 1) exprs = colwise_add(self._mh4_bilinear(head_vecs, mod_vecs), reshape(self._mh4_head_bias(head_vecs), (len(carriers),))) scores = exprs.value() exprs = np.array([[exprs[i][j] for j in range(len(carriers))] for i in range(len(carriers))]) return scores, exprs def sent_loss(self, graph, carriers): gold_heads = graph.mh4_heads scores, exprs = self._mh4_arcs_eval(carriers) # Cost Augmentation for m, h in enumerate(gold_heads): scores[h, m] -= 1. heads = parse_mh4(scores) correct = sum([1 for (h, g) in zip(heads[1:], gold_heads[1:]) if h == g]) loss = [exprs[int(h)][int(i)] - exprs[int(g)][int(i)] + 1. 
class MH4TParser:
    """Transition-based parser for the MH4 transition system.

    Transitions are scored from BiLSTM token vectors (``carriers[i].vec``)
    by an MLP / biaffine layer and decoded with dynamic programming
    (``parse_mh4t`` / ``parse_mh4t_sh``).  Four scoring modes exist:
    "local" (greedy per-configuration), "two" (stack x buffer biaffine),
    "hybrid" (shift + two reduce biaffines) and "b0" (buffer-front only).
    """

    def __init__(self, parser, id="MH4TParser", **kwargs):
        # NOTE(review): `id` shadows the builtin; kept for interface compatibility.
        self._parser = parser
        self.id = id
        self._activations = {
            'tanh': tanh,
            'sigmoid': logistic,
            'relu': rectify,
            'tanh3': (lambda x: tanh(cwise_multiply(cwise_multiply(x, x), x))),
        }
        self._bilstm_dims = kwargs.get("bilstm_dims", 128)
        self._mh4t_mlp_activation = self._activations[kwargs.get('mh4t_mlp_activation', 'relu')]
        self._mh4t_mlp_dims = kwargs.get("mh4t_mlp_dims", 128)
        self._mh4t_mlp_layers = kwargs.get("mh4t_mlp_layers", 2)
        self._mh4t_mlp_dropout = kwargs.get("mh4t_mlp_dropout", 0.0)
        self._mh4t_mode = kwargs.get("mh4t_mode", "two")
        self._mh4t_stack_features = kwargs.get("mh4t_stack_features", 1)
        self._mh4t_buffer_features = kwargs.get("mh4t_buffer_features", 1)

    def init_params(self):
        """Allocate model parameters for the configured scoring mode."""
        feature_count = self._mh4t_stack_features + self._mh4t_buffer_features
        hidden = [self._mh4t_mlp_dims] * self._mh4t_mlp_layers
        if self._mh4t_mode == "local":
            # One pad vector per extracted configuration feature.
            self._mh4t_pad_repr = [self._parser._model.add_parameters(self._bilstm_dims)
                                   for i in range(feature_count)]
            self._mh4t_mlp = MultiLayerPerceptron(
                [self._bilstm_dims * feature_count] + hidden,
                self._mh4t_mlp_activation, self._parser._model)
            self._mh4t_final = Dense(self._mh4t_mlp_dims, 7, identity, self._parser._model)
        elif self._mh4t_mode == "two":
            self._mh4t_pad_repr = self._parser._model.add_parameters(self._bilstm_dims)
            self._mh4t_stack_mlp = MultiLayerPerceptron(
                [self._bilstm_dims] + hidden, self._mh4t_mlp_activation, self._parser._model)
            self._mh4t_buffer_mlp = MultiLayerPerceptron(
                [self._bilstm_dims] + hidden, self._mh4t_mlp_activation, self._parser._model)
            self._mh4t_biaffine = BiaffineBatch(self._mh4t_mlp_dims, 7, self._parser._model)
        elif self._mh4t_mode == "hybrid":
            self._mh4t_pad_repr = self._parser._model.add_parameters(self._bilstm_dims)
            self._mh4t_s1_mlp = MultiLayerPerceptron(
                [self._bilstm_dims] + hidden, self._mh4t_mlp_activation, self._parser._model)
            self._mh4t_s0_mlp = MultiLayerPerceptron(
                [self._bilstm_dims] + hidden, self._mh4t_mlp_activation, self._parser._model)
            self._mh4t_b0_mlp = MultiLayerPerceptron(
                [self._bilstm_dims] + hidden, self._mh4t_mlp_activation, self._parser._model)
            self._mh4t_shift_biaffine = BiaffineBatch(self._mh4t_mlp_dims, 1, self._parser._model)
            self._mh4t_reduce_biaffine1 = BiaffineBatch(self._mh4t_mlp_dims, 6, self._parser._model)
            self._mh4t_reduce_biaffine2 = BiaffineBatch(self._mh4t_mlp_dims, 6, self._parser._model)
        elif self._mh4t_mode == "b0":
            self._mh4t_pad_repr = self._parser._model.add_parameters(self._bilstm_dims)
            self._mh4t_b0_mlp = MultiLayerPerceptron(
                [self._bilstm_dims] + hidden + [7],
                self._mh4t_mlp_activation, self._parser._model)

    def init_cg(self, train=False):
        """Set MLP dropout: the configured rate while training, 0 otherwise."""
        rate = self._mh4t_mlp_dropout if train else 0.
        if self._mh4t_mode == "local":
            self._mh4t_mlp.set_dropout(rate)
        elif self._mh4t_mode == "two":
            self._mh4t_stack_mlp.set_dropout(rate)
            self._mh4t_buffer_mlp.set_dropout(rate)
        elif self._mh4t_mode == "hybrid":
            self._mh4t_s1_mlp.set_dropout(rate)
            self._mh4t_s0_mlp.set_dropout(rate)
            self._mh4t_b0_mlp.set_dropout(rate)
        elif self._mh4t_mode == "b0":
            self._mh4t_b0_mlp.set_dropout(rate)

    def _mh4t_conf_eval(self, features, carriers):
        """Score the 7 transitions for one configuration ("local" mode).

        Negative feature indices select the corresponding pad parameter.
        Returns (numpy scores, expression vector).
        """
        vecs = [carriers[int(f)].vec if f >= 0 else parameter(self._mh4t_pad_repr[i])
                for i, f in enumerate(features)]
        exprs = self._mh4t_final(self._mh4t_mlp(concatenate(vecs)))
        return exprs.value(), exprs

    def _mh4t_eval(self, carriers):
        """Score all transitions for every position pair (non-"local" modes)."""
        if self._mh4t_mode == "two":
            stack_vecs = concatenate(
                [self._mh4t_stack_mlp(c.vec) for c in carriers] + [parameter(self._mh4t_pad_repr)], 1)
            buffer_vecs = concatenate(
                [self._mh4t_buffer_mlp(c.vec) for c in carriers] + [parameter(self._mh4t_pad_repr)], 1)
            exprs = self._mh4t_biaffine(stack_vecs, buffer_vecs)
            return exprs.value(), exprs
        elif self._mh4t_mode == "b0":
            b0_vecs = concatenate([c.vec for c in carriers] + [parameter(self._mh4t_pad_repr)], 1)
            b0_vecs = transpose(self._mh4t_b0_mlp(b0_vecs))
            # The same buffer-front scores are replicated for every stack index.
            exprs = [b0_vecs] * (len(carriers) + 1)
            scores = np.array([b0_vecs.value()] * (len(carriers) + 1))
            return scores, exprs
        elif self._mh4t_mode == "hybrid":
            s1_vecs = concatenate(
                [self._mh4t_s1_mlp(c.vec) for c in carriers] + [parameter(self._mh4t_pad_repr)], 1)
            s0_vecs = concatenate(
                [self._mh4t_s0_mlp(c.vec) for c in carriers] + [parameter(self._mh4t_pad_repr)], 1)
            b0_vecs = concatenate(
                [self._mh4t_b0_mlp(c.vec) for c in carriers] + [parameter(self._mh4t_pad_repr)], 1)
            sh_exprs = self._mh4t_shift_biaffine(s0_vecs, b0_vecs)
            sh_scores = sh_exprs.value()
            re_exprs1 = self._mh4t_reduce_biaffine1(s1_vecs, s0_vecs)
            re_exprs2 = self._mh4t_reduce_biaffine2(s0_vecs, b0_vecs)
            re_scores1 = re_exprs1.value()
            re_scores2 = re_exprs2.value()
            return (sh_scores, mh4t_combine_scores(re_scores1, re_scores2),
                    sh_exprs, re_exprs1, re_exprs2)

    def sent_loss(self, graph, carriers):
        """Return (correct-arc estimate, list of margin-loss expressions)."""
        if self._mh4t_mode == "local":
            loss = []
            gold_heads = graph.mh4_heads
            beamconf = MH4BeamConf(len(graph.nodes), 1,
                                   self._mh4t_stack_features, self._mh4t_buffer_features)
            beamconf.init_conf(0)
            # Derive the gold transition sequence by parsing with arc costs
            # that are 0 for gold arcs and -inf otherwise.
            scores = np.zeros((len(gold_heads) + 1, len(gold_heads) + 1, 7))
            mst_scores = np.ones((len(gold_heads), len(gold_heads)))
            mst_scores += -np.inf
            for m, h in enumerate(gold_heads):
                mst_scores[h, m] = 0.
            gscore, _, gold_traces = parse_mh4t(scores, mst_scores)
            total = 0
            wrong = 0
            for gi, gj, gr in gold_traces:
                valid = beamconf.valid_transitions(0)
                if np.count_nonzero(valid) < 1:
                    break
                scores, exprs = self._mh4t_conf_eval(beamconf.extract_features(0), carriers)
                best = int(gr)
                rest = tuple((i, s) for i, s in enumerate(scores) if i != best)
                total += 1
                if len(rest) > 0:
                    # Hinge loss against the strongest competing transition.
                    second, _ = max(rest, key=lambda x: x[1])
                    if scores[best] < scores[second] + 1.0:
                        loss.append(exprs[second] - exprs[best] + 1.)
                        wrong += 1
                beamconf.make_transition(0, best)
            return (total - wrong) / total * (len(graph.nodes) - 1), loss
        elif self._mh4t_mode == "two" or self._mh4t_mode == "b0":
            gold_heads = graph.mh4_heads
            scores, exprs = self._mh4t_eval(carriers)
            # Cost augmentation: every non-gold arc carries +1 cost.
            mst_scores = np.ones((len(gold_heads), len(gold_heads)))
            for m, h in enumerate(gold_heads):
                mst_scores[h, m] -= 1.
            pscore, heads, traces = parse_mh4t(scores, mst_scores)
            # Gold parse: 0 for gold arcs, -inf for the rest.
            mst_scores += -np.inf
            for m, h in enumerate(gold_heads):
                mst_scores[h, m] = 0.
            gscore, _, gold_traces = parse_mh4t(scores, mst_scores)
            correct = sum(1 for h, g in zip(heads[1:], gold_heads[1:]) if h == g)
            traces_set = Counter((i, j, r) for i, j, r in traces)
            gold_traces_set = Counter((i, j, r) for i, j, r in gold_traces)
            # Structured perceptron-style loss over the differing transitions.
            loss = [exprs[int(i)][int(j)][int(r)]
                    for i, j, r in (traces_set - gold_traces_set).elements()]
            loss.extend(-exprs[int(i)][int(j)][int(r)]
                        for i, j, r in (gold_traces_set - traces_set).elements())
            return correct, loss
        elif self._mh4t_mode == "hybrid":
            gold_heads = graph.mh4_heads
            sh_scores, re_scores, sh_exprs, re_exprs1, re_exprs2 = self._mh4t_eval(carriers)
            # Cost augmentation as above.
            mst_scores = np.ones((len(gold_heads), len(gold_heads)))
            for m, h in enumerate(gold_heads):
                mst_scores[h, m] -= 1.
            pscore, heads, traces = parse_mh4t_sh(sh_scores, re_scores, mst_scores)
            mst_scores += -np.inf
            for m, h in enumerate(gold_heads):
                mst_scores[h, m] = 0.
            gscore, _, gold_traces = parse_mh4t_sh(sh_scores, re_scores, mst_scores)
            correct = sum(1 for h, g in zip(heads[1:], gold_heads[1:]) if h == g)
            traces_set = Counter((n, i, j, k, r) for n, i, j, k, r in traces)
            gold_traces_set = Counter((n, i, j, k, r) for n, i, j, k, r in gold_traces)
            loss = []
            # n == 2: shift transition; n == 3: composed reduce transition.
            for n, i, j, k, r in (traces_set - gold_traces_set).elements():
                if n == 2:
                    loss.append(sh_exprs[int(i)][int(j)])
                elif n == 3:
                    loss.append(re_exprs1[int(i)][int(j)][int(r)]
                                + re_exprs2[int(j)][int(k)][int(r)])
            for n, i, j, k, r in (gold_traces_set - traces_set).elements():
                if n == 2:
                    loss.append(-sh_exprs[int(i)][int(j)])
                elif n == 3:
                    loss.append(-re_exprs1[int(i)][int(j)][int(r)]
                                - re_exprs2[int(j)][int(k)][int(r)])
            return correct, loss

    def predict(self, graph, carriers):
        """Attach predicted heads to `graph` in place; return self."""
        if self._mh4t_mode != "local" and len(graph.nodes) >= 100:
            # Global DP decoding is too expensive for long sentences; bail out
            # with all heads marked unknown (-1).
            graph.heads = -np.ones(len(graph.nodes), dtype=int)
            return self
        if self._mh4t_mode == "local":
            beamconf = MH4BeamConf(len(graph.nodes), 1,
                                   self._mh4t_stack_features, self._mh4t_buffer_features)
            beamconf.init_conf(0)
            while not beamconf.is_complete(0):
                valid = beamconf.valid_transitions(0)
                scores, exprs = self._mh4t_conf_eval(beamconf.extract_features(0), carriers)
                action, _ = max(((i, s) for i, s in enumerate(scores) if valid[i]),
                                key=lambda x: x[1])
                beamconf.make_transition(0, action)
            graph.heads = list(beamconf.get_heads(0))
            return self
        elif self._mh4t_mode == "two" or self._mh4t_mode == "b0":
            mst_scores = np.ones((len(carriers), len(carriers)))
            scores, exprs = self._mh4t_eval(carriers)
            _, graph.heads, _ = parse_mh4t(scores, mst_scores)
            return self
        elif self._mh4t_mode == "hybrid":
            mst_scores = np.ones((len(carriers), len(carriers)))
            sh_scores, re_scores, sh_exprs, re_exprs1, re_exprs2 = self._mh4t_eval(carriers)
            _, graph.heads, _ = parse_mh4t_sh(sh_scores, re_scores, mst_scores)
            return self
class AHDPParser:
    """Parser over a 3-way transition system (``AHBeamConf``).

    Transitions are scored by a biaffine layer over (stack-top, buffer-front)
    vector pairs.  Decoding is either global (``parse_ah_dp_mst`` dynamic
    program, when ``ah_global``) or greedy transition-by-transition.
    """

    def __init__(self, parser, id="AHDPParser", **kwargs):
        self._parser = parser
        self.id = id
        self._activations = {
            'tanh': tanh,
            'sigmoid': logistic,
            'relu': rectify,
            'tanh3': (lambda x: tanh(cwise_multiply(cwise_multiply(x, x), x))),
        }
        self._bilstm_dims = kwargs.get("bilstm_dims", 128)
        self._ah_mlp_activation = self._activations[kwargs.get('ah_mlp_activation', 'relu')]
        self._ah_mlp_dims = kwargs.get("ah_mlp_dims", 128)
        self._ah_mlp_layers = kwargs.get("ah_mlp_layers", 2)
        self._ah_mlp_dropout = kwargs.get("ah_mlp_dropout", 0.0)
        self._ah_global = kwargs.get("ah_global", True)

    def init_params(self):
        """Allocate the pad vectors, the two feature MLPs and the scorer."""
        hidden = [self._ah_mlp_dims] * self._ah_mlp_layers
        # Two pads: one for an empty stack, one for an exhausted buffer.
        self._ah_pad_repr = [self._parser._model.add_parameters(self._bilstm_dims)
                             for i in range(2)]
        self._ah_stack_mlp = MultiLayerPerceptron(
            [self._bilstm_dims] + hidden, self._ah_mlp_activation, self._parser._model)
        self._ah_buffer_mlp = MultiLayerPerceptron(
            [self._bilstm_dims] + hidden, self._ah_mlp_activation, self._parser._model)
        self._ah_scorer = BiaffineBatch(self._ah_mlp_dims, 3, self._parser._model)

    def init_cg(self, train=False):
        """Enable dropout during training, disable it at prediction time."""
        rate = self._ah_mlp_dropout if train else 0.
        self._ah_stack_mlp.set_dropout(rate)
        self._ah_buffer_mlp.set_dropout(rate)

    def _ah_confs_eval(self, carriers):
        """Score all 3 transitions for every (stack, buffer) index pair.

        Indices are shifted by +1; row 0 is the stack pad and the final row
        the buffer pad (selected through negative list indexing).
        """
        rows = list(range(-1, len(carriers))) + [-2]
        vecs = [carriers[f].vec if f >= 0 else parameter(self._ah_pad_repr[f])
                for i, f in enumerate(rows)]
        vecs = concatenate(vecs, 1)
        exprs = self._ah_scorer(self._ah_stack_mlp(vecs), self._ah_buffer_mlp(vecs))
        scores = exprs.value()
        # Re-pack the expressions into a numpy object array for easy indexing.
        exprs = np.array([[[exprs[i][j][b] for b in (0, 1, 2)]
                           for j in range(len(rows))]
                          for i in range(len(rows))])
        return scores, exprs

    def _ah_seq_loss(self, correctseq, wrongseq, beamconf, loss, carriers, exprs, loc=0):
        """Append perceptron terms for the two sequences past their common prefix:
        positive for the (wrong) predicted sequence, negative for the gold one."""
        commonprefix = 0
        for i in range(min(len(correctseq), len(wrongseq))):
            if wrongseq[i] == correctseq[i]:
                commonprefix = i + 1
            else:
                break
        for seq, negate in ((wrongseq, False), (correctseq, True)):
            beamconf.init_conf(loc, True)
            for i in range(commonprefix):
                beamconf.make_transition(loc, seq[i])
            for i in range(commonprefix, len(seq)):
                s, b = beamconf.extract_features(loc)
                if b < 0:
                    # Exhausted buffer maps onto the pad row.
                    b = len(carriers)
                term = exprs[int(s + 1)][int(b + 1)][int(seq[i])]
                loss.append(-term if negate else term)
                beamconf.make_transition(loc, seq[i])

    def sent_loss(self, graph, carriers):
        """Return (number of correct arcs, list of loss expressions)."""
        gold_heads = graph.proj_heads
        loss = []
        beamconf = AHBeamConf(len(graph.nodes), 1, np.array(gold_heads), 1, 1)
        beamconf.init_conf(0, True)
        scores, exprs = self._ah_confs_eval(carriers)
        if self._ah_global:
            # Cost-augmented global decoding: +1 for every non-gold arc.
            mst_scores = np.ones((len(graph.nodes), len(graph.nodes)))
            for m, h in enumerate(gold_heads):
                mst_scores[h, m] -= 1.
            pred_transitions, pred_heads = parse_ah_dp_mst(scores, mst_scores)
            true_transitions = beamconf.gold_transitions(0, True)
            self._ah_seq_loss(true_transitions, pred_transitions,
                              beamconf, loss, carriers, exprs, loc=0)
            correct = sum(1 if gold_heads[i] == pred_heads[i] else 0
                          for i in range(1, len(graph.nodes)))
        else:
            # Greedy training against the static oracle sequence.
            true_transitions = beamconf.gold_transitions(0, True)
            beamconf.init_conf(0, True)
            beamconf.make_transition(0, 0)
            correct = 0
            for step in range(1, len(true_transitions)):
                s, b = beamconf.extract_features(0)
                if b < 0:
                    b = len(carriers)
                g = true_transitions[step]
                valid_transitions = beamconf.valid_transitions(0)
                valid_set = [i for i in range(3) if valid_transitions[i]]
                # Margin of 1 on every non-gold transition.
                valid_scores = [scores[s + 1, b + 1, i] if i == g
                                else scores[s + 1, b + 1, i] + 1.
                                for i in valid_set]
                m = valid_set[np.argmax(valid_scores)]
                if m == true_transitions[step]:
                    correct += 1
                else:
                    loss.append(exprs[s + 1, b + 1, m] + 1. - exprs[s + 1, b + 1, g])
                beamconf.make_transition(0, true_transitions[step])
        return correct, loss

    def predict(self, graph, carriers):
        """Attach predicted heads to `graph` in place; return self."""
        beamconf = AHBeamConf(len(graph.nodes), 1, np.array(graph.heads), 1, 1)
        beamconf.init_conf(0, True)
        scores, exprs = self._ah_confs_eval(carriers)
        if self._ah_global:
            mst_scores = np.zeros((len(graph.nodes), len(graph.nodes)))
            transitions, heads = parse_ah_dp_mst(scores, mst_scores)
            graph.heads = heads
        else:
            while not beamconf.is_complete(0):
                valid_transitions = beamconf.valid_transitions(0)
                valid_set = [i for i in range(3) if valid_transitions[i]]
                if len(valid_set) == 0:
                    break
                s, b = beamconf.extract_features(0)
                if b < 0:
                    b = len(carriers)
                valid_scores = [scores[s + 1, b + 1, i] for i in valid_set]
                beamconf.make_transition(0, valid_set[np.argmax(valid_scores)])
            graph.heads = beamconf.get_heads(0)
        return self
class AEDPParser:
    """Parser over a 4-way transition system (``AEBeamConf``).

    Structurally parallel to ``AHDPParser`` but with four transitions and
    the ``parse_ae_dp_mst`` global decoder.
    """

    def __init__(self, parser, id="AEDPParser", **kwargs):
        self._parser = parser
        self.id = id
        self._activations = {
            'tanh': tanh,
            'sigmoid': logistic,
            'relu': rectify,
            'tanh3': (lambda x: tanh(cwise_multiply(cwise_multiply(x, x), x))),
        }
        self._bilstm_dims = kwargs.get("bilstm_dims", 128)
        self._ae_mlp_activation = self._activations[kwargs.get('ae_mlp_activation', 'relu')]
        self._ae_mlp_dims = kwargs.get("ae_mlp_dims", 128)
        self._ae_mlp_layers = kwargs.get("ae_mlp_layers", 2)
        self._ae_mlp_dropout = kwargs.get("ae_mlp_dropout", 0.0)
        self._ae_global = kwargs.get("ae_global", True)

    def init_params(self):
        """Allocate the pad vectors, the two feature MLPs and the 4-way scorer."""
        hidden = [self._ae_mlp_dims] * self._ae_mlp_layers
        self._ae_pad_repr = [self._parser._model.add_parameters(self._bilstm_dims)
                             for i in range(2)]
        self._ae_stack_mlp = MultiLayerPerceptron(
            [self._bilstm_dims] + hidden, self._ae_mlp_activation, self._parser._model)
        self._ae_buffer_mlp = MultiLayerPerceptron(
            [self._bilstm_dims] + hidden, self._ae_mlp_activation, self._parser._model)
        self._ae_scorer = BiaffineBatch(self._ae_mlp_dims, 4, self._parser._model)

    def init_cg(self, train=False):
        """Enable dropout during training, disable it at prediction time."""
        rate = self._ae_mlp_dropout if train else 0.
        self._ae_stack_mlp.set_dropout(rate)
        self._ae_buffer_mlp.set_dropout(rate)

    def _ae_confs_eval(self, carriers):
        """Score all 4 transitions for every (stack, buffer) index pair.

        Indices are shifted by +1; row 0 is the stack pad and the final row
        the buffer pad (selected through negative list indexing).
        """
        rows = list(range(-1, len(carriers))) + [-2]
        vecs = [carriers[f].vec if f >= 0 else parameter(self._ae_pad_repr[f])
                for i, f in enumerate(rows)]
        vecs = concatenate(vecs, 1)
        exprs = self._ae_scorer(self._ae_stack_mlp(vecs), self._ae_buffer_mlp(vecs))
        scores = exprs.value()
        exprs = np.array([[[exprs[i][j][b] for b in (0, 1, 2, 3)]
                           for j in range(len(rows))]
                          for i in range(len(rows))])
        return scores, exprs

    def _ae_seq_loss(self, correctseq, wrongseq, beamconf, loss, carriers, exprs, loc=0):
        """Append perceptron terms for the two sequences past their common prefix:
        positive for the (wrong) predicted sequence, negative for the gold one."""
        commonprefix = 0
        for i in range(min(len(correctseq), len(wrongseq))):
            if wrongseq[i] == correctseq[i]:
                commonprefix = i + 1
            else:
                break
        for seq, negate in ((wrongseq, False), (correctseq, True)):
            beamconf.init_conf(loc, True)
            for i in range(commonprefix):
                beamconf.make_transition(loc, seq[i])
            for i in range(commonprefix, len(seq)):
                s, b = beamconf.extract_features(loc)
                if b < 0:
                    b = len(carriers)
                term = exprs[int(s + 1)][int(b + 1)][int(seq[i])]
                loss.append(-term if negate else term)
                beamconf.make_transition(loc, seq[i])

    def sent_loss(self, graph, carriers):
        """Return (correct-arc estimate, list of loss expressions)."""
        gold_heads = graph.proj_heads
        loss = []
        beamconf = AEBeamConf(len(graph.nodes), 1, np.array(gold_heads), 1, 1)
        beamconf.init_conf(0, True)
        scores, exprs = self._ae_confs_eval(carriers)
        if self._ae_global:
            # Cost-augmented global decoding: +1 for every non-gold arc.
            mst_scores = np.ones((len(graph.nodes), len(graph.nodes)))
            for m, h in enumerate(gold_heads):
                mst_scores[h, m] -= 1.
            pred_transitions, pred_heads = parse_ae_dp_mst(scores, mst_scores)
            true_transitions = beamconf.gold_transitions(0, True)
            self._ae_seq_loss(true_transitions, pred_transitions,
                              beamconf, loss, carriers, exprs, loc=0)
            correct = sum(1 if gold_heads[i] == pred_heads[i] else 0
                          for i in range(1, len(graph.nodes)))
        else:
            true_transitions = beamconf.gold_transitions(0, True)
            beamconf.init_conf(0, True)
            beamconf.make_transition(0, 0)
            correct = 0
            for step in range(1, len(true_transitions)):
                s, b = beamconf.extract_features(0)
                if b < 0:
                    b = len(carriers)
                g = true_transitions[step]
                valid_transitions = beamconf.valid_transitions(0)
                valid_set = [i for i in range(4) if valid_transitions[i]]
                valid_scores = [scores[s + 1, b + 1, i] if i == g
                                else scores[s + 1, b + 1, i] + 1.
                                for i in valid_set]
                m = valid_set[np.argmax(valid_scores)]
                if m == true_transitions[step]:
                    correct += 1
                else:
                    loss.append(exprs[s + 1, b + 1, m] + 1. - exprs[s + 1, b + 1, g])
                beamconf.make_transition(0, true_transitions[step])
            # Rescale per-transition accuracy to the arc count
            # (NOTE(review): placement inside the greedy branch inferred
            # from the original layout — confirm against history).
            correct /= len(true_transitions) - 1
            correct *= len(graph.nodes) - 1
        return correct, loss

    def predict(self, graph, carriers):
        """Attach predicted heads to `graph` in place; return self."""
        beamconf = AEBeamConf(len(graph.nodes), 1, np.array(graph.heads), 1, 1)
        beamconf.init_conf(0, True)
        scores, exprs = self._ae_confs_eval(carriers)
        if self._ae_global:
            mst_scores = np.zeros((len(graph.nodes), len(graph.nodes)))
            transitions, heads = parse_ae_dp_mst(scores, mst_scores)
        else:
            while not beamconf.is_complete(0):
                valid_transitions = beamconf.valid_transitions(0)
                valid_set = [i for i in range(4) if valid_transitions[i]]
                if len(valid_set) == 0:
                    break
                s, b = beamconf.extract_features(0)
                if b < 0:
                    b = len(carriers)
                valid_scores = [scores[s + 1, b + 1, i] for i in valid_set]
                beamconf.make_transition(0, valid_set[np.argmax(valid_scores)])
            heads = beamconf.get_heads(0)
        graph.heads = heads
        return self
class OneECParser:
    """Graph-based parser decoded with ``parse_1ec_o3``.

    Scores four factor types per (head-ish, modifier) pair — grandparent,
    head, sibling and non-projective head — with separate biaffine layers,
    and decodes 1-endpoint-crossing structures.
    """

    def __init__(self, parser, id="OneECParser", **kwargs):
        self._parser = parser
        self.id = id
        self._activations = {
            'tanh': tanh,
            'sigmoid': logistic,
            'relu': rectify,
            'tanh3': (lambda x: tanh(cwise_multiply(cwise_multiply(x, x), x))),
        }
        self._bilstm_dims = kwargs.get("bilstm_dims", 128)
        self._oneec_mlp_activation = self._activations[kwargs.get('oneec_mlp_activation', 'relu')]
        self._oneec_mlp_dims = kwargs.get("oneec_mlp_dims", 128)
        self._oneec_mlp_layers = kwargs.get("oneec_mlp_layers", 2)
        self._oneec_mlp_dropout = kwargs.get("oneec_mlp_dropout", 0.0)

    def init_params(self):
        """Allocate one MLP, one biaffine scorer and one pad vector per role."""
        dims = [self._bilstm_dims] + [self._oneec_mlp_dims] * self._oneec_mlp_layers
        self._oneec_grand_mlp = MultiLayerPerceptron(
            dims, self._oneec_mlp_activation, self._parser._model)
        self._oneec_head_mlp = MultiLayerPerceptron(
            dims, self._oneec_mlp_activation, self._parser._model)
        self._oneec_mod_mlp = MultiLayerPerceptron(
            dims, self._oneec_mlp_activation, self._parser._model)
        self._oneec_sib_mlp = MultiLayerPerceptron(
            dims, self._oneec_mlp_activation, self._parser._model)
        self._oneec_grand_mod_biaffine = BiaffineBatch(self._oneec_mlp_dims, 1, self._parser._model)
        self._oneec_head_mod_biaffine = BiaffineBatch(self._oneec_mlp_dims, 1, self._parser._model)
        self._oneec_sib_mod_biaffine = BiaffineBatch(self._oneec_mlp_dims, 1, self._parser._model)
        self._oneec_non_proj_head_mod_biaffine = BiaffineBatch(self._oneec_mlp_dims, 1, self._parser._model)
        self._oneec_grand_pad = self._parser._model.add_parameters(self._oneec_mlp_dims)
        self._oneec_head_pad = self._parser._model.add_parameters(self._oneec_mlp_dims)
        self._oneec_mod_pad = self._parser._model.add_parameters(self._oneec_mlp_dims)
        self._oneec_sib_pad = self._parser._model.add_parameters(self._oneec_mlp_dims)

    def init_cg(self, train=False):
        """Enable dropout during training, disable it at prediction time."""
        rate = self._oneec_mlp_dropout if train else 0.
        for mlp in (self._oneec_grand_mlp, self._oneec_head_mlp,
                    self._oneec_mod_mlp, self._oneec_sib_mlp):
            mlp.set_dropout(rate)

    def _oneec_arcs_eval(self, carriers):
        """Score the four factor types for every index pair.

        Index 0 is the pad position; real tokens are shifted by +1.
        Returns (numpy scores of shape (n+1, n+1, 4), expression tensor).
        """
        grand_vecs = concatenate(
            [parameter(self._oneec_grand_pad)] + [self._oneec_grand_mlp(c.vec) for c in carriers], 1)
        head_vecs = concatenate(
            [parameter(self._oneec_head_pad)] + [self._oneec_head_mlp(c.vec) for c in carriers], 1)
        mod_vecs = concatenate(
            [parameter(self._oneec_mod_pad)] + [self._oneec_mod_mlp(c.vec) for c in carriers], 1)
        sib_vecs = concatenate(
            [parameter(self._oneec_sib_pad)] + [self._oneec_sib_mlp(c.vec) for c in carriers], 1)
        grand_mod = self._oneec_grand_mod_biaffine(grand_vecs, mod_vecs)
        head_mod = self._oneec_head_mod_biaffine(head_vecs, mod_vecs)
        sib_mod = self._oneec_sib_mod_biaffine(sib_vecs, mod_vecs)
        nonproj_mod = self._oneec_non_proj_head_mod_biaffine(head_vecs, mod_vecs)
        # Channel order: 0 grand, 1 head, 2 sibling, 3 non-projective head.
        exprs = concatenate([grand_mod, head_mod, sib_mod, nonproj_mod], 2)
        return exprs.value(), exprs

    def sent_loss(self, graph, carriers):
        """Return (number of correct arcs, list of loss expressions)."""
        gold_heads, gold_traces = graph.oneec_heads, graph.oneec_traces
        scores, exprs = self._oneec_arcs_eval(carriers)
        # Cost augmentation on the two head channels (projective and not).
        for m, h in enumerate(gold_heads):
            scores[h + 1, m + 1, 1] -= 1.
            scores[h + 1, m + 1, 3] -= 1.
        heads, traces = parse_1ec_o3(scores)
        correct = sum(1 for h, g in zip(heads[1:], gold_heads[1:]) if h == g)
        # r < 0 marks trace entries that carry no scorable factor.
        pred_traces = {(int(i), int(j), int(r)) for i, j, r in traces if r >= 0}
        loss = [exprs[i][j][r] for i, j, r in pred_traces - gold_traces]
        loss.extend(-exprs[i][j][r] for i, j, r in gold_traces - pred_traces)
        return correct, loss

    def predict(self, graph, carriers):
        """Attach predicted heads to `graph` in place; return self."""
        if len(graph.nodes) >= 100:
            # Decoding is too expensive for long sentences; mark heads unknown.
            graph.heads = -np.ones(len(graph.nodes), dtype=int)
            return self
        scores, exprs = self._oneec_arcs_eval(carriers)
        graph.heads, _ = parse_1ec_o3(scores)
        return self
44.047867
225
0.629246
5,772
42,330
4.277893
0.042793
0.039851
0.031589
0.023854
0.823506
0.775879
0.714685
0.660862
0.637453
0.60392
0
0.020224
0.252421
42,330
960
226
44.09375
0.760057
0.004252
0
0.575843
0
0
0.027527
0.001044
0
0
0
0
0
1
0.077247
false
0
0.018258
0
0.155899
0
0
0
0
null
0
0
0
1
1
1
0
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
4
fee5908195daf2a0d00c5ef0add5bb1edc10cab5
673
py
Python
connect_four/ad.py
mars-tiger/kidspython
63d62d0083fb0c0904af6078916e0fe8ab2b9278
[ "MIT" ]
6
2021-08-21T17:00:54.000Z
2021-08-28T22:12:57.000Z
connect_four/ad.py
mars-tiger/kidspython
63d62d0083fb0c0904af6078916e0fe8ab2b9278
[ "MIT" ]
null
null
null
connect_four/ad.py
mars-tiger/kidspython
63d62d0083fb0c0904af6078916e0fe8ab2b9278
[ "MIT" ]
2
2021-08-21T17:13:33.000Z
2021-08-28T22:12:59.000Z
import turtle def draw_piece(row, col, color): pass def draw(x, y): global board, rb, winner pass def check_winner(): pass def draw_board(): pass radius = 23 gap = 2 square_size = 2 * (radius + gap) offset_x = -180 offset_y = 100 board = [ [None, None, None, None, None, None, None], [None, None, None, None, None, None, None], [None, None, None, None, None, None, None], [None, None, None, None, None, None, None], [None, None, None, None, None, None, None], [None, None, None, None, None, None, None], ] winner = "" rb = "r" t = turtle.Turtle() t.ht() t.speed(200) draw_board() wn = turtle.Screen() wn.onclick(draw) wn.mainloop()
14.630435
45
0.619614
105
673
3.904762
0.333333
0.8
1.170732
1.521951
0.409756
0.409756
0.409756
0.409756
0.409756
0.409756
0
0.024575
0.213967
673
45
46
14.955556
0.750473
0
0
0.3125
0
0
0.001486
0
0
0
0
0
0
1
0.125
false
0.125
0.03125
0
0.15625
0
0
0
0
null
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
1
0
0
0
0
0
4
3a2530558a94338d58fe13c5d114764b5b226e65
386
py
Python
spearmint/sampling/__init__.py
ascripter/Spearmint
81b8cf5fa1462c09569bf323630cbee356c5897b
[ "RSA-MD" ]
null
null
null
spearmint/sampling/__init__.py
ascripter/Spearmint
81b8cf5fa1462c09569bf323630cbee356c5897b
[ "RSA-MD" ]
null
null
null
spearmint/sampling/__init__.py
ascripter/Spearmint
81b8cf5fa1462c09569bf323630cbee356c5897b
[ "RSA-MD" ]
null
null
null
from __future__ import absolute_import from .abstract_sampler import AbstractSampler from .slice_sampler import SliceSampler from .whitened_prior_slice_sampler import WhitenedPriorSliceSampler from .elliptical_slice_sampler import EllipticalSliceSampler __all__ = ["AbstractSampler", "SliceSampler", "WhitenedPriorSliceSampler", "EllipticalSliceSampler"]
55.142857
100
0.805699
33
386
8.939394
0.454545
0.176271
0.183051
0
0
0
0
0
0
0
0
0
0.147668
386
7
100
55.142857
0.896657
0
0
0
0
0
0.191214
0.121447
0
0
0
0
0
1
0
false
0
0.833333
0
0.833333
0
0
0
0
null
0
1
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
1
0
0
4
3a331375b229a1bc991f67d8ec7b3d94e325900f
12,428
py
Python
kitti_tools/upper_bound.py
dingmyu/mmdetection
705dc91ca43ea62f4f69355a81271d5bd81268ca
[ "Apache-2.0" ]
null
null
null
kitti_tools/upper_bound.py
dingmyu/mmdetection
705dc91ca43ea62f4f69355a81271d5bd81268ca
[ "Apache-2.0" ]
null
null
null
kitti_tools/upper_bound.py
dingmyu/mmdetection
705dc91ca43ea62f4f69355a81271d5bd81268ca
[ "Apache-2.0" ]
null
null
null
import easydict as edict  # noqa: F401 -- unused here, kept for environment parity
import subprocess
import os
import os.path as osp
import sys
import re
import math  # noqa: F401 -- unused here, kept for environment parity
import numpy as np

# Regex fragment matching one signed integer/float field of a KITTI line.
_FPAT = r'[-+]?\d*\.\d+|[-+]?\d+'

# Class-name -> numeric label for the classes we keep; every other class
# (Van, Truck, Tram, Misc, DontCare, ...) is silently dropped.
TYPE2LABEL = dict(
    Background=0,
    Car=1,
    Cyclist=2,
    Pedestrian=3,
    # Van=4,
    # Person_sitting=5
    # Truck = 6
    # Tram = 7
    # Misc = 8
)

# Hard-coded cluster paths of the original experiment.
MMDET_ROOT = '/mnt/lustre/dingmingyu/2020/mmdetection'
GT_DIR = os.path.join(MMDET_ROOT, 'kitti_tools/split1/validation/label_2')


def _kitti_pattern(num_fields):
    """Compile a full-line regex for one KITTI annotation line.

    A line is: a class name, ``num_fields`` mandatory whitespace-separated
    numeric fields, then one optional trailing numeric field.
    """
    body = r'\s+'.join('(%s)' % _FPAT for _ in range(num_fields))
    return re.compile(r'([a-zA-Z\-\?\_]+)\s+' + body + r'\s*((%s)?)\n' % _FPAT)


# Ground-truth labels carry 14 numeric fields; result files add a 15th (score).
# Compiled once at module level instead of per call.
_GT_PATTERN = _kitti_pattern(14)
_PRE_PATTERN = _kitti_pattern(15)


def _parse_kitti_file(file, pattern, with_score):
    """Parse one KITTI label/result file into box tuples.

    KITTI columns (devkit readme):
        type truncated occluded alpha bbox(l t r b) dim(h w l) loc(x y z) rotation_y [score]

    Returns a list of tuples
    ``(type, x1, y1, x2, y2, h3d, w3d, l3d, cx3d, cy3d, cz3d, rotY[, score])``
    keeping only classes listed in ``TYPE2LABEL``.
    """
    boxes = []
    # `with` guarantees the handle is closed (the original leaked it).
    with open(file, 'r') as text_file:
        for line in text_file:
            parsed = pattern.fullmatch(line)
            if parsed is None:
                continue
            if parsed.group(1) not in TYPE2LABEL:
                continue
            # Groups 2-4 (truncation, occlusion, alpha) are matched by the
            # regex but intentionally not returned; groups 5..15 are the
            # 2D box, 3D dimensions, 3D location and rotation_y.
            values = [float(parsed.group(i)) for i in range(5, 16)]
            if with_score:
                values.append(float(parsed.group(16)))
            boxes.append(tuple([parsed.group(1)] + values))
    return boxes


def read_kitti_label(file):
    """Read a KITTI ground-truth label file from disk.

    Args:
        file (str): path to a single label file for an image.

    Returns:
        list[tuple]: (type, x1, y1, x2, y2, h3d, w3d, l3d,
                      cx3d, cy3d, cz3d, rotY) per kept object.
    """
    return _parse_kitti_file(file, _GT_PATTERN, with_score=False)


def read_kitti_pre(file):
    """Read a KITTI detection-result file from disk.

    Same tuple layout as :func:`read_kitti_label` with a trailing
    confidence score appended.
    """
    return _parse_kitti_file(file, _PRE_PATTERN, with_score=True)


def calculateIoU(candidateBound, groundTruthBound):
    """Plain 2D IoU between two (x1, y1, x2, y2) boxes.

    Extra trailing elements in either sequence are ignored.  Uses raw
    ``x2 - x1`` extents (no KITTI ``+1`` pixel convention).  Returns 0.0
    for a degenerate (zero-area) pair instead of dividing by zero.
    """
    cx1, cy1, cx2, cy2 = candidateBound[0], candidateBound[1], candidateBound[2], candidateBound[3]
    gx1, gy1, gx2, gy2 = groundTruthBound[0], groundTruthBound[1], groundTruthBound[2], groundTruthBound[3]
    carea = (cx2 - cx1) * (cy2 - cy1)  # candidate area
    garea = (gx2 - gx1) * (gy2 - gy1)  # ground-truth area
    inter_w = max(0, min(cx2, gx2) - max(cx1, gx1))
    inter_h = max(0, min(cy2, gy2) - max(cy1, gy1))
    inter = inter_w * inter_h          # intersection area
    union = carea + garea - inter
    return inter / union if union > 0 else 0.0


def parse_kitti_result(respath, mode='new'):
    """Average a KITTI devkit precision-curve file into (easy, mod, hard).

    Args:
        respath (str): path to a ``stats_*_detection*.txt`` file; three rows
            (easy/moderate/hard) of up to 41 recall-point precisions.
        mode (str): 'old' averages the 11 points ``0:41:4`` (R11 metric);
            anything else averages points ``1:41`` (R40 metric).

    Returns:
        tuple[float, float, float]: mean precision for easy/moderate/hard.
    """
    acc = np.zeros([3, 41], dtype=float)
    with open(respath, 'r') as text_file:
        for lind, line in enumerate(text_file):
            for i, num in enumerate(re.findall(r'([\d]+\.?[\d]*)', line)):
                acc[lind, i] = float(num)
    cols = slice(0, 41, 4) if mode == 'old' else slice(1, 41, 1)
    return np.mean(acc[0, cols]), np.mean(acc[1, cols]), np.mean(acc[2, cols])


def _write_prediction(fw, pre):
    """Write one prediction row in KITTI submission format.

    Re-inserts the placeholder truncation (-1), occlusion (-1) and alpha (0)
    columns that read_kitti_pre() stripped out.
    """
    row = [str(item) for item in pre]
    row[1:1] = ['-1', '-1', '0']
    print(' '.join(row), file=fw)


def _build_upper_bound_predictions():
    """Dump upper-bound predictions to ``test_new/data/``.

    For every validation frame, any prediction whose 2D box overlaps a
    same-class ground truth with IoU > 0.5 gets the ground truth's
    rotation_y and depth (z) copied onto it.
    """
    for index in range(3769):
        if index % 200 == 0:
            print(index)
        frame_id = '%06d' % index  # renamed from `id` (shadowed the builtin)
        gts = read_kitti_label(os.path.join(GT_DIR, frame_id + '.txt'))
        pres = read_kitti_pre(os.path.join('./data', frame_id + '.txt'))
        with open('test_new/data/%s.txt' % frame_id, 'w') as fw:
            for pre in pres:
                pre = list(pre)
                for gt in gts:
                    if pre[0] != gt[0]:
                        continue
                    if calculateIoU(gt[1:], pre[1:]) > 0.5:
                        pre[11] = gt[11]  # rotation_y (Ry)
                        pre[10] = gt[10]  # z (depth)
                        break
                _write_prediction(fw, pre)


def _run_devkit_eval():
    """Run the compiled KITTI C++ evaluator over ``test_new/``."""
    script = os.path.join(MMDET_ROOT, 'kitti_tools', 'split1', 'devkit', 'cpp', 'evaluate_object')
    os.chdir(MMDET_ROOT)
    print(os.getcwd())
    with open(os.devnull, 'w') as devnull:
        # Evaluator output goes to its stats files; stderr is discarded.
        subprocess.check_output([script, os.path.join(sys.path[0], 'test_new')], stderr=devnull)
    os.chdir(sys.path[0])
    print(os.path.join(sys.path[0], 'test_new'))


def _print_ap(respath, tag, lbl):
    """Print R11 and R40 averages for one stats file, if the devkit wrote it."""
    if not os.path.exists(respath):
        return
    for mode, metric in (('old', 'R11'), ('new', 'R40')):
        easy, mod, hard = parse_kitti_result(respath, mode=mode)
        print('{}_test_epoch {} {} {} --> easy: {:0.4f}, mod: {:0.4f}, hard: {:0.4f}'.format(
            metric, 1, tag, lbl, easy, mod, hard))


def _report(results_path):
    """Print 2D / ground-plane / 3D AP for Car, Cyclist and Pedestrian."""
    stats_dir = results_path.replace('/data', '')
    for lbl in ['Car', 'Cyclist', 'Pedestrian']:
        lbl = lbl.lower()
        _print_ap(os.path.join(stats_dir, 'stats_{}_detection.txt'.format(lbl)), '2d', lbl)
        _print_ap(os.path.join(stats_dir, 'stats_{}_detection_ground.txt'.format(lbl)), 'gr', lbl)
        _print_ap(os.path.join(stats_dir, 'stats_{}_detection_3d.txt'.format(lbl)), '3d', lbl)


def main():
    os.chdir(sys.path[0])
    # Replaces `os.system('mkdir ...')`: no shell, no error output when the
    # directory already exists.
    os.makedirs('test_new/data', exist_ok=True)
    _build_upper_bound_predictions()
    _run_devkit_eval()
    _report(osp.join(sys.path[0], 'test_new', 'data'))


if __name__ == '__main__':
    main()
33.498652
120
0.507403
1,614
12,428
3.83829
0.172243
0.02502
0.028087
0.046812
0.772559
0.736885
0.730105
0.708152
0.681679
0.659403
0
0.047724
0.340763
12,428
371
121
33.498652
0.708288
0.099211
0
0.453271
0
0.046729
0.147566
0.07077
0
0
0
0
0
1
0.018692
false
0
0.037383
0
0.074766
0.079439
0
0
0
null
0
0
0
0
1
1
1
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
4
3a3e4c4452666cfc7abb9fb2599aacac044f2799
232
py
Python
bench/fix.py
maaku/forthy2
7ee996ff25830f45e14d7dac29933f45b0fb6175
[ "MIT" ]
53
2019-10-20T00:56:59.000Z
2021-02-18T20:30:16.000Z
bench/fix.py
maaku/forthy2
7ee996ff25830f45e14d7dac29933f45b0fb6175
[ "MIT" ]
2
2019-10-28T11:02:10.000Z
2020-06-28T20:10:22.000Z
bench/fix.py
maaku/forthy2
7ee996ff25830f45e14d7dac29933f45b0fb6175
[ "MIT" ]
6
2019-10-28T10:55:59.000Z
2021-02-18T20:30:18.000Z
# Micro-benchmark: cost of decimal.Decimal accumulation versus plain float
# accumulation.  bench() runs each snippet 10 times with the given setup
# code and prints whatever timing summary it produces.
from bench import bench

# Decimal: 100k iterations of Decimal division + addition.
print(bench(10, '''
from decimal import Decimal
''', '''
x = Decimal(0)
for _ in range(100000):
    x += Decimal(1) / Decimal(3)
'''))

# Float: 1M iterations of float addition (no setup needed).
print(bench(10, '', '''
x = 0.0
for _ in range(1000000):
    x += 0.1
'''))
14.5
33
0.581897
36
232
3.694444
0.416667
0.150376
0.180451
0.165414
0
0
0
0
0
0
0
0.12766
0.189655
232
15
34
15.466667
0.579787
0
0
0.166667
0
0
0.62069
0
0
0
0
0
0
1
0
true
0
0.166667
0
0.166667
0.166667
0
0
0
null
0
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
null
0
0
0
0
0
0
1
0
0
0
0
0
0
4
28e7eed9b38168493353eef7c61b78282b25014e
26,297
py
Python
tests/test_consensus_simple.py
ValentinATA/sirius-sdk-python
b26aea94f6fc94980a4c9be6d2b8f2345c875964
[ "Apache-2.0" ]
9
2020-08-10T11:57:35.000Z
2022-03-18T21:45:36.000Z
tests/test_consensus_simple.py
ValentinATA/sirius-sdk-python
b26aea94f6fc94980a4c9be6d2b8f2345c875964
[ "Apache-2.0" ]
3
2021-03-12T22:42:27.000Z
2021-05-18T11:46:01.000Z
tests/test_consensus_simple.py
ValentinATA/sirius-sdk-python
b26aea94f6fc94980a4c9be6d2b8f2345c875964
[ "Apache-2.0" ]
7
2020-10-30T15:54:45.000Z
2022-02-28T06:59:59.000Z
import copy
from typing import List
from datetime import datetime

import pytest

import sirius_sdk
from sirius_sdk import Agent, P2PConnection
from sirius_sdk.agent.microledgers.abstract import AbstractMicroledger
from sirius_sdk.agent.consensus.simple.state_machines import MicroLedgerSimpleConsensus
from sirius_sdk.agent.consensus.simple.messages import *

from .conftest import get_pairwise
from .helpers import run_coroutines, ServerTestSuite


async def routine_of_ledger_creator(
        uri: str, credentials: bytes, p2p: P2PConnection, me: Pairwise.Me,
        participants: List[str], ledger_name: str, genesis: List[dict]
):
    """Coroutine playing the ledger-creator role.

    Opens an SDK context, wraps the genesis dicts into Transactions and
    drives the consensus state machine to create the microledger.

    Returns the (success, ledger) pair produced by init_microledger().
    """
    async with sirius_sdk.context(uri, credentials, p2p):
        machine = MicroLedgerSimpleConsensus(me)
        genesis = [Transaction.create(txn) for txn in genesis]
        success, ledger = await machine.init_microledger(ledger_name, participants, genesis)
        return success, ledger


async def routine_of_ledger_creation_acceptor(uri: str, credentials: bytes, p2p: P2PConnection):
    """Coroutine playing the acceptor role of a ledger-creation proposal.

    Waits for the first inbound event, asserts it is an
    InitRequestLedgerMessage from a known pairwise, and accepts it.

    Returns the (success, ledger) pair produced by accept_microledger().
    """
    async with sirius_sdk.context(uri, credentials, p2p):
        listener = await sirius_sdk.subscribe()
        event = await listener.get_one()
        assert event.pairwise is not None
        propose = event.message
        assert isinstance(propose, InitRequestLedgerMessage)
        machine = MicroLedgerSimpleConsensus(event.pairwise.me)
        success, ledger = await machine.accept_microledger(event.pairwise, propose)
        return success, ledger


async def routine_of_txn_committer(
        uri: str, credentials: bytes, p2p: P2PConnection, me: Pairwise.Me,
        participants: List[str],
        ledger: Union[AbstractMicroledger, List[AbstractMicroledger]],
        txns: List[dict]
):
    """Coroutine committing transactions to one ledger or several in parallel.

    A single AbstractMicroledger goes through commit(); a list of ledgers
    goes through commit_in_parallel() (which returns only a success flag,
    so `txns` is then the unchanged input list).
    """
    async with sirius_sdk.context(uri, credentials, p2p):
        machine = MicroLedgerSimpleConsensus(me)
        txns = [Transaction.create(txn) for txn in txns]
        if isinstance(ledger, AbstractMicroledger):
            success, txns = await machine.commit(ledger, participants, txns)
        else:
            success = await machine.commit_in_parallel(ledger, participants, txns)
        return success, txns


async def routine_of_txn_acceptor(
        uri: str, credentials: bytes, p2p: P2PConnection, txns: List[Transaction] = None
):
    """Coroutine accepting the first commit proposal that arrives.

    Loops over inbound events until a (parallel) transactions proposal
    shows up, then accepts it and returns the success flag.  If `txns`
    is given, it overrides the proposal's transactions before acceptance
    (used to simulate a tampered proposal).
    """
    async with sirius_sdk.context(uri, credentials, p2p):
        listener = await sirius_sdk.subscribe()
        while True:
            event = await listener.get_one()
            assert event.pairwise is not None
            propose = event.message
            if isinstance(propose, ProposeTransactionsMessage):
                if txns:
                    propose['transactions'] = txns
                machine = MicroLedgerSimpleConsensus(event.pairwise.me)
                success = await machine.accept_commit(event.pairwise, propose)
                return success
            elif isinstance(propose, ProposeParallelTransactionsMessage):
                machine = MicroLedgerSimpleConsensus(event.pairwise.me)
                success = await machine.accept_commit_parallel(event.pairwise, propose)
                return success


@pytest.mark.asyncio
async def test_init_ledger_messaging(A: Agent, B: Agent, ledger_name: str):
    """Check InitRequestLedgerMessage signing and response mirroring.

    Both sides sign the request, every signature combination verifies,
    and a response assigned from the request carries an identical payload
    apart from its own '@id'/'@type'.
    """
    await A.open()
    await B.open()
    try:
        A2B = await get_pairwise(A, B)
        B2A = await get_pairwise(B, A)
        # did:peer: prefix marks the DIDs as peer DIDs for this exchange.
        A2B.me.did = 'did:peer:' + A2B.me.did
        B2A.me.did = 'did:peer:' + B2A.me.did
        genesis_txns = [
            Transaction({"reqId": 1, "identifier": "5rArie7XKukPCaEwq5XGQJnM9Fc5aZE3M9HAPVfMU2xC", "op": "op1"})
        ]
        request = InitRequestLedgerMessage(
            participants=[A2B.me.did, B2A.me.did],
            ledger_name=ledger_name,
            genesis=genesis_txns,
            root_hash='xxx'
        )
        await request.add_signature(A.wallet.crypto, A2B.me)
        await request.add_signature(B.wallet.crypto, B2A.me)
        assert len(request.signatures) == 2
        # Each wallet must verify each participant's signature and all at once.
        await request.check_signatures(A.wallet.crypto, A2B.me.did)
        await request.check_signatures(A.wallet.crypto, B2A.me.did)
        await request.check_signatures(A.wallet.crypto)
        await request.check_signatures(B.wallet.crypto, A2B.me.did)
        await request.check_signatures(B.wallet.crypto, B2A.me.did)
        await request.check_signatures(B.wallet.crypto)
        response = InitResponseLedgerMessage()
        response.assign_from(request)
        payload1 = dict(**request)
        payload2 = dict(**response)
        # Payloads differ only by the per-message '@id'/'@type' fields.
        assert payload1 != payload2
        del payload1['@id']
        del payload1['@type']
        del payload2['@id']
        del payload2['@type']
        assert payload1 == payload2
    finally:
        await A.close()
        await B.close()
@pytest.mark.asyncio async def test_transaction_messaging(A: Agent, B: Agent, ledger_name: str): await A.open() await B.open() try: a2b = await get_pairwise(A, B) b2a = await get_pairwise(B, A) a2b.me.did = 'did:peer:' + a2b.me.did b2a.me.did = 'did:peer:' + b2a.me.did genesis_txns = [ Transaction({"reqId": 1, "identifier": "5rArie7XKukPCaEwq5XGQJnM9Fc5aZE3M9HAPVfMU2xC", "op": "op1"}) ] ledger_for_a, txns = await A.microledgers.create(ledger_name, genesis_txns) ledger_for_b, txns = await B.microledgers.create(ledger_name, genesis_txns) new_transactions = [ Transaction({"reqId": 2, "identifier": "5rArie7XKukPCaEwq5XGQJnM9Fc5aZE3M9HAPVfMU2xC", "op": "op2"}), Transaction({"reqId": 3, "identifier": "5rArie7XKukPCaEwq5XGQJnM9Fc5aZE3M9HAPVfMU2xC", "op": "op3"}), ] pos1, pos2, new_txns = await ledger_for_a.append(new_transactions) # A -> B state = MicroLedgerState( { 'name': ledger_for_a.name, 'seq_no': ledger_for_a.seq_no, 'size': ledger_for_a.size, 'uncommitted_size': ledger_for_a.uncommitted_size, 'root_hash': ledger_for_a.root_hash, 'uncommitted_root_hash': ledger_for_a.uncommitted_root_hash } ) x = MicroLedgerState.from_ledger(ledger_for_a) assert state == x assert state.hash == x.hash propose = ProposeTransactionsMessage( transactions=new_txns, state=state ) propose.validate() # B -> A await ledger_for_b.append(propose.transactions) pre_commit = PreCommitTransactionsMessage( state=MicroLedgerState( { 'name': ledger_for_b.name, 'seq_no': ledger_for_b.seq_no, 'size': ledger_for_b.size, 'uncommitted_size': ledger_for_b.uncommitted_size, 'root_hash': ledger_for_b.root_hash, 'uncommitted_root_hash': ledger_for_b.uncommitted_root_hash } ) ) await pre_commit.sign_state(B.wallet.crypto, b2a.me) pre_commit.validate() ok, loaded_state_hash = await pre_commit.verify_state(A.wallet.crypto, a2b.their.verkey) assert ok is True assert loaded_state_hash == state.hash # A -> B commit = CommitTransactionsMessage() commit.add_pre_commit(a2b.their.did, pre_commit) commit.validate() 
states = await commit.verify_pre_commits(A.wallet.crypto, state) assert a2b.their.did in str(states) assert a2b.their.verkey in str(states) # B -> A (post-commit) post_commit = PostCommitTransactionsMessage() await post_commit.add_commit_sign(B.wallet.crypto, commit, b2a.me) post_commit.validate() ok = await post_commit.verify_commits(A.wallet.crypto, commit, [a2b.their.verkey]) assert ok is True finally: await A.close() await B.close() @pytest.mark.asyncio async def test_parallel_transactions_messaging(A: Agent, B: Agent, ledger_names: List[str]): await A.open() await B.open() try: txn_time = str(datetime.now()) a2b = await get_pairwise(A, B) b2a = await get_pairwise(B, A) a2b.me.did = 'did:peer:' + a2b.me.did b2a.me.did = 'did:peer:' + b2a.me.did genesis_txns = [ Transaction({"reqId": 1, "identifier": "5rArie7XKukPCaEwq5XGQJnM9Fc5aZE3M9HAPVfMU2xC", "op": "op1"}) ] ledgers_for_a = [] ledgers_for_b = [] for n in ledger_names: ledger_for_a, _ = await A.microledgers.create(n, genesis_txns) ledger_for_b, _ = await B.microledgers.create(n, genesis_txns) ledgers_for_a.append(ledger_for_a) ledgers_for_b.append(ledger_for_b) new_transactions = [ Transaction({"reqId": 2, "identifier": "5rArie7XKukPCaEwq5XGQJnM9Fc5aZE3M9HAPVfMU2xC", "op": "op2"}), Transaction({"reqId": 3, "identifier": "5rArie7XKukPCaEwq5XGQJnM9Fc5aZE3M9HAPVfMU2xC", "op": "op3"}), ] for txn in new_transactions: txn.time = txn_time # A -> B states_for_a = [] for ledger_for_a in ledgers_for_a: pos1, pos2, new_txns = await ledger_for_a.append(new_transactions) state = MicroLedgerState.from_ledger(ledger_for_a) states_for_a.append(state) propose = ProposeParallelTransactionsMessage(transactions=new_transactions, states=states_for_a) propose.validate() # B -> A states_for_b = [] for ledger_name in propose.ledgers: ledger_for_b = await B.microledgers.ledger(ledger_name) pos1, pos2, new_txns = await ledger_for_b.append(propose.transactions) states_for_b.append(MicroLedgerState.from_ledger(ledger_for_b)) pre_commit 
= PreCommitParallelTransactionsMessage(transactions=new_transactions, states=states_for_b) await pre_commit.sign_states(B.wallet.crypto, b2a.me) pre_commit.validate() ok, state_hash_for_b = await pre_commit.verify_state(A.wallet.crypto, a2b.their.verkey) assert ok is True assert state_hash_for_b == propose.hash # A -> B commit = CommitParallelTransactionsMessage() commit.add_pre_commit(a2b.their.did, pre_commit) commit.validate() states = await commit.verify_pre_commits(A.wallet.crypto, propose.hash) assert a2b.their.did in str(states) assert a2b.their.verkey in str(states) # B -> A (post-commit) post_commit = PostCommitParallelTransactionsMessage() await post_commit.add_commit_sign(B.wallet.crypto, commit, b2a.me) post_commit.validate() ok = await post_commit.verify_commits(A.wallet.crypto, commit, [a2b.their.verkey]) assert ok is True finally: await A.close() await B.close() @pytest.mark.asyncio async def test_parallel_batching_api_messaging(A: Agent, B: Agent, ledger_names: List[str]): txn_time = str(datetime.now()) await A.open() await B.open() try: a2b = await get_pairwise(A, B) b2a = await get_pairwise(B, A) a2b.me.did = 'did:peer:' + a2b.me.did b2a.me.did = 'did:peer:' + b2a.me.did genesis_txns = [ Transaction({"reqId": 1, "identifier": "5rArie7XKukPCaEwq5XGQJnM9Fc5aZE3M9HAPVfMU2xC", "op": "op1"}) ] ledgers_for_a = [] ledgers_for_b = [] for n in ledger_names: ledger_for_a, _ = await A.microledgers.create(n, genesis_txns) ledgers_for_a.append(ledger_for_a) ledger_for_b, _ = await B.microledgers.create(n, genesis_txns) ledgers_for_b.append(ledger_for_b) new_transactions = [ Transaction({"reqId": 2, "identifier": "5rArie7XKukPCaEwq5XGQJnM9Fc5aZE3M9HAPVfMU2xC", "op": "op2"}), Transaction({"reqId": 3, "identifier": "5rArie7XKukPCaEwq5XGQJnM9Fc5aZE3M9HAPVfMU2xC", "op": "op3"}), ] for txn in new_transactions: txn.time = txn_time batching_api_for_a = await A.microledgers.batched() batching_api_for_b = await B.microledgers.batched() await 
batching_api_for_a.open(ledger_names) await batching_api_for_b.open(ledger_names) try: # A -> B await batching_api_for_a.append(new_transactions) propose = ProposeParallelTransactionsMessage( transactions=new_transactions, states=[MicroLedgerState.from_ledger(item) for item in ledgers_for_a] ) propose.validate() # B -> A await batching_api_for_b.append(propose.transactions) states_for_b = [MicroLedgerState.from_ledger(item) for item in ledgers_for_b] pre_commit = PreCommitParallelTransactionsMessage(states=states_for_b) await pre_commit.sign_states(B.wallet.crypto, b2a.me) pre_commit.validate() ok, state_hash_for_b = await pre_commit.verify_state(A.wallet.crypto, a2b.their.verkey) assert ok is True assert state_hash_for_b == propose.hash # A -> B commit = CommitParallelTransactionsMessage() commit.add_pre_commit(a2b.their.did, pre_commit) commit.validate() states = await commit.verify_pre_commits(A.wallet.crypto, propose.hash) assert a2b.their.did in str(states) assert a2b.their.verkey in str(states) # B -> A (post-commit) post_commit = PostCommitParallelTransactionsMessage() await post_commit.add_commit_sign(B.wallet.crypto, commit, b2a.me) post_commit.validate() ok = await post_commit.verify_commits(A.wallet.crypto, commit, [a2b.their.verkey]) assert ok is True finally: await batching_api_for_a.close() await batching_api_for_b.close() finally: await A.close() await B.close() @pytest.mark.asyncio async def test_simple_consensus_init_ledger( A: Agent, B: Agent, C: Agent, ledger_name: str, test_suite: ServerTestSuite ): A_params = test_suite.get_agent_params('agent1') B_params = test_suite.get_agent_params('agent2') C_params = test_suite.get_agent_params('agent3') await A.open() await B.open() await C.open() try: A2B = await get_pairwise(A, B) A2C = await get_pairwise(A, C) assert A2B.me == A2C.me B2A = await get_pairwise(B, A) B2C = await get_pairwise(B, C) assert B2A.me == B2C.me C2A = await get_pairwise(C, A) C2B = await get_pairwise(C, B) assert C2A.me == C2B.me 
participants = [ A2B.me.did, A2B.their.did, A2C.their.did ] genesis = [ {"reqId": 1, "identifier": "5rArie7XKukPCaEwq5XGQJnM9Fc5aZE3M9HAPVfMU2xC", "op": "op1"}, {"reqId": 2, "identifier": "2btLJAAb1S3x6hZYdVyAePjqtQYi2ZBSRGy4569RZu8h", "op": "op2"} ] coro_creator = routine_of_ledger_creator( A_params['server_address'], A_params['credentials'], A_params['p2p'], A2B.me, participants, ledger_name, genesis ) coro_acceptor1 = routine_of_ledger_creation_acceptor( B_params['server_address'], B_params['credentials'], B_params['p2p'], ) coro_acceptor2 = routine_of_ledger_creation_acceptor( C_params['server_address'], C_params['credentials'], C_params['p2p'], ) stamp1 = datetime.now() print('> begin') await run_coroutines(coro_creator, coro_acceptor1, coro_acceptor2, timeout=30) print('> end') stamp2 = datetime.now() delta = stamp2 - stamp1 print(f'***** Consensus timeout: {delta.seconds}') is_exists_for_A = await A.microledgers.is_exists(ledger_name) is_exists_for_B = await B.microledgers.is_exists(ledger_name) is_exists_for_C = await C.microledgers.is_exists(ledger_name) assert is_exists_for_A assert is_exists_for_B assert is_exists_for_C for agent in [A, B, C]: ledger = await agent.microledgers.ledger(ledger_name) txns = await ledger.get_all_transactions() assert len(txns) == 2 assert 'op1' in str(txns) assert 'op2' in str(txns) finally: await A.close() await B.close() await C.close() @pytest.mark.asyncio async def test_simple_consensus_commit( A: Agent, B: Agent, C: Agent, ledger_name: str, test_suite: ServerTestSuite ): A_params = test_suite.get_agent_params('agent1') B_params = test_suite.get_agent_params('agent2') C_params = test_suite.get_agent_params('agent3') await A.open() await B.open() await C.open() try: A2B = await get_pairwise(A, B) A2C = await get_pairwise(A, C) assert A2B.me == A2C.me B2A = await get_pairwise(B, A) B2C = await get_pairwise(B, C) assert B2A.me == B2C.me C2A = await get_pairwise(C, A) C2B = await get_pairwise(C, B) assert C2A.me == C2B.me 
participants = [ A2B.me.did, A2B.their.did, A2C.their.did ] genesis = [ {"reqId": 1, "identifier": "5rArie7XKukPCaEwq5XGQJnM9Fc5aZE3M9HAPVfMU2xC", "op": "op1"}, {"reqId": 2, "identifier": "2btLJAAb1S3x6hZYdVyAePjqtQYi2ZBSRGy4569RZu8h", "op": "op2"} ] ledger_for_a, _ = await A.microledgers.create(ledger_name, genesis) await B.microledgers.create(ledger_name, genesis) await C.microledgers.create(ledger_name, genesis) txns = [ {"reqId": 3, "identifier": "5rArie7XKukPCaEwq5XGQJnM9Fc5aZE3M9HAPVfMU2xC", "op": "op3"}, {"reqId": 4, "identifier": "2btLJAAb1S3x6hZYdVyAePjqtQYi2ZBSRGy4569RZu8h", "op": "op4"}, {"reqId": 5, "identifier": "2btLJAAb1S3x6hZYdVyAePjqtQYi2ZBSRGy4569RZu8h", "op": "op5"}, ] coro_committer = routine_of_txn_committer( A_params['server_address'], A_params['credentials'], A_params['p2p'], A2B.me, participants, ledger_for_a, txns ) coro_acceptor1 = routine_of_txn_acceptor( B_params['server_address'], B_params['credentials'], B_params['p2p'], ) coro_acceptor2 = routine_of_txn_acceptor( C_params['server_address'], C_params['credentials'], C_params['p2p'], ) stamp1 = datetime.now() print('> begin') await run_coroutines(coro_committer, coro_acceptor1, coro_acceptor2, timeout=60) print('> end') stamp2 = datetime.now() delta = stamp2 - stamp1 print(f'***** Consensus timeout: {delta.seconds}') ledger_for_a = await A.microledgers.ledger(ledger_name) ledger_for_b = await B.microledgers.ledger(ledger_name) ledger_for_c = await C.microledgers.ledger(ledger_name) for ledger in [ledger_for_a, ledger_for_b, ledger_for_c]: all_txns = await ledger.get_all_transactions() assert 'op3' in str(all_txns) assert 'op4' in str(all_txns) assert 'op5' in str(all_txns) finally: await A.close() await B.close() await C.close() @pytest.mark.asyncio async def test_simple_consensus_commit_parallel( A: Agent, B: Agent, C: Agent, ledger_names: List[str], test_suite: ServerTestSuite ): A_params = test_suite.get_agent_params('agent1') B_params = test_suite.get_agent_params('agent2') C_params 
= test_suite.get_agent_params('agent3') await A.open() await B.open() await C.open() try: A2B = await get_pairwise(A, B) A2C = await get_pairwise(A, C) assert A2B.me == A2C.me B2A = await get_pairwise(B, A) B2C = await get_pairwise(B, C) assert B2A.me == B2C.me C2A = await get_pairwise(C, A) C2B = await get_pairwise(C, B) assert C2A.me == C2B.me participants = [ A2B.me.did, A2B.their.did, A2C.their.did ] genesis = [ {"reqId": 1, "identifier": "5rArie7XKukPCaEwq5XGQJnM9Fc5aZE3M9HAPVfMU2xC", "op": "op1"}, {"reqId": 2, "identifier": "2btLJAAb1S3x6hZYdVyAePjqtQYi2ZBSRGy4569RZu8h", "op": "op2"} ] txns = [ {"reqId": 3, "identifier": "5rArie7XKukPCaEwq5XGQJnM9Fc5aZE3M9HAPVfMU2xC", "op": "op3"}, {"reqId": 4, "identifier": "2btLJAAb1S3x6hZYdVyAePjqtQYi2ZBSRGy4569RZu8h", "op": "op4"}, {"reqId": 5, "identifier": "2btLJAAb1S3x6hZYdVyAePjqtQYi2ZBSRGy4569RZu8h", "op": "op5"}, ] # Init ledgers leader_ledgers = [] for name in ledger_names: ledger_of_leader, _ = await A.microledgers.create(name, genesis) leader_ledgers.append(ledger_of_leader) await B.microledgers.create(name, genesis) await C.microledgers.create(name, genesis) coro_committer = routine_of_txn_committer( A_params['server_address'], A_params['credentials'], A_params['p2p'], A2B.me, participants, leader_ledgers, txns ) coro_acceptor1 = routine_of_txn_acceptor( B_params['server_address'], B_params['credentials'], B_params['p2p'], ) coro_acceptor2 = routine_of_txn_acceptor( C_params['server_address'], C_params['credentials'], C_params['p2p'], ) stamp1 = datetime.now() print('> begin') await run_coroutines( coro_committer, coro_acceptor1, coro_acceptor2, timeout=60 ) print('> end') stamp2 = datetime.now() delta = stamp2 - stamp1 print(f'***** Consensus timeout: {delta.seconds}') for name in ledger_names: ledger_for_a = await A.microledgers.ledger(name) ledger_for_b = await B.microledgers.ledger(name) ledger_for_c = await C.microledgers.ledger(name) for ledger in [ledger_for_a, ledger_for_b, ledger_for_c]: all_txns = 
await ledger.get_all_transactions() assert 'op3' in str(all_txns) assert 'op4' in str(all_txns) assert 'op5' in str(all_txns) finally: await A.close() await B.close() await C.close() @pytest.mark.asyncio async def test_simple_consensus_error(A: Agent, B: Agent, C: Agent, ledger_name: str, test_suite: ServerTestSuite): A_params = test_suite.get_agent_params('agent1') B_params = test_suite.get_agent_params('agent2') C_params = test_suite.get_agent_params('agent3') await A.open() await B.open() await C.open() try: A2B = await get_pairwise(A, B) A2C = await get_pairwise(A, C) assert A2B.me == A2C.me B2A = await get_pairwise(B, A) B2C = await get_pairwise(B, C) assert B2A.me == B2C.me C2A = await get_pairwise(C, A) C2B = await get_pairwise(C, B) assert C2A.me == C2B.me participants = [ A2B.me.did, A2B.their.did, A2C.their.did ] genesis = [ {"reqId": 1, "identifier": "5rArie7XKukPCaEwq5XGQJnM9Fc5aZE3M9HAPVfMU2xC", "op": "op1"}, {"reqId": 2, "identifier": "2btLJAAb1S3x6hZYdVyAePjqtQYi2ZBSRGy4569RZu8h", "op": "op2"} ] ledger_for_a, _ = await A.microledgers.create(ledger_name, genesis) initial_state_root_hash = ledger_for_a.root_hash await B.microledgers.create(ledger_name, genesis) await C.microledgers.create(ledger_name, genesis) txns = [ {"reqId": 3, "identifier": "5rArie7XKukPCaEwq5XGQJnM9Fc5aZE3M9HAPVfMU2xC", "op": "op3"}, {"reqId": 4, "identifier": "2btLJAAb1S3x6hZYdVyAePjqtQYi2ZBSRGy4569RZu8h", "op": "op4"}, {"reqId": 5, "identifier": "2btLJAAb1S3x6hZYdVyAePjqtQYi2ZBSRGy4569RZu8h", "op": "op5"}, ] broken_txns = [ {"reqId": 3, "identifier": "BROKEN-5rArie7XKukPCaEwq5XGQJnM9Fc5aZE3M9HAPVfMU2xC", "op": "op3", "txnMetadata": {"seqNo": 4}}, {"reqId": 4, "identifier": "BROKEN-2btLJAAb1S3x6hZYdVyAePjqtQYi2ZBSRGy4569RZu8h", "op": "op4", "txnMetadata": {"seqNo": 5}}, {"reqId": 5, "identifier": "2btLJAAb1S3x6hZYdVyAePjqtQYi2ZBSRGy4569RZu8h", "op": "op5", "txnMetadata": {"seqNo": 6}}, ] broken_txns = [Transaction(txn) for txn in broken_txns] coro_committer = 
routine_of_txn_committer( A_params['server_address'], A_params['credentials'], A_params['p2p'], A2B.me, participants, ledger_for_a, txns ) coro_acceptor1 = routine_of_txn_acceptor( B_params['server_address'], B_params['credentials'], B_params['p2p'], broken_txns ) coro_acceptor2 = routine_of_txn_acceptor( C_params['server_address'], C_params['credentials'], C_params['p2p'], broken_txns ) stamp1 = datetime.now() print('> begin') results = await run_coroutines(coro_committer, coro_acceptor1, coro_acceptor2, timeout=60) print('> end') stamp2 = datetime.now() delta = stamp2 - stamp1 print(f'***** Consensus timeout: {delta.seconds}') for res in results: if type(res) is tuple: val = res[0] else: val = res assert val is False ledger_for_a = await A.microledgers.ledger(ledger_name) ledger_for_b = await B.microledgers.ledger(ledger_name) ledger_for_c = await C.microledgers.ledger(ledger_name) for ledger in [ledger_for_a, ledger_for_b, ledger_for_c]: await ledger.reload() assert ledger.root_hash == initial_state_root_hash all_txns = await ledger.get_all_transactions() assert 'op3' not in str(all_txns) assert 'op4' not in str(all_txns) assert 'op5' not in str(all_txns) finally: await A.close() await B.close() await C.close()
41.153365
136
0.624786
3,011
26,297
5.233477
0.069744
0.030841
0.032491
0.012946
0.833228
0.775098
0.727694
0.692981
0.681368
0.640119
0
0.028679
0.269384
26,297
638
137
41.217868
0.791495
0.005248
0
0.627178
0
0
0.10916
0.05263
0
0
0
0
0.090592
1
0
false
0
0.019164
0
0.027875
0.020906
0
0
0
null
0
0
0
1
1
1
0
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
4
3a67ec9d3a6c2328c28a0a2c84924678d75d2470
489
py
Python
datastructure/practice/c2/r_2_16.py
stoneyangxu/python-kata
979af91c74718a525dcd2a83fe53ec6342af9741
[ "MIT" ]
null
null
null
datastructure/practice/c2/r_2_16.py
stoneyangxu/python-kata
979af91c74718a525dcd2a83fe53ec6342af9741
[ "MIT" ]
null
null
null
datastructure/practice/c2/r_2_16.py
stoneyangxu/python-kata
979af91c74718a525dcd2a83fe53ec6342af9741
[ "MIT" ]
null
null
null
import unittest def range_length(start, stop, step=1): return max(0, (stop - start + step - 1) // step) class MyTestCase(unittest.TestCase): def test_something(self): self.assertEqual(range_length(0, 10), 10) self.assertEqual(range_length(0, 10, 2), 5) self.assertEqual(range_length(0, 10, 3), 4) self.assertEqual(range_length(-10, 0, 3), 4) self.assertEqual(range_length(-10, 0, -3), 0) if __name__ == '__main__': unittest.main()
25.736842
53
0.640082
69
489
4.318841
0.391304
0.221477
0.33557
0.436242
0.503356
0.503356
0.211409
0.211409
0.211409
0
0
0.072539
0.210634
489
18
54
27.166667
0.699482
0
0
0
0
0
0.01636
0
0
0
0
0
0.416667
1
0.166667
false
0
0.083333
0.083333
0.416667
0
0
0
0
null
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
1
0
0
0
0
0
0
0
0
0
4
3ab6b548318e6c3e2103366ec5a6a484708b6f2a
53
py
Python
spring/user.py
hala21/ipaas
564721e2590f1e2f87e1cf3346e3c3e6b816dc3d
[ "Apache-2.0" ]
null
null
null
spring/user.py
hala21/ipaas
564721e2590f1e2f87e1cf3346e3c3e6b816dc3d
[ "Apache-2.0" ]
null
null
null
spring/user.py
hala21/ipaas
564721e2590f1e2f87e1cf3346e3c3e6b816dc3d
[ "Apache-2.0" ]
null
null
null
''' Created on 2015年8月4日 @author: Administrator '''
8.833333
22
0.698113
5
53
7.4
1
0
0
0
0
0
0
0
0
0
0
0.133333
0.150943
53
5
23
10.6
0.688889
0.830189
0
null
0
null
0
0
null
0
0
0
null
1
null
true
0
0
null
null
null
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
1
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
0
0
0
0
0
4
3aba48267c8654d64fe851de8739c1fde8db98a7
272
py
Python
main.py
Pixel2008/FlaskImageDownloader
3c4cac53a31adb2012e058c3f6c6dc82ad7e66c2
[ "bzip2-1.0.6" ]
null
null
null
main.py
Pixel2008/FlaskImageDownloader
3c4cac53a31adb2012e058c3f6c6dc82ad7e66c2
[ "bzip2-1.0.6" ]
null
null
null
main.py
Pixel2008/FlaskImageDownloader
3c4cac53a31adb2012e058c3f6c6dc82ad7e66c2
[ "bzip2-1.0.6" ]
null
null
null
#! python3 ''' Sample script for downloading all images from website Pixel2008 All Rights Reserved ® ''' from app import app, db from app.models import User, Post @app.shell_context_processor def make_shell_context(): return {'db': db, 'User': User, 'Post': Post}
18.133333
53
0.724265
40
272
4.85
0.625
0.072165
0
0
0
0
0
0
0
0
0
0.022026
0.165441
272
14
54
19.428571
0.828194
0.349265
0
0
0
0
0.059172
0
0
0
0
0
0
1
0.2
true
0
0.4
0.2
0.8
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
1
1
0
0
4
3aef8f7b69997f80abe56e8cb63cf27b2510a3c1
93
py
Python
src/festivals/apps.py
vineethvanga18/gymkhana_portal
9dcb108dab0fd9fe163c72013b6de3210d5c9cd5
[ "MIT" ]
16
2018-04-12T22:38:28.000Z
2020-08-19T16:06:33.000Z
src/festivals/apps.py
vineethvanga18/gymkhana_portal
9dcb108dab0fd9fe163c72013b6de3210d5c9cd5
[ "MIT" ]
36
2018-03-23T15:40:26.000Z
2021-06-10T17:51:23.000Z
src/festivals/apps.py
vineethvanga18/gymkhana_portal
9dcb108dab0fd9fe163c72013b6de3210d5c9cd5
[ "MIT" ]
46
2018-04-06T21:03:36.000Z
2021-11-03T04:39:27.000Z
from django.apps import AppConfig class FestivalsConfig(AppConfig): name = 'festivals'
15.5
33
0.763441
10
93
7.1
0.9
0
0
0
0
0
0
0
0
0
0
0
0.16129
93
5
34
18.6
0.910256
0
0
0
0
0
0.096774
0
0
0
0
0
0
1
0
false
0
0.333333
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
1
0
0
4
3af6027749cdc39488934b86b14baa2979b6799a
729
py
Python
tests/adventofcode/util/test_math_helpers.py
Frazzer951/Advent-Of-Code
419a6b7915a529fd49215639db79dcf5bde9cab7
[ "MIT" ]
null
null
null
tests/adventofcode/util/test_math_helpers.py
Frazzer951/Advent-Of-Code
419a6b7915a529fd49215639db79dcf5bde9cab7
[ "MIT" ]
2
2022-02-02T15:59:34.000Z
2022-02-02T15:59:44.000Z
tests/adventofcode/util/test_math_helpers.py
Frazzer951/Advent-Of-Code
419a6b7915a529fd49215639db79dcf5bde9cab7
[ "MIT" ]
null
null
null
import pytest from adventofcode.util.math_helpers import gaussian_sum from adventofcode.util.math_helpers import mean_ceil from adventofcode.util.math_helpers import mean_floor @pytest.mark.parametrize("number", [*range(0, 10, 3)]) def test_gaussian_sum(number): assert gaussian_sum(number) == sum(range(1, number + 1)) @pytest.mark.parametrize(["target_list", "expected"], [([1, 2, 3, 4, 5], 3), ([13, 34, 45, 68, 5], 33)]) def test_mean_floor(target_list, expected): assert mean_floor(target_list) == expected @pytest.mark.parametrize(["target_list", "expected"], [([123, 34546, 341, 45], 8764), ([13, 34, 45, 68, 5], 33)]) def test_mean_ceil(target_list, expected): assert mean_ceil(target_list) == expected
36.45
113
0.721536
109
729
4.633028
0.348624
0.118812
0.213861
0.142574
0.69505
0.477228
0.249505
0.087129
0.087129
0
0
0.079316
0.11797
729
19
114
38.368421
0.706065
0
0
0
0
0
0.060357
0
0
0
0
0
0.230769
1
0.230769
false
0
0.307692
0
0.538462
0
0
0
0
null
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
1
0
1
0
0
4
c9178fed97d4451cd5ca77c5d929a22d3d0b9a98
126
py
Python
Gustavo Guanabara - Python Learning Exercises/ex030 - Number Check [Exercise].py
TiagoPL/Python-Learning
20855433a8a050647ee9c5039aac1e50807324f8
[ "MIT" ]
null
null
null
Gustavo Guanabara - Python Learning Exercises/ex030 - Number Check [Exercise].py
TiagoPL/Python-Learning
20855433a8a050647ee9c5039aac1e50807324f8
[ "MIT" ]
null
null
null
Gustavo Guanabara - Python Learning Exercises/ex030 - Number Check [Exercise].py
TiagoPL/Python-Learning
20855433a8a050647ee9c5039aac1e50807324f8
[ "MIT" ]
null
null
null
n = int(input('Choose a number: ')) if n % 2 == 0: print('Thats an even number.') else: print('Thats an odd number.')
21
35
0.587302
21
126
3.52381
0.714286
0.27027
0.324324
0
0
0
0
0
0
0
0
0.020619
0.230159
126
5
36
25.2
0.742268
0
0
0
0
0
0.460317
0
0
0
0
0
0
1
0
false
0
0
0
0
0.4
1
0
0
null
1
1
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
4
c9281f83de1db2882247233b6c738779c3ed2c30
461
py
Python
backend/apps/profile_app/profile_model.py
raphaelrpl/portal
9e84e52a73500390187d3fc7c4871cf8a3620231
[ "MIT" ]
null
null
null
backend/apps/profile_app/profile_model.py
raphaelrpl/portal
9e84e52a73500390187d3fc7c4871cf8a3620231
[ "MIT" ]
null
null
null
backend/apps/profile_app/profile_model.py
raphaelrpl/portal
9e84e52a73500390187d3fc7c4871cf8a3620231
[ "MIT" ]
null
null
null
# -*- coding: utf-8 -*- from __future__ import absolute_import, unicode_literals from google.appengine.ext import ndb from gaegraph.model import Node from gaepermission.model import MainUser class Profile(Node): user = ndb.KeyProperty(MainUser, required=True) avatar = ndb.StringProperty(required=False) education = ndb.StringProperty(required=False) position = ndb.StringProperty(required=False) about = ndb.TextProperty(required=False)
30.733333
56
0.770065
55
461
6.345455
0.563636
0.148997
0.2149
0.25788
0
0
0
0
0
0
0
0.002513
0.136659
461
14
57
32.928571
0.874372
0.045553
0
0
0
0
0
0
0
0
0
0
0
1
0
false
0
0.4
0
1
0
0
0
0
null
0
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
1
0
0
4
c93070156be707dd7e0dfe507a4132f0af8ad493
110
py
Python
autoencoding/data/__init__.py
perschi/Neural-Gas-VAE
cbe23f324b9979a5af36c23f062b122a946a8196
[ "MIT" ]
null
null
null
autoencoding/data/__init__.py
perschi/Neural-Gas-VAE
cbe23f324b9979a5af36c23f062b122a946a8196
[ "MIT" ]
null
null
null
autoencoding/data/__init__.py
perschi/Neural-Gas-VAE
cbe23f324b9979a5af36c23f062b122a946a8196
[ "MIT" ]
null
null
null
from .speechcommands import Speechcommands from .fashion_mnist import FashionMNIST from .cifar import CIFAR10
27.5
42
0.863636
13
110
7.230769
0.615385
0
0
0
0
0
0
0
0
0
0
0.020408
0.109091
110
3
43
36.666667
0.938776
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
0
0
4
c934369fc712d2cf8666d92a91e78c1345cc1e25
147
py
Python
Exercicios propostos/Ex 003.py
luks-rossato/curso-Python3
4d64f7927c23dedcf4e8c37c233d0bb062035029
[ "MIT" ]
null
null
null
Exercicios propostos/Ex 003.py
luks-rossato/curso-Python3
4d64f7927c23dedcf4e8c37c233d0bb062035029
[ "MIT" ]
null
null
null
Exercicios propostos/Ex 003.py
luks-rossato/curso-Python3
4d64f7927c23dedcf4e8c37c233d0bb062035029
[ "MIT" ]
null
null
null
n1 = int(input('Digite o numero 1:')) n2 = int(input('Digite o numero 2:')) s = n1+n2 print('A soma dos numeros {} e {} é: {} '.format(n1,n2,s))
21
58
0.585034
28
147
3.071429
0.642857
0.186047
0.325581
0.348837
0.488372
0
0
0
0
0
0
0.066667
0.183673
147
6
59
24.5
0.65
0
0
0
0
0
0.469388
0
0
0
0
0
0
1
0
false
0
0
0
0
0.25
1
0
0
null
0
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
4
c96544fe0f0e3b25b912865a13067ac1a1dfbf2a
17
py
Python
jupiter/use_cases/__init__.py
horia141/jupiter
2c721d1d44e1cd2607ad9936e54a20ea254741dc
[ "MIT" ]
15
2019-05-05T14:34:58.000Z
2022-02-25T09:57:28.000Z
src/toml_validator/use_cases/__init__.py
staticdev/toml-validator
e3bc7a674c5ec1c996d7539616411784995869b1
[ "MIT" ]
92
2020-03-01T15:57:59.000Z
2021-02-16T18:48:53.000Z
jupiter/use_cases/__init__.py
horia141/jupiter
2c721d1d44e1cd2607ad9936e54a20ea254741dc
[ "MIT" ]
1
2020-08-25T00:43:21.000Z
2020-08-25T00:43:21.000Z
"""Use cases."""
8.5
16
0.470588
2
17
4
1
0
0
0
0
0
0
0
0
0
0
0
0.117647
17
1
17
17
0.533333
0.588235
0
null
0
null
0
0
null
0
0
0
null
1
null
true
0
0
null
null
null
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
0
0
0
0
0
4
a338992fa94557a042858c7b1e77d16a7b3d9e37
334
py
Python
src/apps/productividad/admin.py
SGC-Tlaxcala/cerebro
6c842f66d849065a70002fccdb1eaca1e3d61d99
[ "MIT" ]
null
null
null
src/apps/productividad/admin.py
SGC-Tlaxcala/cerebro
6c842f66d849065a70002fccdb1eaca1e3d61d99
[ "MIT" ]
48
2017-04-21T17:35:23.000Z
2020-08-29T04:19:35.000Z
src/apps/productividad/admin.py
SGC-Tlaxcala/cerebro
6c842f66d849065a70002fccdb1eaca1e3d61d99
[ "MIT" ]
null
null
null
# coding: utf-8 """Área administrativa de productividad.""" from django.contrib import admin from apps.productividad.models import PronosticoTramites class PronosticoTramitesAdmin(admin.ModelAdmin): """Generador simple de administración del modelo""" pass admin.site.register(PronosticoTramites, PronosticoTramitesAdmin)
23.857143
64
0.799401
34
334
7.852941
0.764706
0
0
0
0
0
0
0
0
0
0
0.00339
0.116766
334
13
65
25.692308
0.901695
0.293413
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0.2
0.4
0
0.6
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
1
1
0
1
0
0
4
a39b1e60be730c90e67639e4206ed8aca33f6e7d
242
py
Python
workflows/lexicology/visualization_views.py
xflows/textflows
7fd99cebe29bcb25ea21b8bfb7dca2d0b663ea2b
[ "MIT" ]
18
2015-07-29T07:14:41.000Z
2021-05-31T16:10:49.000Z
workflows/lexicology/visualization_views.py
xflows/textflows
7fd99cebe29bcb25ea21b8bfb7dca2d0b663ea2b
[ "MIT" ]
null
null
null
workflows/lexicology/visualization_views.py
xflows/textflows
7fd99cebe29bcb25ea21b8bfb7dca2d0b663ea2b
[ "MIT" ]
8
2016-02-05T10:13:40.000Z
2020-11-10T14:36:31.000Z
from django.shortcuts import render def read_string_in_slovene(request,input_dict,output_dict,widget): text=input_dict.get('text') return render(request, 'visualizations/read_string_in_slovene.html',{'widget':widget,'text':text})
26.888889
102
0.785124
34
242
5.323529
0.588235
0.110497
0.132597
0.209945
0
0
0
0
0
0
0
0
0.090909
242
8
103
30.25
0.822727
0
0
0
0
0
0.233333
0.175
0
0
0
0
0
1
0.25
false
0
0.25
0
0.75
0
0
0
0
null
0
0
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
0
1
0
0
4
6e6320fb812d023fbdd96e37f5697933fb2bf396
1,021
py
Python
diazotheme/uaic/browser/viewlets.py
smcmahon/diazotheme.uaic
364aac7aaf6826b17e58a91c311e67104f614651
[ "CNRI-Python" ]
null
null
null
diazotheme/uaic/browser/viewlets.py
smcmahon/diazotheme.uaic
364aac7aaf6826b17e58a91c311e67104f614651
[ "CNRI-Python" ]
null
null
null
diazotheme/uaic/browser/viewlets.py
smcmahon/diazotheme.uaic
364aac7aaf6826b17e58a91c311e67104f614651
[ "CNRI-Python" ]
null
null
null
from Products.Five.browser.pagetemplatefile import ViewPageTemplateFile from plone.app.layout.viewlets.common import ViewletBase # Sample code for a basic viewlet (In order to use it, you'll have to): # - Un-comment the following useable piece of code (viewlet python class). # - Rename the viewlet template file ('browser/viewlet.pt') and edit the # following python code accordingly. # - Edit the class and template to make them suit your needs. # - Make sure your viewlet is correctly registered in 'browser/configure.zcml'. # - If you need it to appear in a specific order inside its viewlet manager, # edit 'profiles/default/viewlets.xml' accordingly. # - Restart Zope. # - If you edited any file in 'profiles/default/', reinstall your package. # - Once you're happy with your viewlet implementation, remove any related # (unwanted) inline documentation ;-p #class MyViewlet(ViewletBase): # render = ViewPageTemplateFile('viewlet.pt') # # def update(self): # self.computed_value = 'any output'
46.409091
79
0.750245
141
1,021
5.425532
0.631206
0.031373
0
0
0
0
0
0
0
0
0
0
0.161606
1,021
21
80
48.619048
0.893692
0.837414
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
4
6e781425b3ac44db0301689748e2d4ab7f22410e
42
py
Python
pyexcel/__version__.py
hiaselhans/pyexcel
4c96909eaa7ec322f28207f43e41f1fff07d8123
[ "BSD-3-Clause" ]
1,045
2016-01-12T10:09:45.000Z
2022-03-21T18:43:02.000Z
pyexcel/__version__.py
hiaselhans/pyexcel
4c96909eaa7ec322f28207f43e41f1fff07d8123
[ "BSD-3-Clause" ]
231
2016-01-26T15:04:46.000Z
2022-03-18T10:13:07.000Z
pyexcel/__version__.py
hiaselhans/pyexcel
4c96909eaa7ec322f28207f43e41f1fff07d8123
[ "BSD-3-Clause" ]
157
2016-02-13T21:53:25.000Z
2022-03-05T15:16:48.000Z
__version__ = '0.7.0' __author__ = 'C.W.'
14
21
0.619048
7
42
2.571429
0.857143
0
0
0
0
0
0
0
0
0
0
0.083333
0.142857
42
2
22
21
0.416667
0
0
0
0
0
0.214286
0
0
0
0
0
0
1
0
false
0
0
0
0
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
1
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
4
6e80bd61cd54f6f27de32777d90944ceaa68c908
717
py
Python
scripts/templates/fastApiCrud/zarubaServiceName/repos/zarubaEntityName.py
sulthonzh/zaruba
ec9262f43da17d86330da2c593b7da451aabd60f
[ "Apache-2.0" ]
39
2020-03-13T19:41:11.000Z
2022-02-14T02:01:00.000Z
scripts/templates/fastApiCrud/zarubaServiceName/repos/zarubaEntityName.py
sulthonzh/zaruba
ec9262f43da17d86330da2c593b7da451aabd60f
[ "Apache-2.0" ]
5
2020-08-01T08:55:48.000Z
2022-02-10T00:55:39.000Z
scripts/templates/fastApiCrud/zarubaServiceName/repos/zarubaEntityName.py
sulthonzh/zaruba
ec9262f43da17d86330da2c593b7da451aabd60f
[ "Apache-2.0" ]
4
2020-11-10T20:45:12.000Z
2021-03-18T06:18:55.000Z
from typing import List from schemas.zarubaEntityName import ZarubaEntityName, ZarubaEntityNameData import abc class ZarubaEntityNameRepo(abc.ABC): @abc.abstractmethod def find_by_id(self, id: str) -> ZarubaEntityName: pass @abc.abstractmethod def find(self, keyword: str, limit: int, offset: int) -> List[ZarubaEntityName]: pass @abc.abstractmethod def insert(self, zaruba_entity_name_data: ZarubaEntityNameData) -> ZarubaEntityName: pass @abc.abstractmethod def update(self, id: str, zaruba_entity_name_data: ZarubaEntityNameData) -> ZarubaEntityName: pass @abc.abstractmethod def delete(self, id: str) -> ZarubaEntityName: pass
27.576923
97
0.715481
76
717
6.644737
0.381579
0.168317
0.19802
0.293069
0.550495
0.316832
0.316832
0.316832
0.316832
0.316832
0
0
0.202232
717
26
98
27.576923
0.882867
0
0
0.526316
0
0
0
0
0
0
0
0
0
1
0.263158
false
0.263158
0.157895
0
0.473684
0
0
0
0
null
0
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
1
0
0
0
0
0
4
6ec2846917e19d56b9a46ca5be4dbb9e4b891fb1
177
py
Python
test/python_rules/data_dep.py
samwestmoreland/please
1616742eeefca3dd0b3194e4c1ec9a8542ec13c7
[ "Apache-2.0" ]
1,992
2016-08-08T11:14:10.000Z
2022-03-31T08:29:57.000Z
test/python_rules/data_dep.py
samwestmoreland/please
1616742eeefca3dd0b3194e4c1ec9a8542ec13c7
[ "Apache-2.0" ]
1,059
2016-08-03T17:11:37.000Z
2022-03-30T16:27:30.000Z
test/python_rules/data_dep.py
samwestmoreland/please
1616742eeefca3dd0b3194e4c1ec9a8542ec13c7
[ "Apache-2.0" ]
213
2016-12-09T15:37:00.000Z
2022-03-23T23:08:26.000Z
"""Part of a test on deps, data, requires and provides.""" from __future__ import print_function def the_answer(): return 42 if __name__ == '__main__': print('42')
14.75
58
0.677966
25
177
4.24
0.92
0
0
0
0
0
0
0
0
0
0
0.028369
0.20339
177
11
59
16.090909
0.723404
0.293785
0
0
0
0
0.084034
0
0
0
0
0
0
1
0.2
true
0
0.2
0.2
0.6
0.4
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
0
1
1
0
0
4
6ed9c5289052bc1d6b46a450cad6bae635acdae0
33
py
Python
DjangoBICAP/DjangoBICAP/__init__.py
SgozziCoders/BICAP-backend
272bfc67df1df49e30d99cf055eac7d7e3810b7a
[ "MIT" ]
null
null
null
DjangoBICAP/DjangoBICAP/__init__.py
SgozziCoders/BICAP-backend
272bfc67df1df49e30d99cf055eac7d7e3810b7a
[ "MIT" ]
null
null
null
DjangoBICAP/DjangoBICAP/__init__.py
SgozziCoders/BICAP-backend
272bfc67df1df49e30d99cf055eac7d7e3810b7a
[ "MIT" ]
null
null
null
""" Package for DjangoBICAP. """
8.25
24
0.636364
3
33
7
1
0
0
0
0
0
0
0
0
0
0
0
0.151515
33
3
25
11
0.75
0.727273
0
null
0
null
0
0
null
0
0
0
null
1
null
true
0
0
null
null
null
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
0
0
0
0
0
4
42b2e6af25ccb6a14eb643922290e8c2581542dd
65
py
Python
model/db.py
franckisses/tornado_daily
bde1395472ae7acb490b378a8634011ecab34807
[ "MIT" ]
null
null
null
model/db.py
franckisses/tornado_daily
bde1395472ae7acb490b378a8634011ecab34807
[ "MIT" ]
null
null
null
model/db.py
franckisses/tornado_daily
bde1395472ae7acb490b378a8634011ecab34807
[ "MIT" ]
null
null
null
class Dbconnection: def __init__(self): self.__co
9.285714
23
0.615385
7
65
4.857143
0.857143
0
0
0
0
0
0
0
0
0
0
0
0.307692
65
7
24
9.285714
0.755556
0
0
0
0
0
0
0
0
0
0
0
0
1
0.333333
false
0
0
0
0.666667
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
0
1
0
0
4
42b424166aed2b65eb58581f9e0b53d61548adf5
74
py
Python
D01/tuple.py
shdx8/dtwrhs
108decb8056931fc7601ed455a72ef0d65983ab0
[ "MIT" ]
null
null
null
D01/tuple.py
shdx8/dtwrhs
108decb8056931fc7601ed455a72ef0d65983ab0
[ "MIT" ]
null
null
null
D01/tuple.py
shdx8/dtwrhs
108decb8056931fc7601ed455a72ef0d65983ab0
[ "MIT" ]
null
null
null
#TUPLE AKAN DIULAS PADA PERTEMUAN BERIKUTNYA a = (255,255, 255) print (a)
18.5
44
0.72973
12
74
4.5
0.75
0.222222
0
0
0
0
0
0
0
0
0
0.145161
0.162162
74
3
45
24.666667
0.725806
0.581081
0
0
0
0
0
0
0
0
0
0
0
1
0
false
0
0
0
0
0.5
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
1
0
4
42ba00c6ebbb370c6bea67d18ca803b55d56125a
605
py
Python
simpleoncall/api/v1/urls.py
simpleoncall/simpleoncall
ffc247045c7ce357871899c84fdfc61f4add06a9
[ "MIT" ]
1
2016-01-11T21:37:44.000Z
2016-01-11T21:37:44.000Z
simpleoncall/api/v1/urls.py
simpleoncall/simpleoncall
ffc247045c7ce357871899c84fdfc61f4add06a9
[ "MIT" ]
48
2015-01-04T16:04:20.000Z
2015-01-25T20:53:49.000Z
simpleoncall/api/v1/urls.py
simpleoncall/simpleoncall
ffc247045c7ce357871899c84fdfc61f4add06a9
[ "MIT" ]
null
null
null
from django.conf.urls import patterns, url urlpatterns = patterns( '', url(r'^$', 'simpleoncall.api.v1.views.index', name='index'), url(r'^whose/oncall$', 'simpleoncall.api.v1.views.whose_oncall', name='whose-oncall'), url(r'^alert/add$', 'simpleoncall.api.v1.views.alert_create', name='alert-add'), url(r'^alert/update$', 'simpleoncall.api.v1.views.alert_update', name='alert-update'), url(r'^alerts$', 'simpleoncall.api.v1.views.alerts_list', name='alerts-list'), url(r'^integration/datadog$', 'simpleoncall.api.v1.views.datadog_integration', name='integration-datadog'), )
50.416667
111
0.697521
82
605
5.085366
0.304878
0.057554
0.244604
0.316547
0.129496
0
0
0
0
0
0
0.010949
0.094215
605
11
112
55
0.75
0
0
0
0
0
0.603306
0.409917
0
0
0
0
0
1
0
false
0
0.1
0
0.1
0
0
0
0
null
0
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
1
1
null
0
0
0
0
0
0
0
0
0
0
0
0
0
4
42c89be1d1b86a202cc45bf8f9b35b78ec3defa4
222
py
Python
lippukala_tests/test_admin.py
kcsry/lippukala
05f11d14d3cb86a59a4a1ec2bbb403ac303a6c3b
[ "MIT" ]
1
2019-03-04T15:35:39.000Z
2019-03-04T15:35:39.000Z
lippukala_tests/test_admin.py
kcsry/lippukala
05f11d14d3cb86a59a4a1ec2bbb403ac303a6c3b
[ "MIT" ]
8
2016-03-26T10:07:16.000Z
2020-12-10T09:06:36.000Z
lippukala_tests/test_admin.py
kcsry/lippukala
05f11d14d3cb86a59a4a1ec2bbb403ac303a6c3b
[ "MIT" ]
null
null
null
import pytest @pytest.mark.django_db def test_admin_smoke(admin_client): assert admin_client.get("/admin/lippukala/code/").status_code == 200 assert admin_client.get("/admin/lippukala/order/").status_code == 200
27.75
73
0.761261
32
222
5.03125
0.53125
0.204969
0.21118
0.248447
0.42236
0.42236
0
0
0
0
0
0.030151
0.103604
222
7
74
31.714286
0.778894
0
0
0
0
0
0.202703
0.202703
0
0
0
0
0.4
1
0.2
false
0
0.2
0
0.4
0
0
0
0
null
1
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
4
42e0d24f5e9d5d162a905c69308ca05b3b8246c2
88
py
Python
nglp/precompute/precompute.py
mauromsl/NGLP-Analytics
d6df05f82dbf2ffa5e136a20b6e89f7ef3bcbef2
[ "MIT" ]
null
null
null
nglp/precompute/precompute.py
mauromsl/NGLP-Analytics
d6df05f82dbf2ffa5e136a20b6e89f7ef3bcbef2
[ "MIT" ]
54
2021-04-28T05:14:45.000Z
2021-12-10T09:14:28.000Z
nglp/precompute/precompute.py
mauromsl/NGLP-Analytics
d6df05f82dbf2ffa5e136a20b6e89f7ef3bcbef2
[ "MIT" ]
1
2022-03-09T16:09:25.000Z
2022-03-09T16:09:25.000Z
class Precompute: def run(self, object_id: str): raise NotImplementedError()
29.333333
35
0.693182
10
88
6
1
0
0
0
0
0
0
0
0
0
0
0
0.215909
88
3
35
29.333333
0.869565
0
0
0
0
0
0
0
0
0
0
0
0
1
0.333333
false
0
0
0
0.666667
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
0
1
0
0
4
42e75d6af955207366d58eabde55a5af21b4886c
953
py
Python
setup.py
JackLeeMing/nameko_mongo
9d26282391925ce6bd1fd6ca28815b1b1bd83aba
[ "MIT" ]
null
null
null
setup.py
JackLeeMing/nameko_mongo
9d26282391925ce6bd1fd6ca28815b1b1bd83aba
[ "MIT" ]
null
null
null
setup.py
JackLeeMing/nameko_mongo
9d26282391925ce6bd1fd6ca28815b1b1bd83aba
[ "MIT" ]
null
null
null
from setuptools import setup setup( name='nameko_mongo_util', version='1.0.2', url='https://github.com/JackLeeMing/nameko_mongo.git', license='MIT', author='Jaque', author_email='1285879942@qq.com', packages=["nameko_mongo_util"], package_data={'':['*.*']}, install_requires=[ "nameko", "tornado", "pymongo", ], description='Redis dependency for nameko services', classifiers=[ 'Development Status :: 3 - Alpha', 'License :: OSI Approved :: Apache Software License', 'Programming Language :: Python :: 2', 'Programming Language :: Python :: 2.7', 'Programming Language :: Python :: 3', 'Programming Language :: Python :: 3.3', 'Programming Language :: Python :: 3.4', 'Programming Language :: Python :: 3.5', 'Programming Language :: Python :: 3.6', 'Programming Language :: Python :: 3.4', ], )
30.741935
61
0.582371
96
953
5.697917
0.552083
0.277879
0.365631
0.285192
0.149909
0
0
0
0
0
0
0.039548
0.257083
953
31
62
30.741935
0.733051
0
0
0.137931
0
0
0.569182
0
0
0
0
0
0
1
0
true
0
0.034483
0
0.034483
0
0
0
0
null
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
0
0
0
0
0
4
42f59be18caef447da9d8d097cb71bf4d2fe8a92
405
py
Python
src/tests/dataclass_bakery/generators/test_random_uuid_generator.py
miguelFLG13/dataclass-bakery
413b5b88ced200e4208e9a25edf520bfc7c31ca5
[ "Apache-2.0" ]
1
2021-10-10T04:52:31.000Z
2021-10-10T04:52:31.000Z
src/tests/dataclass_bakery/generators/test_random_uuid_generator.py
miguelFLG13/dataclass-bakery
413b5b88ced200e4208e9a25edf520bfc7c31ca5
[ "Apache-2.0" ]
null
null
null
src/tests/dataclass_bakery/generators/test_random_uuid_generator.py
miguelFLG13/dataclass-bakery
413b5b88ced200e4208e9a25edf520bfc7c31ca5
[ "Apache-2.0" ]
2
2021-06-05T18:41:50.000Z
2022-03-28T02:05:11.000Z
from unittest import TestCase from uuid import UUID from dataclass_bakery.generators.random_uuid_generator import RandomUuidGenerator class TestRandomUuidGenerator(TestCase): def setUp(self): self.random_uuid_generator = RandomUuidGenerator() def test_generate_uuid_ok(self): random_uuid = self.random_uuid_generator.generate() self.assertIsInstance(random_uuid, UUID)
28.928571
81
0.787654
46
405
6.673913
0.434783
0.162866
0.185668
0.149837
0
0
0
0
0
0
0
0
0.153086
405
13
82
31.153846
0.895044
0
0
0
0
0
0
0
0
0
0
0
0.111111
1
0.222222
false
0
0.333333
0
0.666667
0
0
0
0
null
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
1
0
1
0
0
4
42f80641b66db9ef0b0d1fb8a2fd4a22058b0c4a
679
py
Python
trajminer/classification/traclass.py
ybj94/trajminer
7355344be8fe763ba2583b6f508fefc3290c9849
[ "MIT" ]
37
2019-04-04T19:27:26.000Z
2021-12-22T07:10:13.000Z
trajminer/classification/traclass.py
ybj94/trajminer
7355344be8fe763ba2583b6f508fefc3290c9849
[ "MIT" ]
26
2019-04-04T19:20:44.000Z
2021-12-22T07:56:53.000Z
trajminer/classification/traclass.py
ybj94/trajminer
7355344be8fe763ba2583b6f508fefc3290c9849
[ "MIT" ]
9
2019-04-04T19:17:05.000Z
2019-11-05T15:06:21.000Z
from .base import Classifier class TraClass(Classifier): """TraClass: Trajectory Classification Using Hierarchical Region-Based and Trajectory-Based Clustering. Parameters ---------- TO-DO References ---------- `Lee, J. G., Han, J., Li, X., & Gonzalez, H. (2008). TraClass: trajectory classification using hierarchical region-based and trajectory-based clustering. Proceedings of the VLDB Endowment, 1(1), 1081-1094. <https://dl.acm.org/citation.cfm?id=1453972>`__ """ def __init__(self): pass def fit(self, X, y): pass def predict(self, X): pass def score(self, X, y): pass
21.903226
78
0.615611
81
679
5.08642
0.62963
0.050971
0.15534
0.179612
0.427184
0.427184
0.427184
0.427184
0.427184
0.427184
0
0.041257
0.250368
679
30
79
22.633333
0.768173
0.597938
0
0.4
0
0
0
0
0
0
0
0
0
1
0.4
false
0.4
0.1
0
0.6
0
0
0
0
null
0
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
1
0
0
1
0
0
4
6e030643a051c6deaae5974fb3d7587eb5cf9f95
277
py
Python
Assignment Solution/Module4-master/Module4_CaseStudy1_Q6_Ans.py
krdhakal/Edureka-HW-Solutions-of-Data-Science-for-Python-Certification
2332839d25cca3ee8036d6dda7360a3b31824d6b
[ "MIT" ]
null
null
null
Assignment Solution/Module4-master/Module4_CaseStudy1_Q6_Ans.py
krdhakal/Edureka-HW-Solutions-of-Data-Science-for-Python-Certification
2332839d25cca3ee8036d6dda7360a3b31824d6b
[ "MIT" ]
null
null
null
Assignment Solution/Module4-master/Module4_CaseStudy1_Q6_Ans.py
krdhakal/Edureka-HW-Solutions-of-Data-Science-for-Python-Certification
2332839d25cca3ee8036d6dda7360a3b31824d6b
[ "MIT" ]
null
null
null
# Create a numpy array [[0, 1, 2],[ 3, 4, 5],[ 6, 7, 8],[ 9,10, 11]] and filter the elements greater than 5 import numpy arr=numpy.array([[0, 1, 2],[3, 4, 5],[6, 7, 8],[9,10, 11]]) x=arr>5 print 'Main Array',arr print 'Greater than 5',x print 'Greater than 5 elements',arr[x]
30.777778
107
0.613718
59
277
2.881356
0.457627
0.194118
0.211765
0.141176
0.282353
0.282353
0.282353
0.282353
0.282353
0.282353
0
0.13913
0.169675
277
8
108
34.625
0.6
0.379061
0
0
0
0
0.276471
0
0
0
0
0
0
0
null
null
0
0.166667
null
null
0.5
0
0
0
null
0
1
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
1
0
0
0
0
0
0
1
0
4
6e2bbf035d2e9904203252d9420d7300c7faba9a
209
py
Python
exercicios_curso_em_video/Exercicio 11.py
Sposigor/Caminho_do_Python
e84d74e9dc89c0966f931a94cb9ebe3ee4671b6d
[ "MIT" ]
1
2021-01-13T18:07:46.000Z
2021-01-13T18:07:46.000Z
exercicios_curso_em_video/Exercicio 11.py
Sposigor/Caminho_do_Python
e84d74e9dc89c0966f931a94cb9ebe3ee4671b6d
[ "MIT" ]
null
null
null
exercicios_curso_em_video/Exercicio 11.py
Sposigor/Caminho_do_Python
e84d74e9dc89c0966f931a94cb9ebe3ee4671b6d
[ "MIT" ]
null
null
null
L = float(input('Largura da parede: ')) A = float(input('Altura da parede: ')) print(f'Sua parede tem a dimensão de {L}x{A} e sua área de {L*A}m²') print(f'Precisa de {(L*A)/2} litros de tinta para pintá la')
41.8
68
0.665072
43
209
3.232558
0.581395
0.064748
0.057554
0
0
0
0
0
0
0
0
0.011364
0.157895
209
5
69
41.8
0.778409
0
0
0
0
0.25
0.690476
0
0
0
0
0
0
1
0
false
0
0
0
0
0.5
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
1
0
4
6e2c0153a1901ee64b82fc182a76f7679e511cd0
248
py
Python
pine/pk_utils.py
tokuzfunpi/pine
1c42d4f23e388dfc651aa16f1a26b8496adfaf0c
[ "Apache-2.0" ]
null
null
null
pine/pk_utils.py
tokuzfunpi/pine
1c42d4f23e388dfc651aa16f1a26b8496adfaf0c
[ "Apache-2.0" ]
null
null
null
pine/pk_utils.py
tokuzfunpi/pine
1c42d4f23e388dfc651aa16f1a26b8496adfaf0c
[ "Apache-2.0" ]
null
null
null
import pickle def read_pickle(file_name): data = None with open(file_name, 'r') as fd : data = pickle.load(fd) return data def write_pickle(file_name, data): with open(file_name, 'w') as fd : pickle.dump(data, fd)
20.666667
37
0.633065
39
248
3.871795
0.461538
0.211921
0.18543
0.238411
0
0
0
0
0
0
0
0
0.254032
248
11
38
22.545455
0.816216
0
0
0
0
0
0.008065
0
0
0
0
0
0
1
0.222222
false
0
0.111111
0
0.444444
0
0
0
0
null
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
0
0
0
0
4
6e3656e84977c5522e7cc07fd18b8ef3902ed6e7
107
py
Python
dmango/apps.py
Oppoin/dmango
5c820afef8d52e9be50c87cd78356cbaedfde158
[ "MIT" ]
null
null
null
dmango/apps.py
Oppoin/dmango
5c820afef8d52e9be50c87cd78356cbaedfde158
[ "MIT" ]
null
null
null
dmango/apps.py
Oppoin/dmango
5c820afef8d52e9be50c87cd78356cbaedfde158
[ "MIT" ]
null
null
null
# -*- coding: utf-8 from django.apps import AppConfig class DmangoConfig(AppConfig): name = 'dmango'
15.285714
33
0.700935
13
107
5.769231
0.923077
0
0
0
0
0
0
0
0
0
0
0.011364
0.17757
107
6
34
17.833333
0.840909
0.158879
0
0
0
0
0.068182
0
0
0
0
0
0
1
0
false
0
0.333333
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
1
0
0
4
281667757bff8b9420173562228750d27fe24aed
1,402
py
Python
managedb/change.py
peter-de-boer/ponzischeme_webapp
6b2a71dd61ac14bd364a6228191fc76b23fdfef0
[ "MIT" ]
2
2019-07-25T15:44:31.000Z
2020-10-07T15:24:45.000Z
managedb/change.py
peter-de-boer/ponzischeme_webapp
6b2a71dd61ac14bd364a6228191fc76b23fdfef0
[ "MIT" ]
5
2021-03-08T22:51:24.000Z
2022-02-26T07:01:07.000Z
managedb/change.py
peter-de-boer/ponzischeme_webapp
6b2a71dd61ac14bd364a6228191fc76b23fdfef0
[ "MIT" ]
null
null
null
# template for changing the db from backend.db import engine, Base from backend.models import User, GameModel, Notes import jsonpickle import copy from werkzeug.security import generate_password_hash, check_password_hash Base.metadata.create_all(engine) from sqlalchemy.orm import sessionmaker Session = sessionmaker(bind=engine) session = Session() ''' gameToBeDeleted = session.query(GameModel).filter(GameModel.id==13).first() session.delete(gameToBeDeleted) ''' ''' findName = "<old_username>" newName = "<new_username>" userToBeChanged = session.query(User) \ .filter(User.username==findName) \ .first() user_games = session.query(GameModel) \ .join(User, GameModel.players) \ .filter(User.username == findName) for gms in user_games: print(gms.id) gmp = gms.game if gmp is not None: gm = jsonpickle.decode(gmp) for plr in gm.players: print(plr.name) if plr.name==findName: plr.changeName(newName) print("after change:") for plr in gm.players: print(plr.name) gms.game = jsonpickle.encode(gm) userToBeChanged.username = newName ''' ''' usr=session.query(User).filter(User.email=="<email>").first() password="<new_password>" hashed_password = generate_password_hash(password) usr.password = hashed_password ''' session.commit() session.close()
21.569231
75
0.686163
168
1,402
5.642857
0.404762
0.050633
0.042194
0.046414
0.116034
0.061181
0.061181
0.061181
0
0
0
0.001768
0.193295
1,402
64
76
21.90625
0.836428
0.019971
0
0
1
0
0
0
0
0
0
0
0
1
0
false
0.090909
0.545455
0
0.545455
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
null
0
0
0
0
0
0
0
1
1
0
1
0
0
4
28605ec57b6279a4f19f620f42237b740aa5652c
211
py
Python
nym_embeddings/pywsd/new_lesk.py
c0ntradicti0n/KnowledgeScience
7dd6b9ec532a1fbd9b12ea1980d0c7e9c8539ae5
[ "Info-ZIP" ]
10
2021-05-31T07:18:08.000Z
2022-03-19T09:20:11.000Z
nym_embeddings/pywsd/new_lesk.py
c0ntradicti0n/allennlp_vs_ampligraph
00b984db38615a3197b2120d88363b0bee408982
[ "Info-ZIP" ]
4
2020-01-28T23:03:03.000Z
2022-02-10T00:30:10.000Z
nym_embeddings/pywsd/new_lesk.py
c0ntradicti0n/Distinctiopus4
cf7982613f86f9d12c9f45a9f678be5d59f8fb3c
[ "MIT" ]
2
2021-12-09T07:23:21.000Z
2022-03-31T06:13:10.000Z
#!/usr/bin/env python -*- coding: utf-8 -*- # # Python Word Sense Disambiguation (pyWSD) # # Copyright (C) 2014-2017 alvations # URL: https://github.com/alvations/pywsd # For license information, see LICENSE.md
26.375
43
0.7109
29
211
5.172414
0.862069
0
0
0
0
0
0
0
0
0
0
0.04918
0.132701
211
7
44
30.142857
0.770492
0.933649
0
null
0
null
0
0
null
0
0
0
null
1
null
true
0
0
null
null
null
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
1
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
0
0
0
0
0
4
2861ac6cd6e4bf2174c56aa2f73d804861e63bfd
398
py
Python
L4/pyL4/src/independent/OneUseFrozenDict.py
Marcao02/complaw-deeptech
4c2af1bdd885bc2fb7e14dc14c11fed30f8f3428
[ "Apache-2.0" ]
null
null
null
L4/pyL4/src/independent/OneUseFrozenDict.py
Marcao02/complaw-deeptech
4c2af1bdd885bc2fb7e14dc14c11fed30f8f3428
[ "Apache-2.0" ]
null
null
null
L4/pyL4/src/independent/OneUseFrozenDict.py
Marcao02/complaw-deeptech
4c2af1bdd885bc2fb7e14dc14c11fed30f8f3428
[ "Apache-2.0" ]
null
null
null
from typing import TypeVar, Dict K = TypeVar('K') V = TypeVar('V') NoMutating = Exception("mutating not allowed") class OneUseFrozenDict(Dict[K, V]): def __set__(self, k: K, v: V): raise NoMutating def clear(self): raise NoMutating def pop(self): raise NoMutating def popitem(self): raise NoMutating def update(self): raise NoMutating
20.947368
46
0.630653
50
398
4.94
0.44
0.303644
0.291498
0.267206
0
0
0
0
0
0
0
0
0.266332
398
18
47
22.111111
0.84589
0
0
0.333333
0
0
0.055276
0
0
0
0
0
0
1
0.333333
false
0
0.066667
0
0.466667
0
0
0
0
null
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
0
0
0
0
4
288593f0aba24ae9c8c87fd652c84f0ef2820b60
206
py
Python
myhvac_web/myhvac_service/api.py
alanquillin/myhvac_web
645dd750d55118eb2a5c8648b9ea1c959af7b654
[ "Apache-2.0" ]
null
null
null
myhvac_web/myhvac_service/api.py
alanquillin/myhvac_web
645dd750d55118eb2a5c8648b9ea1c959af7b654
[ "Apache-2.0" ]
null
null
null
myhvac_web/myhvac_service/api.py
alanquillin/myhvac_web
645dd750d55118eb2a5c8648b9ea1c959af7b654
[ "Apache-2.0" ]
null
null
null
from myhvac_web.myhvac_service import factory import logging LOG = logging.getLogger(__name__) def get_system_state(): service = factory.get_service_module() return service.get_system_state()
15.846154
45
0.781553
27
206
5.518519
0.592593
0.120805
0.187919
0
0
0
0
0
0
0
0
0
0.145631
206
13
46
15.846154
0.846591
0
0
0
0
0
0
0
0
0
0
0
0
1
0.166667
false
0
0.333333
0
0.666667
0
1
0
0
null
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
1
0
0
4
289f20d0e79e350caa803793636867a0ce7c9808
64
py
Python
CodeWars/8 Kyu/Super Duper Easy.py
anubhab-code/Competitive-Programming
de28cb7d44044b9e7d8bdb475da61e37c018ac35
[ "MIT" ]
null
null
null
CodeWars/8 Kyu/Super Duper Easy.py
anubhab-code/Competitive-Programming
de28cb7d44044b9e7d8bdb475da61e37c018ac35
[ "MIT" ]
null
null
null
CodeWars/8 Kyu/Super Duper Easy.py
anubhab-code/Competitive-Programming
de28cb7d44044b9e7d8bdb475da61e37c018ac35
[ "MIT" ]
null
null
null
def problem(a): return 'Error' if type(a) is str else a*50+6
32
48
0.65625
14
64
3
0.857143
0
0
0
0
0
0
0
0
0
0
0.058824
0.203125
64
2
48
32
0.764706
0
0
0
0
0
0.076923
0
0
0
0
0
0
1
0.5
false
0
0
0.5
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
1
0
0
0
4
9551009b1bbcae72d01f81bc19e33ba81c7c27e2
410
py
Python
exercises/concept/chaitanas-colossal-coaster/list_methods.py
SaschaMann/python
e34801a4c647635e5eb3fd4e2c41c68784b19ec7
[ "MIT" ]
200
2019-12-12T13:50:59.000Z
2022-02-20T22:38:42.000Z
exercises/concept/chaitanas-colossal-coaster/list_methods.py
SaschaMann/python
e34801a4c647635e5eb3fd4e2c41c68784b19ec7
[ "MIT" ]
1,938
2019-12-12T08:07:10.000Z
2021-01-29T12:56:13.000Z
exercises/concept/chaitanas-colossal-coaster/list_methods.py
SaschaMann/python
e34801a4c647635e5eb3fd4e2c41c68784b19ec7
[ "MIT" ]
239
2019-12-12T14:09:08.000Z
2022-03-18T00:04:07.000Z
def add_me_to_the_queue(express_queue, normal_queue, ticket_type, person_name): pass def find_his_friend(queue, friend_name): pass def add_person_with_his_friends(queue, index, person_name): pass def remove_the_mean_person(queue, person_name): pass def how_many_namefellows(queue, person_name): pass def remove_the_last_person(queue): pass def sorted_names(queue): pass
15.185185
79
0.760976
63
410
4.52381
0.428571
0.147368
0.192982
0.238596
0.277193
0.182456
0
0
0
0
0
0
0.168293
410
26
80
15.769231
0.835777
0
0
0.5
0
0
0
0
0
0
0
0
0
1
0.5
false
0.5
0
0
0.5
0
0
0
0
null
0
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
1
0
0
0
0
0
4
955348d59c98eef9bde36561663dc6d05ec25c07
232
py
Python
shorty/views.py
druvdub/urlshortener-notsoeasy
a42367849aafd2a5410b7b6adc083fa3530f9e9a
[ "Apache-2.0" ]
null
null
null
shorty/views.py
druvdub/urlshortener-notsoeasy
a42367849aafd2a5410b7b6adc083fa3530f9e9a
[ "Apache-2.0" ]
null
null
null
shorty/views.py
druvdub/urlshortener-notsoeasy
a42367849aafd2a5410b7b6adc083fa3530f9e9a
[ "Apache-2.0" ]
null
null
null
from django.shortcuts import get_object_or_404, redirect from .models import URL def root(request, url_hash): url = get_object_or_404(URL, url_hash = url_hash) url.clicked() return redirect(url.full_url)
19.333333
57
0.711207
35
232
4.428571
0.514286
0.135484
0.193548
0.180645
0
0
0
0
0
0
0
0.032787
0.211207
232
11
58
21.090909
0.814208
0
0
0
0
0
0
0
0
0
0
0
0
1
0.166667
false
0
0.333333
0
0.666667
0
0
0
0
null
0
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
1
0
0
4
957e45bb5016f1c0208a555b678fda14015d9467
4,172
py
Python
examples/nio02.py
yang69can/pyngl
78a7040ce9de4b7a442b0c3b5faecccab2f01426
[ "Apache-2.0" ]
125
2016-11-24T09:04:28.000Z
2022-01-22T14:06:56.000Z
examples/nio02.py
yang69can/pyngl
78a7040ce9de4b7a442b0c3b5faecccab2f01426
[ "Apache-2.0" ]
52
2017-11-08T23:23:02.000Z
2022-03-20T03:17:39.000Z
examples/nio02.py
yang69can/pyngl
78a7040ce9de4b7a442b0c3b5faecccab2f01426
[ "Apache-2.0" ]
25
2017-08-27T10:50:43.000Z
2022-01-29T14:56:05.000Z
# # File: # nio02.py # # Synopsis: # Demonstrates PyNIO docstrings # # Category: # Processing. # # Author: # Dave Brown # # Date of original publication: # June, 2006 # # Description: # This example reads NetCDF file 'pop.nc' only to provide # an instance of the NioFile and NioVariable classes. # It prints all available docstrings in a systematic way, # # Effects illustrated: # o Reading a NetCDF file, and learning to use PyNIO through # its self-documenting docstrings. # # Output: # None # # Notes: # from __future__ import print_function import numpy import Nio import Ngl import os print(""" This example prints all Nio docstrings. For clarity each docstring is bracketed by a line of equal signs, and preceded by a comment naming the particular docstring. """) # # print the Nio summary documentation # print("The Nio docstring (Nio.__doc__):") print("=======================================================================") print(Nio.__doc__) print("=======================================================================") print("The NioOptions constructor options docstring (Nio.options.__doc__):") print("=======================================================================") print(Nio.options.__doc__) print("=======================================================================") # create an NioOptions object opt = Nio.options() print("The NioOptions class docstring (opt.__doc__):") print("=======================================================================") print(opt.__doc__) print("=======================================================================") print("The NioFile constructor open_file docstring (Nio.open_file.__doc__):") print("=======================================================================") print(Nio.open_file.__doc__) print("=======================================================================") # # Read the file pop.nc # dirc = Ngl.pynglpath("data") f = Nio.open_file(os.path.join(dirc,"cdf","pop.nc")) print("The NioFile object 
docstring (f.__doc__):") print("=======================================================================") print(f.__doc__) print("=======================================================================") print("The close method docstring (f.close.__doc__):") print("=======================================================================") print(f.close.__doc__) print("=======================================================================") print("The create_dimension method docstring (f.create_dimension.__doc__):") print("=======================================================================") print(f.create_dimension.__doc__) print("=======================================================================") print("The create_variable method docstring (f.create_variable.__doc__):") print("=======================================================================") print(f.create_variable.__doc__) print("=======================================================================") v = f.variables['t'] print("The NioVariable object docstring (f.variables['varname'].__doc__):") print("=======================================================================") print(v.__doc__) print("=======================================================================") print("The assign_value method docstring (f.variables['varname'].assign_value.__doc__):") print("=======================================================================") print(v.assign_value.__doc__) print("=======================================================================") print("The get_value method docstring (f.variables['varname'].get_value.__doc__):") print("=======================================================================") print(v.get_value.__doc__) print("=======================================================================") print("The typecode method docstring (f.variables['varname'].typecode.__doc__):") print("=======================================================================") 
print(v.typecode.__doc__) print("=======================================================================") f.close()
32.850394
89
0.420422
329
4,172
4.975684
0.334347
0.117288
0.158827
0.078192
0.248626
0.080635
0
0
0
0
0
0.001569
0.083174
4,172
126
90
33.111111
0.426405
0.14046
0
0.380952
0
0
0.734705
0.555681
0
0
0
0
0
1
0
false
0
0.079365
0
0.079365
0.809524
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
1
1
null
0
0
0
0
0
0
0
0
0
0
0
1
0
4
95922bc3363f008cb23e98aeb1273ffbb0bba860
202
py
Python
pythonCode/rosenum.py
eatmore/python_practice
c6a773c8d24182b23a86fd9b66b27b5ff948b258
[ "MIT" ]
null
null
null
pythonCode/rosenum.py
eatmore/python_practice
c6a773c8d24182b23a86fd9b66b27b5ff948b258
[ "MIT" ]
null
null
null
pythonCode/rosenum.py
eatmore/python_practice
c6a773c8d24182b23a86fd9b66b27b5ff948b258
[ "MIT" ]
1
2020-03-12T06:05:38.000Z
2020-03-12T06:05:38.000Z
for i in range(1000, 10000): a = int(str(i)[0]) b = int(str(i)[1]) c = int(str(i)[2]) d = int(str(i)[3]) n = pow(a,4) + pow(b,4) + pow(c,4) + pow(d,4) if n == i: print(i)
25.25
49
0.430693
44
202
1.977273
0.477273
0.275862
0.321839
0
0
0
0
0
0
0
0
0.121429
0.306931
202
8
50
25.25
0.5
0
0
0
0
0
0
0
0
0
0
0
0
1
0
false
0
0
0
0
0.125
0
0
1
null
1
1
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
4
959b37a776a87b7b242cb2e8c5a3c899469d86d6
128
py
Python
Mundo 1/ex_027.py
Shock3/Python_Exercicios
4420569e881b883728168aabe76b0e9f3a42597f
[ "MIT" ]
null
null
null
Mundo 1/ex_027.py
Shock3/Python_Exercicios
4420569e881b883728168aabe76b0e9f3a42597f
[ "MIT" ]
null
null
null
Mundo 1/ex_027.py
Shock3/Python_Exercicios
4420569e881b883728168aabe76b0e9f3a42597f
[ "MIT" ]
null
null
null
""" Faça um programa que leia o nome completo de uma pessoa, mostrando em seguida o primeiro e o último nome separadamente. """
21.333333
56
0.757813
21
128
4.619048
0.857143
0
0
0
0
0
0
0
0
0
0
0
0.179688
128
5
57
25.6
0.92381
0.929688
0
null
0
null
0
0
null
0
0
0
null
1
null
true
0
0
null
null
null
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
1
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
0
0
0
0
0
4
25146e93cf264fffebd5736f85dc1e6797d6e572
28
py
Python
oildrum/plugins/__init__.py
ericsk/google-app-engine-oil
742740a4e692d23e06d99182eea62b5bc1c79175
[ "Apache-2.0" ]
1
2015-10-18T10:21:06.000Z
2015-10-18T10:21:06.000Z
oildrum/plugins/__init__.py
ericsk/google-app-engine-oil
742740a4e692d23e06d99182eea62b5bc1c79175
[ "Apache-2.0" ]
null
null
null
oildrum/plugins/__init__.py
ericsk/google-app-engine-oil
742740a4e692d23e06d99182eea62b5bc1c79175
[ "Apache-2.0" ]
null
null
null
"""Custom plugins package"""
28
28
0.714286
3
28
6.666667
1
0
0
0
0
0
0
0
0
0
0
0
0.071429
28
1
28
28
0.769231
0.785714
0
null
0
null
0
0
null
0
0
0
null
1
null
true
0
0
null
null
null
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
0
0
0
0
0
4
25228805280d58681b77a52c4eb55c5eaa95b4d5
442
py
Python
db/Where.py
NewGr8Player/json-redis-demo
ad5d97b360c50afe04041ab8539d15d5c639969c
[ "MIT" ]
null
null
null
db/Where.py
NewGr8Player/json-redis-demo
ad5d97b360c50afe04041ab8539d15d5c639969c
[ "MIT" ]
null
null
null
db/Where.py
NewGr8Player/json-redis-demo
ad5d97b360c50afe04041ab8539d15d5c639969c
[ "MIT" ]
null
null
null
class Where: """ 查询条件 """ # 查询字段名 __field = None # 查询表达式 __express = None # 字段值 __value = None def __init__(self, _field, _express, _value): self.__field = _field self.__express = _express self.__value = _value def get_field(self): return self.__field def get_express(self): return self.__express def get_value(self): return self.__value
17.68
49
0.576923
48
442
4.666667
0.3125
0.120536
0.1875
0
0
0
0
0
0
0
0
0
0.339367
442
24
50
18.416667
0.767123
0.047511
0
0
0
0
0
0
0
0
0
0
0
1
0.285714
false
0
0
0.214286
0.785714
0
0
0
0
null
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
1
1
0
0
4
25232d223a9edc5e7ce2270070e27c357958c2f1
136
py
Python
custom/filler.py
saudzahirr/videos
538a5f62f3f7c973e06763ef9da9bb8ec3f5006c
[ "MIT" ]
3
2021-12-22T16:36:05.000Z
2022-03-22T06:44:42.000Z
custom/filler.py
saudzahirr/videos
538a5f62f3f7c973e06763ef9da9bb8ec3f5006c
[ "MIT" ]
2
2022-01-25T17:15:02.000Z
2022-02-05T06:02:54.000Z
custom/filler.py
saudzahirr/videos
538a5f62f3f7c973e06763ef9da9bb8ec3f5006c
[ "MIT" ]
null
null
null
from manim import Scene class ExternallyAnimatedScene(Scene): def construct(self): raise Exception("Externally Animated!")
22.666667
47
0.742647
14
136
7.214286
0.928571
0
0
0
0
0
0
0
0
0
0
0
0.176471
136
5
48
27.2
0.901786
0
0
0
0
0
0.147059
0
0
0
0
0
0
1
0.25
false
0
0.25
0
0.75
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
0
1
0
0
4
25424e6b47cddfeacf5c7ab19cd28416327273c7
13,878
py
Python
evaluation/bin/diff/diff_utils_test.py
ckorzen/pdf-text-extraction-benchmark
47b456f7c5c445b5087200d2ce4051647a9cbbf6
[ "MIT" ]
34
2018-05-16T17:50:10.000Z
2021-09-12T19:40:40.000Z
evaluation/bin/diff/diff_utils_test.py
e-sim/pdf-text-extraction-benchmark
42eede9867e5795a6fc040b0a7ce92da3ddd3120
[ "MIT" ]
1
2017-06-20T10:31:15.000Z
2017-08-08T20:10:16.000Z
evaluation/bin/diff/diff_utils_test.py
e-sim/pdf-text-extraction-benchmark
42eede9867e5795a6fc040b0a7ce92da3ddd3120
[ "MIT" ]
10
2018-07-07T15:37:45.000Z
2021-01-28T07:06:27.000Z
import unittest import word_diff from diff_utils import string_to_diff_words from diff_utils import filter_special_chars from diff_utils import compose_characters from diff_utils import split_into_paras_and_words from diff_utils import split from diff_utils import is_special_character from diff_utils import flatten_list class DiffUtilsTest(unittest.TestCase): """ Tests for diff_utils.""" def test_string_to_diff_words_1(self): """ Tests the method string_to_diff_words(). """ words = string_to_diff_words(None) self.assertEqual(words, []) words = string_to_diff_words("") self.assertEqual(words, []) words = string_to_diff_words("A") words_str = self.diff_words_to_string(words) self.assertEqual( words_str, "[(a,A,A,None,None)]") words = string_to_diff_words("A B") words_str = self.diff_words_to_string(words) self.assertEqual( words_str, "[(a,A,A ,None,b),(b,B,B,a,None)]") words = string_to_diff_words("A B C") words_str = self.diff_words_to_string(words) self.assertEqual( words_str, "[(a,A,A ,None,b),(b,B,B ,a,c),(c,C,C,b,None)]") words = string_to_diff_words("A \n\n B \n\n C") words_str = self.diff_words_to_string(words) self.assertEqual( words_str, "[(a,A,A \n\n ,None,b),(b,B,B \n\n ,a,c),(c,C,C,b,None)]") def test_string_to_diff_words_2(self): """ Tests the method string_to_diff_words() with flatten=False. 
""" words = string_to_diff_words(None, flatten=False) self.assertEqual(words, []) words = string_to_diff_words("", flatten=False) self.assertEqual(words, []) words = string_to_diff_words("A", flatten=False) words_str = self.diff_words_to_string(words) self.assertEqual( words_str, "[[(a,A,A,None,None)]]") words = string_to_diff_words("A B", flatten=False) words_str = self.diff_words_to_string(words) self.assertEqual( words_str, "[[(a,A,A ,None,b),(b,B,B,a,None)]]") words = string_to_diff_words("A B C", flatten=False) words_str = self.diff_words_to_string(words) self.assertEqual( words_str, "[[(a,A,A ,None,b),(b,B,B ,a,c),(c,C,C,b,None)]]") words = string_to_diff_words("A \n\n B \n\n C", flatten=False) words_str = self.diff_words_to_string(words) self.assertEqual( words_str, "[[(a,A,A \n\n ,None,b)],[(b,B,B \n\n ,a,c)],[(c,C,C,b,None)]]") def test_string_to_diff_words_3(self): """ Tests the method string_to_diff_words() with to_lower=False. """ words = string_to_diff_words(None, to_lower=False) self.assertEqual(words, []) words = string_to_diff_words("", to_lower=False) self.assertEqual(words, []) words = string_to_diff_words("A", to_lower=False) words_str = self.diff_words_to_string(words) self.assertEqual( words_str, "[(A,A,A,None,None)]") words = string_to_diff_words("A B", to_lower=False) words_str = self.diff_words_to_string(words) self.assertEqual( words_str, "[(A,A,A ,None,B),(B,B,B,A,None)]") words = string_to_diff_words("A B C", to_lower=False) words_str = self.diff_words_to_string(words) self.assertEqual( words_str, "[(A,A,A ,None,B),(B,B,B ,A,C),(C,C,C,B,None)]") words = string_to_diff_words("A \n\n B \n\n C", to_lower=False) words_str = self.diff_words_to_string(words) self.assertEqual( words_str, "[(A,A,A \n\n ,None,B),(B,B,B \n\n ,A,C),(C,C,C,B,None)]") def test_string_to_diff_words_4(self): """ Tests the method string_to_diff_words() with specialchars_pattern="a". 
""" res = string_to_diff_words(None, specialchars_pattern="a") self.assertEqual(res, []) res = string_to_diff_words("", specialchars_pattern="a") self.assertEqual(res, []) res = string_to_diff_words("A", specialchars_pattern="a") res_str = self.diff_words_to_string(res) self.assertEqual(res_str, "[]") res = string_to_diff_words("A B", specialchars_pattern="a") res_str = self.diff_words_to_string(res) self.assertEqual(res_str, "[(b,B,B,None,None)]") res = string_to_diff_words("A B C", specialchars_pattern="a") res_str = self.diff_words_to_string(res) self.assertEqual(res_str, "[(b,B,B ,None,c),(c,C,C,b,None)]") res = string_to_diff_words("A \n\n B \n\n C", specialchars_pattern="a") res_str = self.diff_words_to_string(res) self.assertEqual(res_str, "[(b,B,B \n\n ,None,c),(c,C,C,b,None)]") def test_string_to_diff_words_5(self): """ Tests the method string_to_diff_words() with excludes=["\[A\]"]". """ res = string_to_diff_words(None, excludes=["\[A\]"]) self.assertEqual(res, []) res = string_to_diff_words("", excludes=["\[A\]"]) self.assertEqual(res, []) res = string_to_diff_words("[A]", excludes=["\[A\]"]) res_str = self.diff_words_to_string(res) self.assertEqual(res_str, "[([A],[A],[A],None,None)]") res = string_to_diff_words("[A] B", excludes=["\[A\]"]) res_str = self.diff_words_to_string(res) self.assertEqual( res_str, "[([A],[A],[A] ,None,b),(b,B,B,[A],None)]") res = string_to_diff_words("[A] B C", excludes=["\[A\]"]) res_str = self.diff_words_to_string(res) self.assertEqual( res_str, "[([A],[A],[A] ,None,b),(b,B,B ,[A],c),(c,C,C,b,None)]") res = string_to_diff_words("[A] \n\n B \n\n C", excludes=["\[A\]"]) res_str = self.diff_words_to_string(res) self.assertEqual( res_str, "[([A],[A],[A] \n\n ,None,b),(b,B,B \n\n ,[A],c),(c,C,C,b,None)]") def test_compose_characters(self): """ Tests the method compose_characters(). 
""" res = compose_characters(None) self.assertEqual(res, None) res = compose_characters("") self.assertEqual(res, "") res = compose_characters("A B C") self.assertEqual(res, "A B C") res = compose_characters(u"A\u0300 B\u0342 C\u0302") self.assertEqual(res, "À B͂ Ĉ") res = compose_characters(u"A\u0300 \n\n B\u0342 \n\n C\u0302") self.assertEqual(res, "À \n\n B͂ \n\n Ĉ") def test_split_into_paras_and_words(self): """ Tests the method split_into_paras_and_words(). """ res = split_into_paras_and_words(None) self.assertEqual(res, []) res = split_into_paras_and_words("") self.assertEqual(res, []) res = split_into_paras_and_words("A") self.assertEqual(str(res), "[[a]]") res = split_into_paras_and_words("A B") self.assertEqual(str(res), "[[a, b]]") res = split_into_paras_and_words("A B C") self.assertEqual(str(res), "[[a, b, c]]") res = split_into_paras_and_words("A \n\n B \n\n C") self.assertEqual(str(res), "[[a], [b], [c]]") def test_split(self): """ Tests the method split(). """ res = split(None, None) self.assertEqual(res, None) res = split(None, "") self.assertEqual(res, None) res = split("", "") self.assertEqual(res, ["", ""]) res = split("", None) self.assertEqual(res, ["", ""]) res = split("A B C", "X") self.assertEqual(res, ["A B C", ""]) res = split("A B C", "\s") self.assertEqual(res, ["A", " ", "B", " ", "C", ""]) res = split("A B C", "\s+") self.assertEqual(res, ["A", " ", "B", " ", "C", ""]) res = split("A B C ", "\s+") self.assertEqual(res, ["A", " ", "B", " ", "C", " ", "", ""]) def test_is_special_character(self): """ Tests the method is_special_character(). 
""" res = is_special_character(None, 0, None) self.assertEqual(res, False) res = is_special_character(None, 0, "X") self.assertEqual(res, False) res = is_special_character("A", 0, None) self.assertEqual(res, False) res = is_special_character("", 0, "X") self.assertEqual(res, False) res = is_special_character("A", -1, "X") self.assertEqual(res, False) res = is_special_character("", 1, "X") self.assertEqual(res, False) res = is_special_character("A", -1, "A") self.assertEqual(res, False) res = is_special_character("", 1, "A") self.assertEqual(res, False) res = is_special_character("A", 0, "A") self.assertEqual(res, True) res = is_special_character("ABA", 0, "A") self.assertEqual(res, True) res = is_special_character("ABA", 1, "A") self.assertEqual(res, False) res = is_special_character("ABA", 2, "A") self.assertEqual(res, True) def test_filter_special_chars(self): """ Tests the method filter_special_chars(). """ text = filter_special_chars(None, None) self.assertEqual(text, None) text = filter_special_chars("", None) self.assertEqual(text, "") text = filter_special_chars("ABC", None) self.assertEqual(text, "ABC") text = filter_special_chars("ABC", "X") self.assertEqual(text, "ABC") text = filter_special_chars("AXBC", "X") self.assertEqual(text, "ABC") text = filter_special_chars("XXX", "X") self.assertEqual(text, "") def test_flatten_list(self): """ Tests the method flatten_string(). """ a = word_diff.DiffWord("A") b = word_diff.DiffWord("B") c = word_diff.DiffWord("C") d = word_diff.DiffWord("D") e = word_diff.DiffWord("E") f = word_diff.DiffWord("F") flat1 = flatten_list([a, b, c]) self.assert_flatten_list1(flat1) flat2 = flatten_list([[a, b, c], [d, e, f]]) self.assert_flatten_list2(flat2) def assert_flatten_list1(self, flat1): """ Tests the first flattened list. 
""" self.assertEqual(len(flat1), 3) word0 = flat1[0] self.assertEqual(type(word0), word_diff.DiffWord) self.assertEqual(word0.word, "A") self.assertEqual(word0.para, 0) self.assertEqual(word0.pos_in_para, 0) self.assertEqual(word0.pos_in_text, 0) word1 = flat1[1] self.assertEqual(type(word1), word_diff.DiffWord) self.assertEqual(word1.word, "B") self.assertEqual(word1.para, 0) self.assertEqual(word1.pos_in_para, 1) self.assertEqual(word1.pos_in_text, 1) word2 = flat1[2] self.assertEqual(type(word2), word_diff.DiffWord) self.assertEqual(word2.word, "C") self.assertEqual(word2.para, 0) self.assertEqual(word2.pos_in_para, 2) self.assertEqual(word2.pos_in_text, 2) def assert_flatten_list2(self, flat2): """ Tests the second flattened list. """ self.assertEqual(len(flat2), 6) word0 = flat2[0] self.assertEqual(type(word0), word_diff.DiffWord) self.assertEqual(word0.word, "A") self.assertEqual(word0.para, 0) self.assertEqual(word0.pos_in_para, 0) self.assertEqual(word0.pos_in_text, 0) word1 = flat2[1] self.assertEqual(type(word1), word_diff.DiffWord) self.assertEqual(word1.word, "B") self.assertEqual(word1.para, 0) self.assertEqual(word1.pos_in_para, 1) self.assertEqual(word1.pos_in_text, 1) word2 = flat2[2] self.assertEqual(type(word2), word_diff.DiffWord) self.assertEqual(word2.word, "C") self.assertEqual(word2.para, 0) self.assertEqual(word2.pos_in_para, 2) self.assertEqual(word2.pos_in_text, 2) word3 = flat2[3] self.assertEqual(type(word3), word_diff.DiffWord) self.assertEqual(word3.word, "D") self.assertEqual(word3.para, 1) self.assertEqual(word3.pos_in_para, 0) self.assertEqual(word3.pos_in_text, 3) word4 = flat2[4] self.assertEqual(type(word4), word_diff.DiffWord) self.assertEqual(word4.word, "E") self.assertEqual(word4.para, 1) self.assertEqual(word4.pos_in_para, 1) self.assertEqual(word4.pos_in_text, 4) word5 = flat2[5] self.assertEqual(type(word5), word_diff.DiffWord) self.assertEqual(word5.word, "F") self.assertEqual(word5.para, 1) 
self.assertEqual(word5.pos_in_para, 2) self.assertEqual(word5.pos_in_text, 5) # ========================================================================== # Some util methods. def diff_words_to_string(self, diff_words): """ Creates a string representation for given diff words. """ parts = [] for word in diff_words: if type(word) is list: parts.append(self.diff_words_to_string(word)) else: parts.append(self.diff_word_to_string(word)) return "[%s]" % ",".join(parts) def diff_word_to_string(self, word): """ Creates a string representation for given diff word. """ word_parts = [] word_parts.append(word.word) word_parts.append(word.unnormalized) word_parts.append(word.unnormalized_with_whitespaces) word_parts.append(str(word.prev)) word_parts.append(str(word.next)) return "(%s)" % ",".join(word_parts) if __name__ == '__main__': unittest.main()
34.522388
80
0.585171
1,900
13,878
4.038947
0.057368
0.22283
0.064113
0.090826
0.801798
0.71423
0.663539
0.612588
0.581835
0.523326
0
0.01493
0.256737
13,878
401
81
34.608479
0.728841
0.059735
0
0.405498
0
0.04811
0.092514
0.028567
0
0
0
0
0.405498
1
0.051546
false
0
0.030928
0
0.092784
0
0
0
0
null
1
0
0
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
1
0
0
0
0
0
0
0
0
0
4
c288e0d3d808f6509faa7c1f5f4ff97bdbf9c518
224
py
Python
test/test_main.py
cbartram/pi-assistant
94a4fd7cc9f89bee355bef04e2e8a76b341df613
[ "MIT" ]
null
null
null
test/test_main.py
cbartram/pi-assistant
94a4fd7cc9f89bee355bef04e2e8a76b341df613
[ "MIT" ]
null
null
null
test/test_main.py
cbartram/pi-assistant
94a4fd7cc9f89bee355bef04e2e8a76b341df613
[ "MIT" ]
null
null
null
from pi_assistant.config import Configuration from pi_assistant.main import get_keywords def test_get_keywords_success(): config = Configuration(environment="test") assert get_keywords(config) == [('noomis', 0.5)]
28
52
0.772321
29
224
5.724138
0.586207
0.198795
0.180723
0
0
0
0
0
0
0
0
0.010204
0.125
224
7
53
32
0.836735
0
0
0
0
0
0.044643
0
0
0
0
0
0.2
1
0.2
false
0
0.4
0
0.6
0
1
0
0
null
0
1
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
0
0
0
4
c28ef4560a14afef233d43b8cc4b894f18a59122
146
py
Python
pyamg/util/__init__.py
Alexey-Voronin/pyamg-1
59d35010e4bd660aae3526e8a206a42cb1a54bfa
[ "MIT" ]
null
null
null
pyamg/util/__init__.py
Alexey-Voronin/pyamg-1
59d35010e4bd660aae3526e8a206a42cb1a54bfa
[ "MIT" ]
null
null
null
pyamg/util/__init__.py
Alexey-Voronin/pyamg-1
59d35010e4bd660aae3526e8a206a42cb1a54bfa
[ "MIT" ]
null
null
null
"Utility Functions" from .info import __doc__ from .linalg import * from .utils import * __all__ = [s for s in dir() if not s.startswith('_')]
16.222222
53
0.69863
22
146
4.227273
0.727273
0
0
0
0
0
0
0
0
0
0
0
0.184932
146
8
54
18.25
0.781513
0.116438
0
0
0
0
0.123288
0
0
0
0
0
0
1
0
false
0
0.6
0
0.6
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
1
0
0
4
c2b2b0568b1aa18a50e281ca0e6477d76f4ba907
95
py
Python
dnspython/example/script.py
mcflis/docker-images
68d67e268afaa5290db690624201b48ec1796fb9
[ "MIT" ]
null
null
null
dnspython/example/script.py
mcflis/docker-images
68d67e268afaa5290db690624201b48ec1796fb9
[ "MIT" ]
null
null
null
dnspython/example/script.py
mcflis/docker-images
68d67e268afaa5290db690624201b48ec1796fb9
[ "MIT" ]
null
null
null
import dns.resolver answer = dns.resolver.resolve('github.com') print(answer.rrset.to_text())
19
43
0.768421
14
95
5.142857
0.785714
0.305556
0
0
0
0
0
0
0
0
0
0
0.073684
95
4
44
23.75
0.818182
0
0
0
0
0
0.105263
0
0
0
0
0
0
1
0
false
0
0.333333
0
0.333333
0.333333
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
0
0
0
4
c2f24cd08abe4f35be602d0bd36dd385be684829
481
py
Python
Practice/AllDomains/Languages/Python/Strings/TheMinionGame.py
DHS009/HackerRankSolutions
cc74ecc436c4d3e8ca7d62986a7cbe482f3c24ba
[ "MIT" ]
15
2017-11-10T06:20:22.000Z
2022-03-20T15:33:19.000Z
Practice/AllDomains/Languages/Python/Strings/TheMinionGame.py
DHS009/HackerRankSolutions
cc74ecc436c4d3e8ca7d62986a7cbe482f3c24ba
[ "MIT" ]
1
2018-12-12T15:12:33.000Z
2018-12-12T15:12:33.000Z
Practice/AllDomains/Languages/Python/Strings/TheMinionGame.py
DHS009/HackerRankSolutions
cc74ecc436c4d3e8ca7d62986a7cbe482f3c24ba
[ "MIT" ]
9
2017-07-28T12:54:19.000Z
2021-08-13T12:00:08.000Z
#/* author:@shivkrthakur */ # Enter your code here. Read input from STDIN. Print output to STDOUT input, vowels = raw_input().strip(), 'AEIOU' inputLen = len(input) kevinSum = 0 stuartSum = 0 for x in xrange(inputLen): if input[x] in vowels: kevinSum += inputLen - x else: stuartSum += inputLen - x if stuartSum == kevinSum: print "Draw" elif stuartSum > kevinSum: print "Stuart", stuartSum elif stuartSum < kevinSum: print "Kevin", kevinSum
24.05
69
0.665281
61
481
5.229508
0.540984
0.159875
0.206897
0.163009
0
0
0
0
0
0
0
0.005362
0.224532
481
19
70
25.315789
0.849866
0.197505
0
0
0
0
0.052083
0
0
0
0
0.052632
0
0
null
null
0
0
null
null
0.2
0
0
0
null
0
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
1
0
1
0
0
0
0
0
0
0
0
4
c2f361e268ff44564e495d6657e8729b4b0d6f17
1,094
py
Python
app-django-demo/mysite/myadmin/views.py
Xingkai98/AntiqueID-1
0daf9ebcdbc59182e5e11a845546f29a289a7c34
[ "MIT" ]
6
2018-09-20T03:39:15.000Z
2021-02-23T06:20:51.000Z
app-django-demo/mysite/myadmin/views.py
Xingkai98/AntiqueID-1
0daf9ebcdbc59182e5e11a845546f29a289a7c34
[ "MIT" ]
3
2018-11-03T09:55:38.000Z
2018-12-16T13:34:05.000Z
app-django-demo/mysite/myadmin/views.py
Xingkai98/AntiqueID-1
0daf9ebcdbc59182e5e11a845546f29a289a7c34
[ "MIT" ]
2
2018-09-20T03:39:16.000Z
2019-10-21T04:27:58.000Z
from django.shortcuts import render, redirect import login.models as models # Create your views here. def index(request): return render(request, 'myadmin/intro.html') #用户点击申请专家 def expertapply(request): #user_identity为1表示正在审核 expertList = models.User.objects.filter(user_identity = 1) return render(request, 'myadmin/expertapply.html', locals()) #管理员端确认是否通过申请 def checkapply(request): if request.method == 'POST': UserId = request.POST.get('id') operation = request.POST.get('operation') if operation == 'pass': user = models.User.objects.get(user_id = UserId) user.user_identity = 2 user.save() return redirect('/myadmin/expertapply/', locals()) def auctionmanagement(request): return render(request, 'myadmin/auctionmanagement.html') def mallmanagement(request): return render(request, 'myadmin/mallmanagement.html') def usermanagement(request): return render(request, 'myadmin/usermanagement.html') def expert_detail(request): return render(request, 'myadmin/expert_detail.html')
30.388889
64
0.707495
123
1,094
6.243902
0.382114
0.09375
0.148438
0.203125
0.214844
0
0
0
0
0
0
0.003326
0.175503
1,094
35
65
31.257143
0.848115
0.058501
0
0
0
0
0.187317
0.15122
0
0
0
0
0
1
0.291667
false
0.041667
0.083333
0.208333
0.666667
0
0
0
0
null
0
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
1
1
0
0
4
6c0875db40f2ce46e66813bbb814c64b22fbf66d
317
py
Python
lenzm_utils/flask/__init__.py
mlenzen/Flask-LenzmM-Utils
be5f9f9383d72aa93cc089de356ad3dcdab4f148
[ "BSD-3-Clause" ]
null
null
null
lenzm_utils/flask/__init__.py
mlenzen/Flask-LenzmM-Utils
be5f9f9383d72aa93cc089de356ad3dcdab4f148
[ "BSD-3-Clause" ]
5
2016-10-11T18:30:03.000Z
2019-07-14T09:45:36.000Z
lenzm_utils/flask/__init__.py
mlenzen/Flask-LenzmM-Utils
be5f9f9383d72aa93cc089de356ad3dcdab4f148
[ "BSD-3-Clause" ]
null
null
null
"""lenzm_utils.flask - Utils for Flask Projects """ import urllib from . import db_admin, url_converters, url_for_obj, url_update # noqa def encodeURIComponent(v): # noqa match JS capitalization """Python implementation of Javascript's encodeURIComponent.""" return urllib.parse.quote(v, safe='~()*!.\'')
31.7
71
0.728707
40
317
5.625
0.75
0
0
0
0
0
0
0
0
0
0
0
0.141956
317
9
72
35.222222
0.827206
0.432177
0
0
0
0
0.041916
0
0
0
0
0
0
1
0.25
false
0
0.5
0
1
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
1
0
1
0
0
4
6c0c5e11aff705f0db51ba6565e4b842715bdc00
3,464
py
Python
stubs/m5stack_flowui-v1_4_0-beta/flowlib/uiflow.py
mattytrentini/micropython-stubs
4d596273823b69e9e5bcf5fa67f249c374ee0bbc
[ "MIT" ]
null
null
null
stubs/m5stack_flowui-v1_4_0-beta/flowlib/uiflow.py
mattytrentini/micropython-stubs
4d596273823b69e9e5bcf5fa67f249c374ee0bbc
[ "MIT" ]
null
null
null
stubs/m5stack_flowui-v1_4_0-beta/flowlib/uiflow.py
mattytrentini/micropython-stubs
4d596273823b69e9e5bcf5fa67f249c374ee0bbc
[ "MIT" ]
null
null
null
""" Module: 'flowlib.uiflow' on M5 FlowUI v1.4.0-beta """ # MCU: (sysname='esp32', nodename='esp32', release='1.11.0', version='v1.11-284-g5d8e1c867 on 2019-08-30', machine='ESP32 module with ESP32') # Stubber: 1.3.1 - updated from typing import Any class Btn: """""" def attach(self, *argv) -> Any: pass def deinit(self, *argv) -> Any: pass def detach(self, *argv) -> Any: pass def multiBtnCb(self, *argv) -> Any: pass def restart(self, *argv) -> Any: pass def timerCb(self, *argv) -> Any: pass class BtnChild: """""" def deinit(self, *argv) -> Any: pass def isPressed(self, *argv) -> Any: pass def isReleased(self, *argv) -> Any: pass def pressFor(self, *argv) -> Any: pass def restart(self, *argv) -> Any: pass def upDate(self, *argv) -> Any: pass def wasDoublePress(self, *argv) -> Any: pass def wasPressed(self, *argv) -> Any: pass def wasReleased(self, *argv) -> Any: pass class IP5306: """""" def getBatteryLevel(self, *argv) -> Any: pass def init(self, *argv) -> Any: pass def isChargeFull(self, *argv) -> Any: pass def isCharging(self, *argv) -> Any: pass def setCharge(self, *argv) -> Any: pass def setChargeVolt(self, *argv) -> Any: pass def setVinMaxCurrent(self, *argv) -> Any: pass class Rgb_multi: """""" def deinit(self, *argv) -> Any: pass def setBrightness(self, *argv) -> Any: pass def setColor(self, *argv) -> Any: pass def setColorAll(self, *argv) -> Any: pass def setColorFrom(self, *argv) -> Any: pass def setShowLock(self, *argv) -> Any: pass def show(self, *argv) -> Any: pass class Speaker: """""" def _timeout_cb(self, *argv) -> Any: pass def checkInit(self, *argv) -> Any: pass def setBeat(self, *argv) -> Any: pass def setVolume(self, *argv) -> Any: pass def sing(self, *argv) -> Any: pass def tone(self, *argv) -> Any: pass _exitState = None _is_remote = None _nextP2PTime = 0 _p2pData = None apikey = "67C7D165" binascii = None btn = None btnA = None btnB = None btnC = None def btnText(): pass def cfgRead(): pass def cfgWrite(): pass config_normal = '{\n "start": 
"flow",\n "mode": "internet",\n "server": "Flow.m5stack.com", \n "wifi": {\n "ssid": "",\n "password": ""\n }\n}\n' def const(): pass def core_start(): pass display = None def flowDeinit(): pass class flowExit: """""" gc = None def getP2PData(): pass def get_sd_state(): pass def hwDeinit(): pass lcd = None def loopExit(): pass def loopSetIdle(): pass def loopState(): pass m5base = None machine = None def modeSet(): pass node_id = "840d8e2598b4" os = None power = None def remoteInit(): pass def resetDefault(): pass rgb = None def sd_mount(): pass def sd_umount(): pass def sendP2PData(): pass def setP2PData(): pass speaker = None def start(): pass def startBeep(): pass timEx = None time = None timeSchedule = None time_ex = None timerSch = None def wait(): pass def wait_ms(): pass
12.642336
174
0.544169
416
3,464
4.490385
0.336538
0.161135
0.206103
0.281049
0.352248
0.0894
0.0894
0.046039
0.046039
0.046039
0
0.027872
0.316397
3,464
273
175
12.688645
0.76098
0.062067
0
0.422819
0
0.006711
0.054931
0
0
0
0
0
0
1
0.389262
false
0.395973
0.006711
0
0.436242
0
0
0
0
null
0
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
1
0
0
0
0
0
4
6c1a23727d3a5249c663a3220faa98e342b090b0
2,709
py
Python
keras_losses.py
HelgeS/predml4copboundaries
e536c2677509c03e4c76e1f37b7c78361c1dec57
[ "MIT" ]
null
null
null
keras_losses.py
HelgeS/predml4copboundaries
e536c2677509c03e4c76e1f37b7c78361c1dec57
[ "MIT" ]
null
null
null
keras_losses.py
HelgeS/predml4copboundaries
e536c2677509c03e4c76e1f37b7c78361c1dec57
[ "MIT" ]
null
null
null
from keras import backend as K # Adapted from https://github.com/lukovkin/linex-keras/blob/master/linex_loss.py def linex_underestimate_fn(factor=1): return lambda y_true, y_pred: linex_loss_val(y_true, y_pred, factor) def linex_overestimate_fn(factor=1): return lambda y_true, y_pred: linex_loss_val(y_true, y_pred, -factor) def linex_underestimate(y_true, y_pred): return linex_loss_val(y_true, y_pred, 1) def linex_overestimate(y_true, y_pred): return linex_loss_val(y_true, y_pred, -1) def linex_loss_val(y_true, y_pred, a): delta = sign_ae(y_true, y_pred) # delta = K.abs(y_true - y_pred) res = linex_loss(delta, a=a) return K.mean(res) def linex_loss(delta, a=-1, b=1): """ LinEx(eps) = b * (e^(a * eps) - a * eps - 1) A. Zellner, “Bayesian Estimation and Prediction Using Asymmetric Loss Functions,” J. Am. Stat. Assoc., vol. 81, no. 394, pp. 446–451, 1986. :param delta: :param a: :param b: :return: """ if a != 0 and b > 0: return b * (K.exp(a * delta) - a * delta - 1) else: raise ValueError def sign_ae(x, y): sign_x = K.sign(x) sign_y = K.sign(y) delta = x - y return sign_x * sign_y * K.abs(delta) def shifted_mse_both(y_true, y_pred): overest = shifted_mse_overestimate(y_true[:, 0], y_pred[:, 0]) underest = shifted_mse_underestimate(y_true[:, 1], y_pred[:, 1]) return (overest + underest)/2.0 def shifted_mse_underestimate_fn(factor): return lambda y_true, y_pred: shifted_mse(y_true, y_pred, factor) def shifted_mse_overestimate_fn(factor): return lambda y_true, y_pred: shifted_mse(y_true, y_pred, -factor) def shifted_mse_underestimate(y_true, y_pred): return shifted_mse(y_true, y_pred, 1) def shifted_mse_overestimate(y_true, y_pred): return shifted_mse(y_true, y_pred, -1) def shifted_mse(y_true, y_pred, a): if a == 0: raise ValueError x = y_pred - y_true #K.abs(y_true - ) return K.pow(x, 2) * K.pow(K.sign(x) + a, 2) def pe_ann_both(y_true, y_pred): overest = pe_ann_overestimate(y_true[:, 0], y_pred[:, 0]) underest = pe_ann_underestimate(y_true[:, 1], y_pred[:, 1]) return 
(overest + underest)/2.0 def pe_ann_underestimate_fn(factor): return lambda y_true, y_pred: pe_ann(y_true, y_pred, 1, factor) def pe_ann_overestimate_fn(factor): return lambda y_true, y_pred: pe_ann(y_true, y_pred, 1, -factor) def pe_ann_underestimate(y_true, y_pred): return pe_ann(y_true, y_pred, 1, 2) def pe_ann_overestimate(y_true, y_pred): return pe_ann(y_true, y_pred, 1, -2) def pe_ann(y_true, y_pred, a, b): x = y_pred - y_true return (a + 1/(1 + K.exp(-b*x))) * x**2
25.317757
85
0.672573
483
2,709
3.480331
0.15735
0.113028
0.110648
0.184414
0.663296
0.61749
0.53599
0.525877
0.487805
0.458061
0
0.023448
0.197121
2,709
106
86
25.556604
0.748966
0.131045
0
0.109091
0
0
0
0
0
0
0
0
0
1
0.345455
false
0
0.018182
0.218182
0.709091
0
0
0
0
null
0
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
1
1
0
0
4
6c314f8abe2585c26247f443f112418f9268b3b2
90
py
Python
examples/nas/__init__.py
hase1128/dragonfly
4be7e4c539d3edccc4d243ab9f972b1ffb0d9a5c
[ "MIT" ]
675
2018-08-23T17:30:46.000Z
2022-03-30T18:37:23.000Z
examples/nas/__init__.py
hase1128/dragonfly
4be7e4c539d3edccc4d243ab9f972b1ffb0d9a5c
[ "MIT" ]
62
2018-11-30T23:40:19.000Z
2022-03-10T19:47:27.000Z
examples/nas/__init__.py
hase1128/dragonfly
4be7e4c539d3edccc4d243ab9f972b1ffb0d9a5c
[ "MIT" ]
349
2018-09-10T19:04:34.000Z
2022-03-31T13:10:45.000Z
""" Demos for Neural Architecture Search using Dragonfly. -- kandasamy@cs.cmu.edu """
18
55
0.7
11
90
5.727273
1
0
0
0
0
0
0
0
0
0
0
0
0.166667
90
4
56
22.5
0.84
0.855556
0
null
0
null
0
0
null
0
0
0
null
1
null
true
0
0
null
null
null
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
1
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
0
0
0
0
0
4
6c4119c5662c4b0826d83d930bd128e0cb97425e
292
py
Python
BootGridApp/views.py
cs-fullstack-2019-spring/django-bootstrap-grid-ic-Kenn-CodeCrew
98f322816bf2c2688fb38bc5f1625d2d48a12145
[ "Apache-2.0" ]
null
null
null
BootGridApp/views.py
cs-fullstack-2019-spring/django-bootstrap-grid-ic-Kenn-CodeCrew
98f322816bf2c2688fb38bc5f1625d2d48a12145
[ "Apache-2.0" ]
null
null
null
BootGridApp/views.py
cs-fullstack-2019-spring/django-bootstrap-grid-ic-Kenn-CodeCrew
98f322816bf2c2688fb38bc5f1625d2d48a12145
[ "Apache-2.0" ]
null
null
null
from django.shortcuts import render # Create your views here. def index(request): return render(request, "BootGridApp/signIn.html") def signIn(request): return render(request, "BootGridApp/signIn.html") def signUp(request): return render(request, "BootGridApp/signUp.html")
19.466667
53
0.743151
36
292
6.027778
0.472222
0.179724
0.262673
0.359447
0.631336
0.460829
0.460829
0.460829
0
0
0
0
0.143836
292
14
54
20.857143
0.868
0.078767
0
0.285714
0
0
0.258427
0.258427
0
0
0
0
0
1
0.428571
false
0
0.142857
0.428571
1
0
0
0
0
null
0
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
1
0
0
0
4
6c413a56d9e20d69168a913628c5a545b1e17b3b
124
py
Python
source/python/json-sample.py
rafaritter44/DevOpsEngineerExpress
c61c43e98d7bb89269f7aed5d1c773199f79c20f
[ "Unlicense" ]
51
2018-12-26T17:41:12.000Z
2021-12-25T08:23:59.000Z
source/python/json-sample.py
rafaritter44/DevOpsEngineerExpress
c61c43e98d7bb89269f7aed5d1c773199f79c20f
[ "Unlicense" ]
null
null
null
source/python/json-sample.py
rafaritter44/DevOpsEngineerExpress
c61c43e98d7bb89269f7aed5d1c773199f79c20f
[ "Unlicense" ]
35
2019-01-01T20:36:31.000Z
2022-03-16T04:15:42.000Z
import json j = json.loads('{"gravatai" : "1763", "porto_alegre" : "1769"}') print("Was founded in: " +j['porto_alegre'])
20.666667
64
0.629032
17
124
4.470588
0.764706
0.289474
0
0
0
0
0
0
0
0
0
0.074766
0.137097
124
5
65
24.8
0.635514
0
0
0
0
0
0.596774
0
0
0
0
0
0
1
0
false
0
0.333333
0
0.333333
0.333333
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
0
0
0
4
6c41984ff7aaee0893e6a8b8785df50f177e7743
237
py
Python
torchfly_dev/metrics/__init__.py
ECS-251-W2020/final-project-TorchFly
69f60b337c5dec0b1cd8315c194bc7891ba98d3a
[ "MIT" ]
null
null
null
torchfly_dev/metrics/__init__.py
ECS-251-W2020/final-project-TorchFly
69f60b337c5dec0b1cd8315c194bc7891ba98d3a
[ "MIT" ]
3
2021-06-08T21:07:12.000Z
2021-12-13T20:41:53.000Z
torchfly_dev/metrics/__init__.py
ECS-251-W2020/final-project-TorchFly
69f60b337c5dec0b1cd8315c194bc7891ba98d3a
[ "MIT" ]
1
2020-02-19T00:53:21.000Z
2020-02-19T00:53:21.000Z
""" The Metric system is inherited from allennlp """ from .metric import Metric from .average import Average from .categorical_accuracy import CategoricalAccuracy from .fbeta_measure import FBetaMeasure from .f1_measure import F1Measure
26.333333
53
0.831224
30
237
6.466667
0.566667
0.134021
0
0
0
0
0
0
0
0
0
0.009615
0.122363
237
8
54
29.625
0.923077
0.185654
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
4
6c451fd0aefe804694b6a5a3fda6a90ca22635e4
97
py
Python
install.py
dxtrity/box
bf6bfd977f27734b1f2f18c95bbbcd25d26c1569
[ "MIT" ]
null
null
null
install.py
dxtrity/box
bf6bfd977f27734b1f2f18c95bbbcd25d26c1569
[ "MIT" ]
null
null
null
install.py
dxtrity/box
bf6bfd977f27734b1f2f18c95bbbcd25d26c1569
[ "MIT" ]
null
null
null
# Install files def this(a): print("This is not yet implemented.") print("") print(a)
19.4
41
0.608247
14
97
4.214286
0.714286
0
0
0
0
0
0
0
0
0
0
0
0.237113
97
5
42
19.4
0.797297
0.134021
0
0
0
0
0.337349
0
0
0
0
0
0
1
0.25
false
0
0
0
0.25
0.75
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
0
0
1
0
4
6c6d0740892c1e04a85cff5e01a3ffffc60f7af1
894
py
Python
tests/test_helpers.py
guitarpoet/python-configurator
c470c2e4175c51f214fc0314a9324729dcbc3b5c
[ "Apache-2.0" ]
null
null
null
tests/test_helpers.py
guitarpoet/python-configurator
c470c2e4175c51f214fc0314a9324729dcbc3b5c
[ "Apache-2.0" ]
null
null
null
tests/test_helpers.py
guitarpoet/python-configurator
c470c2e4175c51f214fc0314a9324729dcbc3b5c
[ "Apache-2.0" ]
null
null
null
################################################################################ # # # This is the test for the helpers # # # # @author Jack <jack@thinkingcloud.info> # # @version 1.0 # # @date 2021-05-31 15:52:13 # # # ################################################################################ from configuratorpy import * def test_load_class(): clz = load_class('.plugins.env.EnvExtension') assert clz, 'Class .plugins.env.EnvExtension not loaded'
55.875
80
0.230425
43
894
4.72093
0.767442
0.08867
0.147783
0.26601
0
0
0
0
0
0
0
0.039024
0.541387
894
15
81
59.6
0.456098
0.380313
0
0
0
0
0.385057
0.287356
0
0
0
0
0.25
1
0.25
false
0
0.25
0
0.5
0
0
0
0
null
0
0
1
0
0
0
0
0
0
0
0
1
0
0
0
0
1
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
0
0
0
0
4
6c79e9d3df7d6458b67788a95d3f5f47f98d7e17
3,391
py
Python
ycnbc/base.py
asepscareer/ycnbc
44ad6a73db6252cb33c2e5ea01421108c280557e
[ "Apache-2.0" ]
1
2022-03-20T08:00:22.000Z
2022-03-20T08:00:22.000Z
ycnbc/base.py
asepscareer/ycnbc
44ad6a73db6252cb33c2e5ea01421108c280557e
[ "Apache-2.0" ]
null
null
null
ycnbc/base.py
asepscareer/ycnbc
44ad6a73db6252cb33c2e5ea01421108c280557e
[ "Apache-2.0" ]
null
null
null
#!/usr/bin/env python # -*- coding: utf-8 -*- # # ycnbc - CNBC data downloader # https://github.com/asepscareer/ycnbc # # Copyright 2022 Asep Saputra # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # from __future__ import print_function from .utils import getnews, latest, trending class News(): def latest(self): return latest() def trending(self): return trending() def economy(self): return getnews('economy') def jobs(self): return getnews('jobs') def white_house(self): return getnews('white-house') def hospitals(self): return getnews('hospitals') def transportation(self): return getnews('transportation') def jobs(self): return getnews('jobs') def climate(self): return getnews('climate') def media(self): return getnews('media') def internet(self): return getnews('internet') def congress(self): return getnews('congress') def policy(self): return getnews('policy') def finance(self): return getnews('finance') def life(self): return getnews('life') def defense(self): return getnews('defense') def europe_politics(self): return getnews('europe-politics') def china_politics(self): return getnews('china-politics') def asia_politics(self): return getnews('asia-politics') def world_politics(self): return getnews('world-politics') def equity_opportunity(self): return getnews('equity-opportunity') def politics(self): return getnews('politics') def wealth(self): return getnews('wealth') def world_economy(self): return getnews('world-economy') def central_banks(self): return 
getnews('central-banks') def real_estate(self): return getnews('real-estate') def health_science(self): return getnews('health-and-science') def small_business(self): return getnews('small-business') def lifehealth_insurance(self): return getnews('life-and-health-insurance') def business(self): return getnews('business') def energy(self): return getnews('energy') def industrials(self): return getnews('industrials') def retail(self): return getnews('retail') def cybersecurity(self): return getnews('cybersecurity') def mobile(self): return getnews('mobile') def mobile(self): return getnews('technology') def cnbc_disruptors(self): return getnews('cnbc-disruptors') def tech_guide(self): return getnews('tech-guide') def social_media(self): return getnews('social-media')
23.713287
74
0.625184
384
3,391
5.46875
0.348958
0.185714
0.299524
0.059524
0.054286
0.029524
0.029524
0
0
0
0
0.003635
0.269832
3,391
143
75
23.713287
0.844507
0.194043
0
0.074074
0
0
0.140015
0.009212
0
0
0
0
0
1
0.481481
false
0
0.024691
0.481481
1
0.012346
0
0
0
null
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
1
1
0
0
4
667e1ed9e5b5523ae52808fb7556984074d60390
86
py
Python
docs/make.py
lassoan/SlicerPRISMRendering
6b9a7af56dc883a9eedc5686e974882e1421a577
[ "BSD-3-Clause" ]
1
2020-10-20T14:59:22.000Z
2020-10-20T14:59:22.000Z
docs/make.py
lassoan/SlicerPRISMRendering
6b9a7af56dc883a9eedc5686e974882e1421a577
[ "BSD-3-Clause" ]
13
2020-07-23T16:31:16.000Z
2021-03-19T06:30:35.000Z
docs/make.py
lassoan/SlicerPRISMRendering
6b9a7af56dc883a9eedc5686e974882e1421a577
[ "BSD-3-Clause" ]
3
2020-07-28T18:30:28.000Z
2022-01-18T00:50:29.000Z
import subprocess subprocess.run(['sphinx-build', '-M', 'html', 'source', 'build'])
28.666667
65
0.651163
10
86
5.6
0.8
0
0
0
0
0
0
0
0
0
0
0
0.104651
86
3
65
28.666667
0.727273
0
0
0
0
0
0.341176
0
0
0
0
0
0
1
0
true
0
0.5
0
0.5
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
0
0
4
6680dba5db7ea19c832d1c7201705c0e52e5b874
9,257
py
Python
apps/combine-api/tests/combine/test_get_metadata_from_combine_archive.py
reproducible-biomedical-modeling/CRBM-Viz
cc436a2bbacd2521f02eb3c087a63624c2e93101
[ "MIT" ]
2
2020-05-07T07:45:59.000Z
2020-05-13T23:48:59.000Z
apps/combine-api/tests/combine/test_get_metadata_from_combine_archive.py
reproducible-biomedical-modeling/Biosimulations
f80f18480543e9edcbc2bed8a3f8bf90c4119058
[ "MIT" ]
703
2020-01-17T20:46:38.000Z
2020-08-21T00:55:06.000Z
apps/combine-api/tests/combine/test_get_metadata_from_combine_archive.py
KarrLab/CRBM-Viz
3f2f2fa4c7fe38ccf083001230cc2fec17cd8531
[ "MIT" ]
2
2020-03-26T21:31:38.000Z
2020-07-15T15:49:32.000Z
from biosimulators_utils.omex_meta.data_model import OmexMetadataInputFormat from openapi_core.validation.response.datatypes import OpenAPIResponse from openapi_core.validation.request.datatypes import ( OpenAPIRequest, RequestParameters, ) from src import app from src.exceptions import BadRequestException from src.handlers.combine.get_metadata_for_combine_archive import _convert_rdf_node_to_json from unittest import mock from werkzeug.datastructures import MultiDict import json import os import unittest class GetMetadataTestCase(unittest.TestCase): FIXTURES_DIR = os.path.join(os.path.dirname(__file__), '..', 'fixtures') def test_get_metadata_for_combine_archive_url(self): archive_filename = os.path.join( self.FIXTURES_DIR, 'Ciliberto-J-Cell-Biol-2003-morphogenesis-checkpoint-continuous.omex') with open(archive_filename, 'rb') as file: archive_url_content = file.read() archive_url = 'https://archive.combine.org' data = MultiDict([ ('url', archive_url), ('omexMetadataFormat', OmexMetadataInputFormat.rdfxml.value), ]) response = mock.Mock( raise_for_status=lambda: None, content=archive_url_content, ) endpoint = '/combine/metadata/biosimulations' with mock.patch('requests.get', return_value=response): with app.app.app.test_client() as client: response = client.post(endpoint, data=data, content_type='multipart/form-data') self.assertEqual(response.status_code, 200, response.json) metadata = response.json sed_output_specs_filename = os.path.join( self.FIXTURES_DIR, 'Ciliberto-J-Cell-Biol-2003-morphogenesis-checkpoint-continuous.md.json') with open(sed_output_specs_filename, 'r') as file: expected_metadata = json.load(file) self.assertEqual(metadata, expected_metadata, metadata) # validate request and response if hasattr(self, "request_validator"): request = OpenAPIRequest( full_url_pattern='https://127.0.0.1/combine/metadata/biosimulations', method='post', body={ 'url': archive_url, 'omexMetadataFormat': OmexMetadataInputFormat.rdfxml.value, }, 
mimetype='multipart/form-data', parameters=RequestParameters(), ) result = self.request_validator.validate(request) result.raise_for_errors() response = OpenAPIResponse(data=json.dumps(expected_metadata), status_code=200, mimetype='application/json') result = self.response_validator.validate(request, response) result.raise_for_errors() def test_get_metadata_for_combine_archive_file_as_biosimulations(self): archive_filename = os.path.join( self.FIXTURES_DIR, 'Ciliberto-J-Cell-Biol-2003-morphogenesis-checkpoint-continuous.omex') fid = open(archive_filename, 'rb') data = MultiDict([ ('file', fid), ('omexMetadataFormat', OmexMetadataInputFormat.rdfxml.value), ]) endpoint = '/combine/metadata/biosimulations' with app.app.app.test_client() as client: response = client.post(endpoint, data=data, content_type="multipart/form-data") self.assertEqual(response.status_code, 200, response.json) metadata = response.json sed_output_specs_filename = os.path.join( self.FIXTURES_DIR, 'Ciliberto-J-Cell-Biol-2003-morphogenesis-checkpoint-continuous.md.json') with open(sed_output_specs_filename, 'r') as file: expected_metadata = json.load(file) self.assertEqual(metadata, expected_metadata) fid.close() # validate request and response if hasattr(self, "request_validator"): with open(archive_filename, 'rb') as file: file_content = file.read() request = OpenAPIRequest( full_url_pattern='https://127.0.0.1/combine/metadata/biosimulations', method='post', body={ 'file': file_content, 'omexMetadataFormat': OmexMetadataInputFormat.rdfxml.value, }, mimetype='multipart/form-data', parameters=RequestParameters(), ) result = self.request_validator.validate(request) result.raise_for_errors() response = OpenAPIResponse(data=json.dumps(expected_metadata), status_code=200, mimetype='application/json') result = self.response_validator.validate(request, response) result.raise_for_errors() def test_get_metadata_for_combine_archive_file_as_rdf_triples(self): archive_filename = os.path.join( 
self.FIXTURES_DIR, 'Ciliberto-J-Cell-Biol-2003-morphogenesis-checkpoint-continuous.omex') fid = open(archive_filename, 'rb') data = MultiDict([ ('file', fid), ('omexMetadataFormat', OmexMetadataInputFormat.rdfxml.value), ]) endpoint = '/combine/metadata/rdf' with app.app.app.test_client() as client: response = client.post(endpoint, data=data, content_type="multipart/form-data") self.assertEqual(response.status_code, 200, response.json) metadata = response.json self.assertEqual(metadata[0], { '_type': 'RdfTriple', 'subject': { '_type': 'RdfUriNode', 'value': 'http://omex-library.org/Ciliberto-J-Cell-Biol-2003-morphogenesis-checkpoint-continuous.omex', }, 'predicate': { '_type': 'RdfUriNode', 'value': 'http://purl.org/dc/elements/1.1/title', }, 'object': { '_type': 'RdfLiteralNode', 'value': 'Morphogenesis checkpoint in budding yeast (continuous) (Ciliberto et al., Journal Cell Biology, 2003)', } }) fid.close() # validate request and response if hasattr(self, "request_validator"): with open(archive_filename, 'rb') as file: file_content = file.read() request = OpenAPIRequest( full_url_pattern='https://127.0.0.1/combine/metadata/rdf', method='post', body={ 'file': file_content, 'omexMetadataFormat': OmexMetadataInputFormat.rdfxml.value, }, mimetype='multipart/form-data', parameters=RequestParameters(), ) result = self.request_validator.validate(request) result.raise_for_errors() response = OpenAPIResponse(data=json.dumps(metadata), status_code=200, mimetype='application/json') result = self.response_validator.validate(request, response) result.raise_for_errors() def test_get_metadata_for_combine_archive_error_handling(self): endpoint = '/combine/metadata/biosimulations' data = MultiDict([ ('omexMetadataFormat', OmexMetadataInputFormat.rdfxml.value), ]) with app.app.app.test_client() as client: response = client.post(endpoint, data=data, content_type='multipart/form-data') self.assertEqual(response.status_code, 400, response.json) self.assertIn('must be used', 
response.json['title']) if hasattr(self, "response_validator"): request = OpenAPIRequest( full_url_pattern='https://127.0.0.1/combine/metadata/biosimulations', method='post', body={ 'url': 'x', 'omexMetadataFormat': OmexMetadataInputFormat.rdfxml.value, }, mimetype=None, parameters=RequestParameters(), ) response = OpenAPIResponse( data=json.dumps(response.json), status_code=400, mimetype='application/json') result = self.response_validator.validate(request, response) result.raise_for_errors() archive_filename = os.path.join( self.FIXTURES_DIR, 'invalid-metadata.omex') fid = open(archive_filename, 'rb') data = MultiDict([ ('file', fid), ('omexMetadataFormat', OmexMetadataInputFormat.rdfxml.value), ]) with app.app.app.test_client() as client: response = client.post(endpoint, data=data, content_type="multipart/form-data") self.assertEqual(response.status_code, 400, response.json) self.assertIn('is not valid', response.json['title']) fid.close() def test__convert_rdf_node_to_json(self): with self.assertRaises(BadRequestException): _convert_rdf_node_to_json(None)
42.077273
129
0.605812
898
9,257
6.062361
0.165924
0.024247
0.0777
0.085966
0.777002
0.733101
0.733101
0.707201
0.699853
0.680566
0
0.012504
0.291563
9,257
219
130
42.269406
0.817627
0.009614
0
0.643617
0
0.005319
0.172741
0.05227
0
0
0
0
0.058511
1
0.026596
false
0
0.058511
0
0.095745
0
0
0
0
null
0
0
0
0
1
1
1
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
4
6698fc9b9ad877791a6b74efa89cc9dda19baee0
77
py
Python
tasks/EPAM/python_course/foundation-python/l3/temp.py
AleksNeStu/projects
1a4c68dfbdcb77228f0f3617e58fd18fcb1f5dbb
[ "Apache-2.0" ]
2
2022-01-19T18:01:35.000Z
2022-02-06T06:54:38.000Z
tasks/EPAM/python_course/foundation-python/l3/temp.py
AleksNeStu/projects
1a4c68dfbdcb77228f0f3617e58fd18fcb1f5dbb
[ "Apache-2.0" ]
null
null
null
tasks/EPAM/python_course/foundation-python/l3/temp.py
AleksNeStu/projects
1a4c68dfbdcb77228f0f3617e58fd18fcb1f5dbb
[ "Apache-2.0" ]
null
null
null
_dict = {('Name'): 'Zara', 'Age': 7} print("_dict['Name']: ", _dict['Name'])
25.666667
39
0.519481
10
77
3.7
0.6
0.648649
0
0
0
0
0
0
0
0
0
0.014706
0.116883
77
2
40
38.5
0.529412
0
0
0
0
0
0.38961
0
0
0
0
0
0
1
0
false
0
0
0
0
0.5
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
1
0
4
dd104d7513b21f1e5de59ec82e381297291f1bf4
744
py
Python
config.py
sirKiraUzumaki/Zoom-Bot-Application
3899a7e196464537dd20f8dc3e2089f3144ef0bd
[ "Apache-2.0" ]
1
2021-12-02T03:58:00.000Z
2021-12-02T03:58:00.000Z
config.py
sirKiraUzumaki/Zoom-Bot-Application
3899a7e196464537dd20f8dc3e2089f3144ef0bd
[ "Apache-2.0" ]
null
null
null
config.py
sirKiraUzumaki/Zoom-Bot-Application
3899a7e196464537dd20f8dc3e2089f3144ef0bd
[ "Apache-2.0" ]
null
null
null
id = { "Accountancy": ["677 986 3608", "760646"], "Business": ["674 382 4240", "bst369"], "Arts": ["503 821 5968", "Art1309"], "English": ["835 860 0276", "0000"], "Moral Science": ["856 7761 0332", "81b8XM"], "Economics": ["653 479 2542", "G6sJF4"], "Subject Name": ["Meeting Id", "Meeting Password"], "Subject Name": ["Meeting Id", "Meeting Password"], "Subject Name": ["Meeting Id", "Meeting Password"], "Subject Name": ["Meeting Id", "Meeting Password"], "Subject Name": ["Meeting Id", "Meeting Password"], "Subject Name": ["Meeting Id", "Meeting Password"] } zoom_location = "C:\\Users\\God Bless Me\\AppData\\Roaming\\Zoom\\bin\\Zoom.exe" #Put your Zookm Location here
43.764706
111
0.591398
86
744
5.104651
0.593023
0.150342
0.246014
0.273349
0.47836
0.47836
0.47836
0.47836
0.47836
0.47836
0
0.139731
0.201613
744
16
112
46.5
0.599327
0.037634
0
0.333333
0
0
0.643777
0.058655
0
0
0
0
0
1
0
false
0.4
0
0
0
0
0
0
0
null
0
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
null
0
0
0
0
0
0
0
1
0
0
0
0
0
4
dd2110785b5a22f19575d7bf185222d6a52a40d6
32,560
py
Python
module_troops.py
Jazora/Feudal_World
2fca7827abfb6a59a40f21ac372163bd595ef3bb
[ "BSD-3-Clause" ]
null
null
null
module_troops.py
Jazora/Feudal_World
2fca7827abfb6a59a40f21ac372163bd595ef3bb
[ "BSD-3-Clause" ]
null
null
null
module_troops.py
Jazora/Feudal_World
2fca7827abfb6a59a40f21ac372163bd595ef3bb
[ "BSD-3-Clause" ]
null
null
null
from header_common import * from header_items import * from header_troops import * from header_skills import * #################################################################################################################### # Each troop contains the following fields: # 1) Troop id (string): used for referencing troops in other files. The prefix trp_ is automatically added before each troop-id # 2) Toop name (string) # 3) Plural troop name (string) # 4) Troop flags (int). See header_troops.py for a list of available flags # 5) Scene and entry # 5.1) Scene (only applicable to heroes) # 5.2) Entry point using the entry() function: for example, entry(15) # 6) Reserved (int). Put constant "reserved" or 0 # 7) Faction (int) # 8) Inventory (list): Must be a list of items # 9) Attributes (int): Example usage: # str_6|agi_6|int_4|cha_5|level(5) # 10) Weapon proficiencies (int): Example usage: # wp_one_handed(55)|wp_two_handed(90)|wp_polearm(36)|wp_archery(80)|wp_crossbow(24)|wp_throwing(45) # 11) Skills (int): See header_skills.py to see a list of skills. 
Example: # knows_ironflesh_3|knows_power_strike_2|knows_athletics_2|knows_riding_2 # 12) Face code (int): You can obtain the face code by pressing ctrl+E in face generator screen # 13) Face code (int)(2) (only applicable to regular troops, can be omitted for heroes): # The game will create random faces between Face code 1 and face code 2 for generated troops # 14) Troop image (string): If this variable is set, the troop will use an image rather than its 3D visual during the conversations #################################################################################################################### def wp(x): return wp_one_handed(x)|wp_two_handed(x)|wp_polearm(x)|wp_archery(x)|wp_crossbow(x)|wp_throwing(x) def wpe(m,a,c,t): return wp_one_handed(m)|wp_two_handed(m)|wp_polearm(m)|wp_archery(a)|wp_crossbow(c)|wp_throwing(t) def wpex(o,w,p,a,c,t): return wp_one_handed(o)|wp_two_handed(w)|wp_polearm(p)|wp_archery(a)|wp_crossbow(c)|wp_throwing(t) def wp_melee(x): return wp_one_handed(x + 20)|wp_two_handed(x)|wp_polearm(x + 10) reserved = 0 no_scene = 0 default_face_1 = 0x0000000400000001124000000020000000000000001c00800000000000000000 default_face_2 = 0x0000000cbf00230c4deeffffffffffff00000000001efff90000000000000000 tf_guarantee_all = tf_guarantee_boots|tf_guarantee_armor|tf_guarantee_gloves|tf_guarantee_helmet|tf_guarantee_horse|tf_guarantee_shield|tf_guarantee_ranged tf_guarantee_all_wo_ranged = tf_guarantee_boots|tf_guarantee_armor|tf_guarantee_gloves|tf_guarantee_helmet|tf_guarantee_horse|tf_guarantee_shield # troops have 30 charisma and 10 weapon master to stop the game engine from adding random skill levels pw_attr = cha_30|level(1) knows_pw = knows_weapon_master_10 troops = [ ["player","Player","Player",tf_hero|tf_unmoveable_in_party_window,no_scene,reserved,0, [], str_15|agi_15|int_4|cha_4,wp(15),0,0x000000018000000136db6db6db6db6db00000000001db6db0000000000000000], 
["multiplayer_profile_troop_male","multiplayer_profile_troop_male","multiplayer_profile_troop_male", tf_hero|tf_guarantee_all,0,0,0, [], str_14, 0, 0, 0x000000018000000136db6db6db6db6db00000000001db6db0000000000000000], ["multiplayer_profile_troop_female","multiplayer_profile_troop_female","multiplayer_profile_troop_female", tf_hero|tf_female|tf_guarantee_all,0,0,0, [], str_14, 0, 0, 0x000000018000000136db6db6db6db6db00000000001db6db0000000000000000], ["temp_troop","Temp Troop","Temp Troop",tf_hero,no_scene,reserved,0,[],0,0,0,0], #################################################################################################################### # Troops before this point are hardwired into the game and their order should not be changed! #################################################################################################################### #Basic Tier ["peasant","Peasant","a peasant",tf_guarantee_all,0,0,"fac_commoners", ["itm_old_coarse_tunic","itm_ragged_shirt","itm_woolen_hose"], str_9|agi_11|pw_attr,wpex(50,30,30,0,0,30),knows_pw|knows_power_strike_1|knows_athletics_2|knows_labouring_2|knows_tailoring_1|knows_riding_1,default_face_1,default_face_2], ["peasant_female","Peasant","a peasant",tf_guarantee_all,0,0,"fac_commoners", ["itm_dress","itm_blue_dress","itm_peasant_dress","itm_woolen_dress","itm_ripped_woolen_hose","itm_woolen_hose","itm_blue_hose"], str_9|agi_11|pw_attr,wpex(50,30,30,0,0,30),knows_pw|knows_power_strike_1|knows_athletics_2|knows_labouring_2|knows_tailoring_1|knows_riding_1,default_face_1,default_face_2], ["serf","Serf","a serf",tf_guarantee_all,0,0,"fac_commoners", ["itm_heraldic_coarse_tunic","itm_woolen_hose"], str_8|agi_10|pw_attr,wpex(50,30,30,0,0,50),knows_pw|knows_power_strike_1|knows_athletics_2|knows_labouring_5|knows_engineer_1|knows_tailoring_1|knows_herding_3|knows_riding_1,default_face_1,default_face_2], ["serf_female","Serf","a serf",tf_guarantee_all,0,0,"fac_commoners", 
["itm_heraldic_blue_dress","itm_heraldic_peasant_dress","itm_ripped_woolen_hose","itm_woolen_hose","itm_blue_hose"], str_8|agi_10|pw_attr,wpex(50,30,30,0,0,50),knows_pw|knows_power_strike_1|knows_athletics_2|knows_labouring_5|knows_engineer_1|knows_tailoring_1|knows_herding_3|knows_riding_1,default_face_1,default_face_2], ["beggar","Beggar","a beggar",tf_guarantee_all,0,0,"fac_commoners", ["itm_ragged_shirt","itm_tattered_wrapping_boots"], str_11|agi_13|pw_attr,wpex(60,40,60,10,40,60),knows_pw|knows_ironflesh_1|knows_power_strike_1|knows_athletics_5|knows_riding_1|knows_looting_2|knows_labouring_2,default_face_1,default_face_2], ["ministrel","Minstrel","a minstrel",tf_guarantee_all,0,0,"fac_commoners", ["itm_old_tabard","itm_woolen_hose"], str_10|agi_12|pw_attr,wpex(50,30,30,0,0,80),knows_pw|knows_musician_3|knows_athletics_5|knows_riding_3,default_face_1,default_face_2], ["huntsman","Huntsman","a huntsman",tf_guarantee_all,0,0,"fac_commoners", ["itm_ragged_woolen_cap","itm_old_coarse_tunic","itm_crude_bow","itm_bent_arrows","itm_woolen_hose"], str_11|agi_13|pw_attr,wpex(70,35,60,120,100,50),knows_pw|knows_ironflesh_2|knows_power_strike_1|knows_power_draw_3|knows_athletics_3|knows_riding_1|knows_labouring_1|knows_herding_3,default_face_1,default_face_2], ["alchemist","Alchemist","a alchemist",tf_guarantee_all,0,0,"fac_commoners", ["itm_black_robe","itm_woolen_hose"], str_9|agi_12|pw_attr,wpex(70,30,30,20,20,20),knows_pw|knows_athletics_3|knows_riding_3|knows_alchemy_3|knows_wound_treatment_1|knows_labouring_1,default_face_1,default_face_2], ["healer","Healer","a healer",tf_guarantee_all,0,0,"fac_commoners", ["itm_old_linen_tunic","itm_woolen_hose"], str_8|agi_9|pw_attr,wpex(70,10,20,0,0,5),knows_pw|knows_athletics_1|knows_riding_1|knows_wound_treatment_2|knows_alchemy_1|knows_labouring_1|knows_tailoring_2,default_face_1,default_face_2], ["monk","Monk","a monk",tf_guarantee_all,0,0,"fac_commoners", ["itm_friar_sandals","itm_friar_robe","itm_brown_hood"], 
str_8|agi_9|pw_attr,wpex(25,10,80,0,0,30),knows_pw|knows_athletics_1|knows_riding_1|knows_wound_treatment_1|knows_engineer_1|knows_labouring_5|knows_tailoring_1|knows_herding_2,default_face_1,default_face_2], ["militia","Militia","a militia",tf_guarantee_all,0,0,"fac_commoners", ["itm_ragged_woolen_cap","itm_heraldic_coarse_tunic","itm_woolen_hose"], str_13|agi_12|pw_attr,wpex(90,40,80,10,40,50),knows_pw|knows_ironflesh_2|knows_power_strike_2|knows_athletics_2|knows_riding_1|knows_labouring_1|knows_herding_2|knows_wage_1,default_face_1,default_face_2], ["footman","Footman","a footman",tf_guarantee_all,0,0,"fac_commoners", ["itm_arming_cap","itm_heraldic_shirt","itm_tattered_wrapping_boots"], str_15|agi_14|pw_attr,wpex(100,100,130,10,45,80),knows_pw|knows_ironflesh_5|knows_power_strike_4|knows_power_throw_2|knows_shield_1|knows_athletics_5|knows_riding_2|knows_sailing_3|knows_wage_1,default_face_1,default_face_2], ["archer","Archer","an archer",tf_guarantee_all,0,0,"fac_commoners", ["itm_stained_felt_hat_b","itm_tattered_headcloth","itm_heraldic_shirt","itm_tattered_wrapping_boots"], str_14|agi_14|pw_attr,wpex(90,60,70,125,50,50),knows_pw|knows_ironflesh_4|knows_power_strike_3|knows_power_draw_4|knows_athletics_4|knows_riding_2|knows_sailing_3|knows_wage_1,default_face_1,default_face_2], ["crossbowman","Crossbowman","a crossbowman",tf_guarantee_all,0,0,"fac_commoners", ["itm_stained_felt_hat_b","itm_ragged_woolen_cap","itm_heraldic_shirt","itm_tattered_wrapping_boots"], str_14|agi_14|pw_attr,wpex(90,60,60,50,125,50),knows_pw|knows_ironflesh_3|knows_power_strike_2|knows_athletics_4|knows_riding_2|knows_sailing_3|knows_wage_1,default_face_1,default_face_2], ["lancer","Lancer","a lancer",tf_mounted|tf_guarantee_all,0,0,"fac_commoners", ["itm_arming_cap","itm_heraldic_shirt","itm_tattered_wrapping_boots"], 
str_14|agi_14|pw_attr,wpex(90,60,120,10,30,80),knows_pw|knows_ironflesh_3|knows_power_strike_3|knows_power_throw_2|knows_athletics_1|knows_riding_6|knows_wage_1,default_face_1,default_face_2], ["man_at_arms","Man at Arms","a man at arms",tf_mounted|tf_guarantee_all,0,0,"fac_commoners", ["itm_arming_cap","itm_tabard","itm_old_hide_boots"], str_15|agi_15|pw_attr,wpex(105,120,110,20,50,30),knows_pw|knows_ironflesh_4|knows_power_strike_4|knows_shield_1|knows_athletics_2|knows_riding_4|knows_wage_1,default_face_1,default_face_2], ["sergeant","Sergeant","a sergeant",tf_guarantee_all,0,0,"fac_commoners", ["itm_arming_cap","itm_tabard","itm_old_hide_boots"], str_15|agi_15|pw_attr,wpex(110,125,110,20,60,40),knows_pw|knows_ironflesh_5|knows_power_strike_4|knows_shield_2|knows_athletics_5|knows_riding_2|knows_wage_1,default_face_1,default_face_2], ["engineer","Engineer","an engineer",tf_guarantee_all,0,0,"fac_commoners", ["itm_stained_felt_hat_b","itm_tattered_headcloth","itm_tabard","itm_old_hide_boots","itm_tattered_wrapping_boots",], str_10|agi_12|pw_attr,wpex(90,50,60,30,65,50),knows_pw|knows_ironflesh_2|knows_power_strike_2|knows_athletics_2|knows_engineer_2|knows_siege_1|knows_riding_2|knows_looting_1|knows_sailing_4|knows_wage_1,default_face_1,default_face_2], ["sailor","Sailor","a sailor",tf_guarantee_all,0,0,"fac_commoners", ["itm_stained_felt_hat_b","itm_ragged_woolen_cap","itm_old_hide_boots"], str_14|agi_12|pw_attr,wpex(95,70,80,30,40,70),knows_pw|knows_ironflesh_3|knows_power_strike_3|knows_athletics_2|knows_sailing_9|knows_riding_1|knows_wage_1,default_face_1,default_face_2], ["blacksmith","Blacksmith","a Blacksmith",tf_guarantee_all,0,0,"fac_commoners", ["itm_stained_felt_hat_b","itm_ragged_leather_apron","itm_old_hide_boots"], str_14|agi_12|pw_attr,wpex(90,55,70,30,70,50),knows_pw|knows_ironflesh_3|knows_power_strike_4|knows_athletics_1|knows_engineer_7|knows_tailoring_5|knows_riding_2|knows_looting_2|knows_sailing_4,default_face_1,default_face_2], 
["armoursmith","Armourer","a armourer",tf_guarantee_all,0,0,"fac_commoners", ["itm_stained_felt_hat_b","itm_ragged_leather_apron","itm_old_hide_boots"], str_14|agi_12|pw_attr,wpex(90,55,70,30,70,50),knows_pw|knows_ironflesh_3|knows_power_strike_4|knows_athletics_1|knows_engineer_5|knows_tailoring_7|knows_riding_2|knows_looting_2|knows_sailing_4,default_face_1,default_face_2], ["doctor","Doctor","a doctor",tf_guarantee_all,0,0,"fac_commoners", ["itm_black_robe","itm_black_hood","itm_ripped_woolen_hose"], str_10|agi_10|pw_attr,wpex(70,40,50,10,20,30),knows_pw|knows_athletics_2|knows_power_strike_2|knows_wound_treatment_5|knows_alchemy_2|knows_riding_2,default_face_1,default_face_2], ["traveler","Traveler","a traveler",tf_guarantee_all,0,0,"fac_commoners", ["itm_old_coarse_tunic","itm_old_hide_boots",], str_11|agi_13|pw_attr,wpex(70,40,90,10,40,60),knows_pw|knows_ironflesh_1|knows_power_strike_2|knows_athletics_5|knows_sailing_7|knows_riding_3|knows_labouring_1|knows_tailoring_2,default_face_1,default_face_2], ["herdsman","Herdsman","a herdsman",tf_guarantee_all,0,0,"fac_commoners", ["itm_old_coarse_tunic","itm_ragged_shirt","itm_tattered_wrapping_boots","itm_herding_crook"], str_10|agi_14|pw_attr,wpex(50,35,40,0,10,40),knows_pw|knows_power_strike_1|knows_athletics_5|knows_labouring_2|knows_riding_3|knows_herding_5,default_face_1,default_face_2], ["merchant","Merchant","a merchant",tf_guarantee_all,0,0,"fac_commoners", ["itm_rich_outfit","itm_ripped_woolen_hose",], str_12|agi_13|pw_attr,wpex(50,40,50,10,20,60),knows_pw|knows_ironflesh_1|knows_power_strike_1|knows_athletics_5|knows_sailing_7|knows_riding_6|knows_trade_2|knows_labouring_1|knows_herding_1,default_face_1,default_face_2], #Mercenary Tier ["mercenary_crossbowman","Mercenary Crossbowman","a mercenary crossbowman",tf_guarantee_all,0,0,"fac_commoners", ["itm_stained_felt_hat_b","itm_ragged_woolen_cap","itm_old_coarse_tunic","itm_old_tabard","itm_tattered_wrapping_boots"], 
str_14|agi_14|pw_attr,wpex(60,70,70,50,125,50),knows_pw|knows_ironflesh_3|knows_power_strike_2|knows_athletics_4|knows_riding_2|knows_sailing_3,default_face_1,default_face_2], ["mercenary_cavalry","Mercenary Cavalry","a mercenary cavalry",tf_mounted|tf_guarantee_all,0,0,"fac_commoners", ["itm_stained_felt_hat_b","itm_ragged_woolen_cap","itm_old_coarse_tunic","itm_tattered_wrapping_boots"], str_14|agi_14|pw_attr,wpex(90,60,120,10,30,80),knows_pw|knows_ironflesh_3|knows_power_strike_3|knows_athletics_1|knows_riding_6,default_face_1,default_face_2], ["mercenary_hired_blade","Hired Blade","a hired blade",tf_guarantee_all,0,0,"fac_commoners", ["itm_ragged_woolen_cap","itm_old_coarse_tunic","itm_old_tabard","itm_tattered_wrapping_boots"], str_15|agi_14|pw_attr,wpex(90,110,110,10,35,80),knows_pw|knows_ironflesh_5|knows_power_strike_4|knows_power_throw_2|knows_shield_1|knows_athletics_5|knows_riding_2|knows_sailing_3,default_face_1,default_face_2], ["mercenary_sword_sister","Sword Sister","a sword sister",tf_guarantee_all,0,0,"fac_commoners", ["itm_ragged_woolen_cap","itm_old_coarse_tunic","itm_old_tabard","itm_tattered_wrapping_boots"], str_15|agi_14|pw_attr,wpex(90,110,110,10,35,80),knows_pw|knows_ironflesh_5|knows_power_strike_4|knows_power_throw_2|knows_shield_1|knows_athletics_5|knows_riding_2|knows_sailing_3,default_face_1,default_face_2], ["mercenary_end","playable_troops_end","playable_troops_end",0,0,0,0,[],0,0,0,0,0], ##Cultural Tier #Swadia ["knight","Knight","a knight",tf_guarantee_all,0,0,"fac_commoners", ["itm_arming_cap","itm_tabard","itm_old_hide_boots"], str_15|agi_15|pw_attr,wpex(105,120,125,20,50,30),knows_pw|knows_ironflesh_4|knows_power_strike_5|knows_shield_2|knows_athletics_2|knows_riding_5|knows_wage_1,default_face_1,default_face_2], #Rhodok ["sharpshooter","Sharpshooter","a sharpshooter",tf_guarantee_all,0,0,"fac_commoners", ["itm_stained_felt_hat_b","itm_ragged_woolen_cap","itm_heraldic_shirt","itm_tattered_wrapping_boots"], 
str_14|agi_14|pw_attr,wpex(90,60,60,50,140,50),knows_pw|knows_ironflesh_3|knows_power_strike_3|knows_shield_1|knows_athletics_4|knows_riding_2|knows_sailing_3|knows_wage_1,default_face_1,default_face_2], #Nord ["huscarl","Huscarl","a huscarl",tf_guarantee_all,0,0,"fac_commoners", ["itm_arming_cap","itm_tabard","itm_old_hide_boots"], str_17|agi_14|pw_attr,wpex(125,120,125,20,60,80),knows_pw|knows_ironflesh_5|knows_power_strike_5|knows_shield_3|knows_power_throw_1|knows_athletics_5|knows_riding_2|knows_wage_1,default_face_1,default_face_2], ["shieldmaiden","Shield Maiden","a shield maiden",tf_guarantee_all,0,0,"fac_commoners", ["itm_arming_cap","itm_tabard","itm_old_hide_boots"], str_17|agi_14|pw_attr,wpex(125,120,125,20,60,80),knows_pw|knows_ironflesh_5|knows_power_strike_5|knows_shield_3|knows_power_throw_1|knows_athletics_5|knows_riding_2|knows_wage_1,default_face_1,default_face_2], #Sarranid ["mamaluke","Mamaluke","a mamaluke",tf_mounted|tf_guarantee_all,0,0,"fac_commoners", ["itm_arming_cap","itm_tabard","itm_old_hide_boots"], str_15|agi_16|pw_attr,wpex(120,105,110,20,50,30),knows_pw|knows_ironflesh_3|knows_power_strike_4|knows_shield_1|knows_athletics_3|knows_riding_7|knows_wage_1,default_face_1,default_face_2], ["slave","Slave","a slave",tf_guarantee_all,0,0,"fac_commoners", ["itm_tattered_wrapping_boots"], str_14|agi_15|pw_attr,wpex(90,100,130,10,45,80),knows_pw|knows_ironflesh_5|knows_power_strike_4|knows_power_throw_2|knows_shield_1|knows_athletics_5|knows_riding_2|knows_sailing_3|knows_labouring_2,default_face_1,default_face_2], #Vaegir ["marksman","Marksman","an marksman",tf_guarantee_all,0,0,"fac_commoners", ["itm_stained_felt_hat_b","itm_tattered_headcloth","itm_heraldic_shirt","itm_tattered_wrapping_boots"], str_14|agi_14|pw_attr,wpex(90,60,70,145,50,50),knows_pw|knows_ironflesh_4|knows_power_strike_3|knows_power_draw_5|knows_athletics_4|knows_riding_2|knows_sailing_3|knows_wage_1,default_face_1,default_face_2], #Khergit ["horse_archer","Horse 
Archer","a horse archer",tf_mounted|tf_guarantee_all,0,0,"fac_commoners", ["itm_stained_felt_hat_b","itm_tattered_headcloth","itm_heraldic_shirt","itm_tattered_wrapping_boots"], str_14|agi_14|pw_attr,wpex(90,60,60,130,30,50),knows_pw|knows_ironflesh_2|knows_power_strike_2|knows_power_draw_3|knows_horse_archery_2|knows_athletics_1|knows_riding_6|knows_wage_1,default_face_1,default_face_2], #Multiple Cultures ["guard","Guard","a guard",tf_guarantee_all,0,0,"fac_commoners", ["itm_arming_cap","itm_tabard","itm_old_hide_boots"], str_15|agi_18|pw_attr,wpex(125,110,110,20,60,40),knows_pw|knows_ironflesh_4|knows_power_strike_4|knows_shield_2|knows_athletics_5|knows_riding_2|knows_wage_1,default_face_1,default_face_2], #Townwatch ["watchman","Watchman","a watchman",tf_mounted|tf_guarantee_all,0,0,"fac_commoners", ["itm_arming_cap","itm_heraldic_shirt","itm_tattered_wrapping_boots"], str_15|agi_14|pw_attr,wpex(115,130,100,10,45,80),knows_pw|knows_ironflesh_4|knows_power_strike_4|knows_shield_1|knows_athletics_4|knows_riding_3|knows_sailing_3|knows_wage_1,default_face_1,default_face_2], ["ranger","Ranger","an ranger",tf_guarantee_all,0,0,"fac_commoners", ["itm_arming_cap","itm_heraldic_shirt","itm_tattered_wrapping_boots"], str_14|agi_15|pw_attr,wpex(95,60,60,125,50,50),knows_pw|knows_ironflesh_4|knows_power_strike_3|knows_power_draw_4|knows_athletics_5|knows_riding_2|knows_sailing_3|knows_wage_1,default_face_1,default_face_2], #Outlaw Tier ["ruffian","Ruffian","a ruffian",tf_guarantee_all,0,0,"fac_commoners", ["itm_nomad_vest","itm_nomad_armor","itm_rawhide_coat","itm_old_hide_boots","itm_club","itm_spiked_club"], str_14|agi_12|pw_attr,wpex(90,75,80,10,20,80),knows_pw|knows_ironflesh_4|knows_power_strike_4|knows_athletics_3|knows_riding_1|knows_looting_2|knows_labouring_2,default_face_1,default_face_2], ["brigand","Brigand","a brigand",tf_guarantee_all,0,0,"fac_commoners", ["itm_nomad_vest","itm_nomad_armor","itm_rawhide_coat","itm_old_hide_boots","itm_club"], 
str_12|agi_14|pw_attr,wpex(85,40,70,100,60,70),knows_pw|knows_ironflesh_3|knows_power_strike_2|knows_power_draw_3|knows_athletics_4|knows_riding_1|knows_looting_6,default_face_1,default_face_2], ["hashashin","Hashashin","a hashashin",tf_guarantee_all,0,0,"fac_commoners", ["itm_archers_vest","itm_turban","itm_old_hide_boots","itm_blunt_falchion"], str_13|agi_15|pw_attr,wpex(125,70,110,100,100,80),knows_pw|knows_power_strike_4|knows_power_draw_2|knows_power_throw_1|knows_athletics_6|knows_riding_2|knows_looting_6,default_face_1,default_face_2], ["sea_raider","Sea Raider","a sea raider",tf_guarantee_all,0,0,"fac_commoners", ["itm_nomad_vest","itm_nomad_armor","itm_rawhide_coat","itm_old_hide_boots","itm_club","itm_old_shield"], str_15|agi_12|pw_attr,wpex(100,75,95,10,20,80),knows_pw|knows_ironflesh_4|knows_power_strike_4|knows_athletics_3|knows_shield_1|knows_riding_1|knows_sailing_4|knows_engineer_1,default_face_1,default_face_2], ["thief","Thief","a thief",tf_guarantee_all,0,0,"fac_commoners", ["itm_tunic_with_green_cape","itm_tattered_wrapping_boots","itm_common_hood","itm_old_knife","itm_club"], str_12|agi_15|pw_attr,wpex(100,40,60,10,80,60),knows_pw|knows_ironflesh_3|knows_power_strike_3|knows_power_throw_2|knows_athletics_5|knows_riding_2|knows_looting_8,default_face_1,default_face_2], ["raider","Raider","a raider",tf_mounted|tf_guarantee_all,0,0,"fac_commoners", ["itm_nomad_vest","itm_tattered_wrapping_boots","itm_club","itm_bent_lance"], str_13|agi_14|pw_attr,wpex(90,60,110,10,30,80),knows_pw|knows_ironflesh_3|knows_power_strike_3|knows_athletics_2|knows_riding_4|knows_looting_2,default_face_1,default_face_2], ["cultist","Cultist","a cultist",tf_guarantee_all,0,0,"fac_commoners", ["itm_red_robe","itm_red_hood","itm_club"], str_12|agi_13|pw_attr,wpex(90,10,100,0,0,30),knows_pw|knows_athletics_1|knows_riding_1|knows_wound_treatment_1|knows_alchemy_2|knows_engineer_2|knows_labouring_5|knows_tailoring_1|knows_herding_2,default_face_1,default_face_2], 
["saved_troops_end","saved_troops_end","saved_troops_end",0,0,0,0,[],0,0,0,0,0], #Second Tier ["marshal","Marshal","the marshal",tf_mounted|tf_guarantee_all,0,0,"fac_commoners", ["itm_tabard","itm_old_hide_boots"], str_16|agi_15|pw_attr,wpex(110,125,120,80,100,50),knows_pw|knows_ironflesh_6|knows_power_strike_5|knows_power_draw_2|knows_shield_2|knows_siege_1|knows_athletics_3|knows_riding_4|knows_wage_1,default_face_1,default_face_2], ["steward","Steward","the steward",tf_mounted|tf_guarantee_all,0,0,"fac_commoners", ["itm_rich_outfit","itm_courtly_outfit","itm_nobleman_outfit","itm_ripped_woolen_hose","itm_woolen_hose"], str_14|agi_15|pw_attr,wpex(100,80,80,50,80,50),knows_pw|knows_ironflesh_4|knows_power_strike_3|knows_athletics_5|knows_riding_4|knows_sailing_5|knows_trade_1|knows_labouring_3|knows_engineer_2|knows_tailoring_2|knows_wage_1,default_face_1,default_face_2], #Third Tier ["lord","Lord","the lord",tf_mounted|tf_guarantee_all,0,0,"fac_commoners", ["itm_old_tabard","itm_rich_outfit","itm_courtly_outfit","itm_nobleman_outfit","itm_ripped_woolen_hose","itm_old_hide_boots","itm_worn_sword"], str_16|agi_15|pw_attr,wpex(115,115,115,50,70,50),knows_pw|knows_leadership_1|knows_ironflesh_6|knows_power_strike_4|knows_shield_2|knows_athletics_2|knows_riding_5,default_face_1,default_face_2], ["lady","Lady","the lady",tf_mounted|tf_guarantee_all,0,0,"fac_commoners", ["itm_lady_dress_ruby","itm_lady_dress_green","itm_lady_dress_blue","itm_court_dress","itm_red_dress","itm_ripped_woolen_hose","itm_woolen_hose","itm_blue_hose","itm_wimple_a","itm_wimple_b","itm_barbette","itm_worn_sword"], str_16|agi_15|pw_attr,wpex(115,115,115,50,70,50),knows_pw|knows_leadership_1|knows_ironflesh_6|knows_power_strike_4|knows_shield_2|knows_athletics_2|knows_riding_5,default_face_1,default_face_2], ["doge","Doge","the doge",tf_mounted|tf_guarantee_all,0,0,"fac_commoners", 
["itm_rich_outfit","itm_courtly_outfit","itm_nobleman_outfit","itm_ripped_woolen_hose","itm_woolen_hose","itm_worn_sword"], str_14|agi_16|pw_attr,wpex(110,80,80,50,100,50),knows_pw|knows_leadership_1|knows_ironflesh_4|knows_power_strike_3|knows_shield_1|knows_athletics_5|knows_riding_7|knows_sailing_7|knows_trade_3|knows_labouring_3|knows_engineer_1|knows_tailoring_2|knows_alchemy_1,default_face_1,default_face_2], ["captain","Captain","the captain",tf_mounted|tf_guarantee_all,0,0,"fac_commoners", ["itm_tabard","itm_old_hide_boots","itm_worn_sword","itm_crude_bolts","itm_flimsy_crossbow"], str_16|agi_15|pw_attr,wpex(110,125,120,80,100,50),knows_pw|knows_leadership_1|knows_ironflesh_6|knows_power_strike_5|knows_power_draw_2|knows_shield_2|knows_athletics_3|knows_riding_4|knows_sailing_3,default_face_1,default_face_2], ["grandmaster","Grandmaster","the grandmaster",tf_mounted|tf_guarantee_all,0,0,"fac_commoners", ["itm_tabard","itm_old_hide_boots","itm_worn_sword"], str_16|agi_15|pw_attr,wpex(125,125,110,30,60,50),knows_pw|knows_leadership_1|knows_ironflesh_6|knows_power_strike_6|knows_shield_3|knows_athletics_3|knows_riding_4|knows_sailing_3,default_face_1,default_face_2], ["bishop","Bishop","the bishop",tf_mounted|tf_guarantee_all,0,0,"fac_commoners", ["itm_priest_robe","itm_priest_leggings","itm_priest_coif"], str_16|agi_14|pw_attr,wpex(60,70,115,50,20,50),knows_pw|knows_leadership_1|knows_ironflesh_8|knows_power_strike_2|knows_athletics_2|knows_wound_treatment_5|knows_riding_5,default_face_1,default_face_2], ["warchief","Warchief","the warchief",tf_mounted|tf_guarantee_all,0,0,"fac_commoners", ["itm_nomad_vest","itm_nomad_armor","itm_fur_hat","itm_old_hide_boots","itm_falchion","itm_crude_bolts","itm_flimsy_crossbow"], str_15|agi_16|pw_attr,wpex(110,110,90,90,70,40),knows_pw|knows_leadership_1|knows_ironflesh_5|knows_power_strike_4|knows_power_draw_2|knows_athletics_4|knows_looting_6|knows_riding_1|knows_sailing_4|knows_labouring_2,default_face_1,default_face_2], 
#Final Tier ["king","King","the king",tf_mounted|tf_guarantee_all,0,0,"fac_commoners", ["itm_old_tabard","itm_rich_outfit","itm_courtly_outfit","itm_nobleman_outfit","itm_ripped_woolen_hose","itm_old_hide_boots","itm_worn_sword"], str_16|agi_15|pw_attr,wpex(115,115,115,50,70,50),knows_pw|knows_leadership_1|knows_ironflesh_6|knows_power_strike_4|knows_shield_2|knows_athletics_2|knows_riding_5,default_face_1,default_face_2], ["queen","Queen","the queen",tf_mounted|tf_guarantee_all,0,0,"fac_commoners", ["itm_lady_dress_ruby","itm_lady_dress_green","itm_lady_dress_blue","itm_court_dress","itm_red_dress","itm_brown_dress","itm_green_dress","itm_sarranid_lady_dress","itm_sarranid_lady_dress_b","itm_ripped_woolen_hose","itm_woolen_hose","itm_blue_hose","itm_wimple_a","itm_wimple_b","itm_barbette","itm_worn_sword"], str_16|agi_15|pw_attr,wpex(115,115,115,50,70,50),knows_pw|knows_leadership_1|knows_ironflesh_6|knows_power_strike_4|knows_shield_2|knows_athletics_2|knows_riding_5,default_face_1,default_face_2], ["archbishop","Archbishop","the archbishop",tf_mounted|tf_guarantee_all,0,0,"fac_commoners", ["itm_priest_robe","itm_priest_leggings","itm_bishop_mitre"], str_16|agi_14|pw_attr,wpex(60,70,115,50,20,50),knows_pw|knows_leadership_1|knows_ironflesh_8|knows_power_strike_2|knows_athletics_2|knows_wound_treatment_5|knows_riding_5,default_face_1,default_face_2], ["patrician","Patrician","the patrician",tf_mounted|tf_guarantee_all,0,0,"fac_commoners", ["itm_rich_outfit","itm_courtly_outfit","itm_nobleman_outfit","itm_ripped_woolen_hose","itm_woolen_hose","itm_worn_sword"], str_14|agi_16|pw_attr,wpex(110,80,80,50,100,50),knows_pw|knows_leadership_1|knows_ironflesh_4|knows_power_strike_3|knows_shield_1|knows_athletics_5|knows_riding_7|knows_sailing_7|knows_trade_3|knows_labouring_3|knows_engineer_1|knows_tailoring_2,default_face_1,default_face_2], ["commander","Commander","the commander",tf_mounted|tf_guarantee_all,0,0,"fac_commoners", 
["itm_tabard","itm_old_hide_boots","itm_worn_sword","itm_crude_bolts","itm_flimsy_crossbow"], str_16|agi_15|pw_attr,wpex(110,125,120,80,100,50),knows_pw|knows_leadership_1|knows_ironflesh_6|knows_power_strike_5|knows_power_draw_2|knows_shield_2|knows_athletics_3|knows_riding_4|knows_sailing_3,default_face_1,default_face_2], ["warlord","Warlord","the warlord",tf_mounted|tf_guarantee_all,0,0,"fac_commoners", ["itm_nomad_vest","itm_nomad_armor","itm_fur_hat","itm_old_hide_boots","itm_one_handed_battle_axe_a","itm_crude_bolts","itm_flimsy_crossbow"], str_15|agi_16|pw_attr,wpex(110,110,90,90,70,40),knows_pw|knows_leadership_1|knows_ironflesh_4|knows_power_strike_4|knows_power_draw_2|knows_athletics_4|knows_looting_6|knows_riding_1|knows_sailing_4|knows_labouring_2|knows_herding_1,default_face_1,default_face_2], #Rebel Tier ["rebel","Rebel","a rebel",tf_guarantee_all,0,0,"fac_commoners", ["itm_ragged_woolen_cap","itm_stained_felt_hat_b","itm_tattered_headcloth","itm_heraldic_coarse_tunic","itm_old_tabard","itm_tattered_wrapping_boots","itm_old_hide_boots","itm_chipped_falchion","itm_rusty_sword","itm_crude_spear","itm_old_shield","itm_flimsy_crossbow","itm_crude_bolts"], str_14|agi_14|pw_attr,wpex(90,70,80,30,75,70),knows_pw|knows_ironflesh_3|knows_power_strike_3|knows_athletics_3|knows_riding_2|knows_engineer_1|knows_sailing_3|knows_tailoring_1,default_face_1,default_face_2], #Admin Tier ["godlike_hero","Admin","a admin",tf_guarantee_all,0,0,"fac_commoners", ["itm_red_shirt","itm_linen_tunic","itm_woolen_hose","itm_invisible_sword","itm_invisible_bow","itm_invisible_arrows"], 
str_30|agi_30|pw_attr,wpex(300,300,300,300,300,300),knows_pw|knows_ironflesh_10|knows_power_strike_10|knows_power_draw_10|knows_power_throw_10|knows_shield_10|knows_athletics_10|knows_riding_10|knows_engineer_10|knows_siege_10|knows_wound_treatment_10|knows_looting_10|knows_labouring_10|knows_sailing_10|knows_tailoring_10|knows_herding_10|knows_trade_10|knows_musician_10|knows_alchemy_10,default_face_1,default_face_2], ["event_hero","Hero","a hero",tf_guarantee_all,0,0,"fac_commoners", ["itm_red_shirt","itm_linen_tunic","itm_woolen_hose","itm_invisible_sword","itm_invisible_bow","itm_invisible_arrows"], str_30|agi_30|pw_attr,wpex(300,300,300,300,300,300),knows_pw|knows_ironflesh_10|knows_power_strike_10|knows_power_draw_10|knows_power_throw_10|knows_shield_10|knows_athletics_10|knows_riding_10|knows_engineer_10|knows_siege_10|knows_wound_treatment_10|knows_looting_10|knows_labouring_10|knows_sailing_10|knows_tailoring_10|knows_herding_10|knows_trade_10|knows_musician_10|knows_alchemy_10,default_face_1,default_face_2], ["guide","Guide","a guide",tf_guarantee_all,0,0,"fac_commoners", ["itm_fur_hat","itm_sarranid_cloth_robe_b","itm_tattered_wrapping_boots","itm_worn_sword"], str_15|agi_15|pw_attr,wpex(125,110,90,10,40,60),knows_pw|knows_ironflesh_5|knows_power_strike_2|knows_athletics_7|knows_musician_3|knows_sailing_7|knows_riding_7|knows_labouring_2|knows_herding_2|knows_tailoring_2|knows_engineer_2|knows_wound_treatment_4|knows_alchemy_1,default_face_1,default_face_2], #Decap Troop ["decaptroop"," "," ",tf_guarantee_all,0,0,"fac_commoners", ["itm_invisible_head","itm_invisible_body","itm_invisible_foot","itm_invisible_hand"], str_5|agi_5|pw_attr,wpex(0,0,0,0,0,0),knows_pw,default_face_1,default_face_2], ["playable_troops_end","playable_troops_end","playable_troops_end",0,0,0,0,[],0,0,0,0,0], ##NPCS Start ["test_race","Footman","a footman",tf_guarantee_all|tf_giant_male,0,0,"fac_commoners", [], 
str_15|agi_14|pw_attr,wpex(100,100,130,10,45,80),knows_pw|knows_ironflesh_5|knows_power_strike_4|knows_power_throw_2|knows_shield_1|knows_athletics_5|knows_riding_2|knows_sailing_3,default_face_1,default_face_2], ##NPCS END ["inactive_players_array","inactive_players_array","inactive_players_array",0,0,0,0,[],0,0,0,0,0], ["mission_data","mission_data","mission_data",0,0,0,0,[],0,0,0,0,0], ["banner_background_color_array","banner_background_color_array","banner_background_color_array",0,0,0,0,[],0,0,0,0,0], ["temp_array","temp_array","temp_array",0,0,0,0,[],0,0,0,0,0], ["last_chat_message","-","-",0,0,0,0,[],0,0,0,0,0], ["chat_overlay_ring_buffer_0","-","-",0,0,0,0,[],0,0,0,0,0], ["chat_overlay_ring_buffer_1","-","-",0,0,0,0,[],0,0,0,0,0], ["chat_overlay_ring_buffer_2","-","-",0,0,0,0,[],0,0,0,0,0], ["chat_overlay_ring_buffer_3","-","-",0,0,0,0,[],0,0,0,0,0], ["chat_overlay_ring_buffer_4","-","-",0,0,0,0,[],0,0,0,0,0], ["chat_overlay_ring_buffer_5","-","-",0,0,0,0,[],0,0,0,0,0], ["chat_overlay_ring_buffer_6","-","-",0,0,0,0,[],0,0,0,0,0], ["chat_overlay_ring_buffer_7","-","-",0,0,0,0,[],0,0,0,0,0], ["chat_overlay_ring_buffer_8","-","-",0,0,0,0,[],0,0,0,0,0], ["chat_overlay_ring_buffer_9","-","-",0,0,0,0,[],0,0,0,0,0], ["chat_overlay_ring_buffer_10","-","-",0,0,0,0,[],0,0,0,0,0], ["chat_overlay_ring_buffer_end","-","-",0,0,0,0,[],0,0,0,0,0], ["ship_array","ship_array","ship_array",0,0,0,0,[],0,0,0,0,0], ["cart_array","cart_array","cart_array",0,0,0,0,[],0,0,0,0,0], ["removed_scene_props","removed_scene_props","removed_scene_props",0,0,0,0,[],0,0,0,0,0], ["animation_menu_strings","animation_menu_strings","animation_menu_strings",0,0,0,0,[],0,0,0,0,0], ["animation_durations","animation_durations","animation_durations",0,0,0,0,[],0,0,0,0,0], ]
95.764706
425
0.799631
5,742
32,560
4.049112
0.075409
0.025118
0.023871
0.026839
0.79828
0.764086
0.736387
0.707226
0.66886
0.634624
0
0.081856
0.045117
32,560
339
426
96.047198
0.665948
0.052826
0
0.186047
0
0
0.294711
0.081575
0
0
0.010881
0
0
1
0.015504
false
0
0.015504
0.015504
0.046512
0
0
0
0
null
0
0
0
0
1
1
1
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
4
dd23d629ad15d46d2a9ad3f3c7a73b4cdd012f23
220
py
Python
salt/transport/road/raet/__init__.py
pille/salt
47322575309faac8c4755287d930469caffc1c65
[ "Apache-2.0" ]
null
null
null
salt/transport/road/raet/__init__.py
pille/salt
47322575309faac8c4755287d930469caffc1c65
[ "Apache-2.0" ]
null
null
null
salt/transport/road/raet/__init__.py
pille/salt
47322575309faac8c4755287d930469caffc1c65
[ "Apache-2.0" ]
null
null
null
# -*- coding: utf-8 -*- ''' raet modules __init__.py file for raet package ''' # Import raet modules from . import packeting from . import stacking from . import raeting __all__ = ['packeting', 'stacking', 'raeting']
15.714286
46
0.686364
27
220
5.296296
0.592593
0.20979
0
0
0
0
0
0
0
0
0
0.005495
0.172727
220
13
47
16.923077
0.78022
0.409091
0
0
0
0
0.198347
0
0
0
0
0
0
1
0
false
0
0.75
0
0.75
0
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
1
0
0
4
06b779349b4bc07df980583afe5e096981fa0962
153
py
Python
setup.py
narumiruna/labelme-utils
fd0c4e6344c01ed3bc5d040b580a4840334d2459
[ "MIT" ]
null
null
null
setup.py
narumiruna/labelme-utils
fd0c4e6344c01ed3bc5d040b580a4840334d2459
[ "MIT" ]
null
null
null
setup.py
narumiruna/labelme-utils
fd0c4e6344c01ed3bc5d040b580a4840334d2459
[ "MIT" ]
null
null
null
from setuptools import find_packages, setup setup( name='labelmeutils', version='0.1.0', packages=find_packages(), install_requires=[])
19.125
43
0.69281
18
153
5.722222
0.722222
0.23301
0
0
0
0
0
0
0
0
0
0.02381
0.176471
153
7
44
21.857143
0.793651
0
0
0
0
0
0.111111
0
0
0
0
0
0
1
0
true
0
0.166667
0
0.166667
0
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
0
0
0
0
0
4
06ce02ec4197674bb74a6b6d4087093639516976
3,759
py
Python
tests/www/views/synapse_space/daa/test_form.py
ki-tools/sls_ki_synapse_admin_py
d9483d01000b61c4e8d129bdc06497ae1a27484b
[ "Apache-2.0" ]
null
null
null
tests/www/views/synapse_space/daa/test_form.py
ki-tools/sls_ki_synapse_admin_py
d9483d01000b61c4e8d129bdc06497ae1a27484b
[ "Apache-2.0" ]
null
null
null
tests/www/views/synapse_space/daa/test_form.py
ki-tools/sls_ki_synapse_admin_py
d9483d01000b61c4e8d129bdc06497ae1a27484b
[ "Apache-2.0" ]
null
null
null
import pytest from www.server import app from www.views.synapse_space.daa.forms import GrantDaaSynapseAccessForm def test_it_sets_the_team_name(client, daa_config, set_daa_config): with app.test_request_context(): form = GrantDaaSynapseAccessForm() data_collection_name = daa_config['data_collections'][0]['name'] form.field_data_collection.data = data_collection_name expected_collection_name = data_collection_name.replace(' ', '_') for short_name, expected_name in [ ('test', 'KiAccess_{0}_test'.format(expected_collection_name)), (' test ', 'KiAccess_{0}_test'.format(expected_collection_name)), ('test.1', 'KiAccess_{0}_test1'.format(expected_collection_name)), ('test-1', 'KiAccess_{0}_test_1'.format(expected_collection_name)), ('test 1', 'KiAccess_{0}_test_1'.format(expected_collection_name)), ('test 1_ . - ', 'KiAccess_{0}_test_1____'.format(expected_collection_name)) ]: form.field_institution_short_name.data = short_name form.try_set_team_name() assert form.team_name == expected_name # Test additional_parties add_party_codes = [a['code'] for a in daa_config['additional_parties']] add_party_codes_str = '_'.join(add_party_codes) for short_name, expected_name in [ ('test', 'KiAccess_{0}_{1}_test'.format(expected_collection_name, add_party_codes_str)), (' test ', 'KiAccess_{0}_{1}_test'.format(expected_collection_name, add_party_codes_str)), ('test.1', 'KiAccess_{0}_{1}_test1'.format(expected_collection_name, add_party_codes_str)), ('test-1', 'KiAccess_{0}_{1}_test_1'.format(expected_collection_name, add_party_codes_str)), ('test 1', 'KiAccess_{0}_{1}_test_1'.format(expected_collection_name, add_party_codes_str)), ('test 1_ . 
- ', 'KiAccess_{0}_{1}_test_1____'.format(expected_collection_name, add_party_codes_str)) ]: form.field_institution_short_name.data = short_name form.field_institution_add_party.data = add_party_codes form.try_set_team_name() assert form.team_name == expected_name form.field_institution_add_party.data = None # Test include_collection_name_in_team_name daa_config['data_collections'][0]['include_collection_name_in_team_name'] = False set_daa_config([daa_config]) for short_name, expected_name in [ ('test', 'KiAccess_test'), (' test ', 'KiAccess_test'), ('test.1', 'KiAccess_test1'), ('test-1', 'KiAccess_test_1'), ('test 1', 'KiAccess_test_1'), ('test 1_ . - ', 'KiAccess_test_1____') ]: form.field_institution_short_name.data = short_name form.try_set_team_name() assert form.team_name == expected_name # Test additional_parties with include_collection_name_in_team_name for short_name, expected_name in [ ('test', 'KiAccess_{0}_test'.format(add_party_codes_str)), (' test ', 'KiAccess_{0}_test'.format(add_party_codes_str)), ('test.1', 'KiAccess_{0}_test1'.format(add_party_codes_str)), ('test-1', 'KiAccess_{0}_test_1'.format(add_party_codes_str)), ('test 1', 'KiAccess_{0}_test_1'.format(add_party_codes_str)), ('test 1_ . - ', 'KiAccess_{0}_test_1____'.format(add_party_codes_str)) ]: form.field_institution_short_name.data = short_name form.field_institution_add_party.data = add_party_codes form.try_set_team_name() assert form.team_name == expected_name
52.208333
113
0.65576
468
3,759
4.726496
0.117521
0.063291
0.09991
0.094033
0.80651
0.78481
0.713834
0.699367
0.677667
0.629295
0
0.019911
0.22506
3,759
71
114
52.943662
0.739444
0.03485
0
0.5
0
0
0.200055
0.06043
0
0
0
0
0.066667
1
0.016667
false
0
0.05
0
0.066667
0
0
0
0
null
0
0
0
1
1
1
0
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
4
06f971e2f1ac1ec1a0de6d5bf17ce27eac0fe9c2
27
py
Python
pulse2percept/version.py
tanyabhatia/pulse2percept
b322c7daf22154d60f7abd8adb039c5982824a7c
[ "BSD-3-Clause" ]
null
null
null
pulse2percept/version.py
tanyabhatia/pulse2percept
b322c7daf22154d60f7abd8adb039c5982824a7c
[ "BSD-3-Clause" ]
10
2018-04-18T20:56:12.000Z
2020-07-24T20:11:01.000Z
pulse2percept/version.py
tanyabhatia/pulse2percept
b322c7daf22154d60f7abd8adb039c5982824a7c
[ "BSD-3-Clause" ]
null
null
null
__version__ = '0.7.0.dev0'
13.5
26
0.666667
5
27
2.8
0.8
0
0
0
0
0
0
0
0
0
0
0.166667
0.111111
27
1
27
27
0.416667
0
0
0
0
0
0.37037
0
0
0
0
0
0
1
0
false
0
0
0
0
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
1
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
4
06fb40368734e82fa95c1c1e21967e2e68059095
227
py
Python
kafka_roller/kafka_roller/cli.py
jyates/kafka-helmsman
acd3cadd17752055b0a5d241b1f6fd79b0b350b3
[ "MIT" ]
68
2019-09-25T21:29:27.000Z
2022-03-24T14:10:52.000Z
kafka_roller/kafka_roller/cli.py
jyates/kafka-helmsman
acd3cadd17752055b0a5d241b1f6fd79b0b350b3
[ "MIT" ]
18
2019-11-12T00:23:30.000Z
2021-12-17T18:15:24.000Z
kafka_roller/kafka_roller/cli.py
jyates/kafka-helmsman
acd3cadd17752055b0a5d241b1f6fd79b0b350b3
[ "MIT" ]
27
2019-10-01T14:23:32.000Z
2022-02-07T06:06:34.000Z
"""CLI entrypoint.""" import logging.config import os from fabric.main import program def main(): """Entry point.""" os.chdir(os.path.dirname(__file__)) logging.config.fileConfig("logging.ini") program.run()
17.461538
44
0.682819
29
227
5.206897
0.689655
0.172185
0
0
0
0
0
0
0
0
0
0
0.15859
227
12
45
18.916667
0.790576
0.123348
0
0
0
0
0.058511
0
0
0
0
0
0
1
0.142857
true
0
0.428571
0
0.571429
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
4
664e87f7e81eb4daa5206d28dc935ffcf085dcb1
1,438
py
Python
ooobuild/cssdyn/bridge/oleautomation/__init__.py
Amourspirit/ooo_uno_tmpl
64e0c86fd68f24794acc22d63d8d32ae05dd12b8
[ "Apache-2.0" ]
null
null
null
ooobuild/cssdyn/bridge/oleautomation/__init__.py
Amourspirit/ooo_uno_tmpl
64e0c86fd68f24794acc22d63d8d32ae05dd12b8
[ "Apache-2.0" ]
null
null
null
ooobuild/cssdyn/bridge/oleautomation/__init__.py
Amourspirit/ooo_uno_tmpl
64e0c86fd68f24794acc22d63d8d32ae05dd12b8
[ "Apache-2.0" ]
null
null
null
# coding: utf-8 # # Copyright 2022 :Barry-Thomas-Paul: Moss # # Licensed under the Apache License, Version 2.0 (the "License") # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http: // www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # from ....dyn.bridge.oleautomation.application_registration import ApplicationRegistration as ApplicationRegistration from ....dyn.bridge.oleautomation.bridge_supplier import BridgeSupplier as BridgeSupplier from ....dyn.bridge.oleautomation.currency import Currency as Currency from ....dyn.bridge.oleautomation.date import Date as Date from ....dyn.bridge.oleautomation.decimal import Decimal as Decimal from ....dyn.bridge.oleautomation.factory import Factory as Factory from ....dyn.bridge.oleautomation.named_argument import NamedArgument as NamedArgument from ....dyn.bridge.oleautomation.property_put_argument import PropertyPutArgument as PropertyPutArgument from ....dyn.bridge.oleautomation.s_code import SCode as SCode from ....dyn.bridge.oleautomation.x_automation_object import XAutomationObject as XAutomationObject
53.259259
116
0.80459
193
1,438
5.953368
0.492228
0.060923
0.113142
0.226284
0
0
0
0
0
0
0
0.007075
0.115438
1,438
26
117
55.307692
0.896226
0.400556
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
0
0
0
null
0
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
4
b0983754bc52f44190b32571bbe66977dc293c01
1,502
py
Python
code/FacebookEvents/extractor.py
UNITN-KDI-2020/MoiraisObservatory
d0b5aeca88e0f0229b713e60262de76ac44864a3
[ "MIT", "Unlicense" ]
null
null
null
code/FacebookEvents/extractor.py
UNITN-KDI-2020/MoiraisObservatory
d0b5aeca88e0f0229b713e60262de76ac44864a3
[ "MIT", "Unlicense" ]
1
2020-11-06T13:20:04.000Z
2020-11-06T13:20:04.000Z
code/FacebookEvents/extractor.py
UNITN-KDI-2020/MoiraisObservatory
d0b5aeca88e0f0229b713e60262de76ac44864a3
[ "MIT", "Unlicense" ]
1
2020-11-04T12:19:38.000Z
2020-11-04T12:19:38.000Z
import facebook_events_scraper as sc import json import time driver = sc.driver('./chromedriver') event_urls = [ "https://www.facebook.com/events/442646183573774", "https://www.facebook.com/events/119061183165620/", "https://www.facebook.com/events/367546061252509/", "https://www.facebook.com/events/802522200323292/", "https://www.facebook.com/events/781389142591365/", "https://www.facebook.com/events/279219430101617/", "https://www.facebook.com/events/183993549943591/", "https://www.facebook.com/events/3021076944663769/", "https://www.facebook.com/events/581245542747775/", "https://www.facebook.com/events/756326468432319/", "https://www.facebook.com/events/147690982536063/", "https://www.facebook.com/events/355864681668421/", "https://www.facebook.com/events/1821151754701683/", "https://www.facebook.com/events/1262094610521833/", "https://www.facebook.com/events/232678604034073/", "https://www.facebook.com/events/375869966960695/", "https://www.facebook.com/events/1243399506045392/", "https://www.facebook.com/events/667724533948337/", "https://www.facebook.com/events/165082432004468/", "https://www.facebook.com/events/401666710962331/", "https://www.facebook.com/events/726574938275467/" ] events = [] for event_uri in event_urls: response_data = sc.event_info(driver, link = event_uri) events.append(response_data) with open('data.json', 'w') as outfile: json.dump(events, outfile)
40.594595
59
0.718375
173
1,502
6.184971
0.283237
0.157009
0.314019
0.372897
0.490654
0
0
0
0
0
0
0.237175
0.104527
1,502
37
60
40.594595
0.558364
0
0
0
0
0
0.688623
0
0
0
0
0
0
1
0
false
0
0.090909
0
0.090909
0
0
0
0
null
0
1
1
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
0
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
4
b0bfb4c7dc40c46491c582764a178cd888ffeabd
521
py
Python
google_api_clients/bigquery/errors.py
shojikai/python-google-api-clients
2770dc0ff77045565ae38e01d6756a513e148f81
[ "Apache-2.0" ]
null
null
null
google_api_clients/bigquery/errors.py
shojikai/python-google-api-clients
2770dc0ff77045565ae38e01d6756a513e148f81
[ "Apache-2.0" ]
null
null
null
google_api_clients/bigquery/errors.py
shojikai/python-google-api-clients
2770dc0ff77045565ae38e01d6756a513e148f81
[ "Apache-2.0" ]
null
null
null
class AlreadyExistsError(Exception):
    """Raised when the resource being created already exists."""


class BigQueryError(Exception):
    """Generic BigQuery API failure."""


class DatasetIsNotEmptyError(Exception):
    """Raised when deleting a dataset that still contains tables."""


class Http4xxError(Exception):
    """Raised on an HTTP 4xx (client error) response."""


class Http5xxError(Exception):
    """Raised on an HTTP 5xx (server error) response."""


class InsertError(Exception):
    """Raised when a streaming insert fails."""


class InvalidRowError(Exception):
    """Raised when a row does not conform to the table schema."""


class JobWaitTimeoutError(Exception):
    """Raised when waiting for a job exceeds the allowed time."""


class LoadError(Exception):
    """Raised when a load job fails."""


class NoSuchFieldError(Exception):
    """Raised when a referenced field is absent from the schema."""


class NotFoundError(Exception):
    """Raised when the requested resource does not exist."""


class ParameterError(Exception):
    """Raised when a caller supplies an invalid parameter."""
14.472222
40
0.74856
48
521
8.125
0.3125
0.4
0.507692
0
0
0
0
0
0
0
0
0.004695
0.182342
521
35
41
14.885714
0.910798
0
0
0.5
0
0
0
0
0
0
0
0
0
1
0
true
0.5
0
0
0.5
0
0
0
0
null
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
1
0
0
0
0
0
4
b0c3788b588d5ae61dd3c4aa295bf21b012ec0ce
33,861
py
Python
ozpcenter/api/notification/notifications.py
emosher/ozp-backend
d31d00bb8a28a8d0c999813f616b398f41516244
[ "Apache-2.0" ]
1
2018-10-05T17:03:01.000Z
2018-10-05T17:03:01.000Z
ozpcenter/api/notification/notifications.py
emosher/ozp-backend
d31d00bb8a28a8d0c999813f616b398f41516244
[ "Apache-2.0" ]
1
2017-01-06T19:20:32.000Z
2017-01-06T19:20:32.000Z
ozpcenter/api/notification/notifications.py
emosher/ozp-backend
d31d00bb8a28a8d0c999813f616b398f41516244
[ "Apache-2.0" ]
7
2016-12-16T15:42:05.000Z
2020-09-05T01:11:27.000Z
""" https://github.com/aml-development/ozp-documentation/wiki/Notifications Notification Type SYSTEM = 'system' # System-wide Notifications AGENCY = 'agency' # Agency-wide Notifications AGENCY_BOOKMARK = 'agency_bookmark' # Agency-wide Bookmark Notifications # Not requirement (erivera 20160621) LISTING = 'listing' # Listing Notifications PEER = 'peer' # Peer to Peer Notifications PEER_BOOKMARK = 'peer_bookmark' # Peer to Peer Bookmark Notifications SUBSCRIPTION = 'subscription' # Category and Tag Subscription Notification Group Target ALL = 'all' # All users STEWARDS = 'stewards' APP_STEWARD = 'app_steward' ORG_STEWARD = 'org_steward' USER = 'user' =====Notification Type===== +--> SystemWide | +--> AgencyWide | +--> AgencyWideBookmark | Notification +------+--> Listing | | | +--> ListingReview | | | +--> ListingPrivateStatus | | | +--> PendingDeletionToOwner | | | +--> PendingDeletionToSteward | | | +--> PendingDeletionApproved | | | +--> ListingSubmission | +--> Peer | +--> PeerBookmark | +--> CategorySubscription | +--> TagSubscription =====Vocab===== Target: is a Profile that should receive a notification Target List: A list of Profiles that should receive notifications Direct notification: The notification is produced by an action that the user does. In-direct Notification: The notification is produced by the observing a user action. """ import datetime import logging import pytz from django.db.models import Q from django.db import transaction from ozpcenter import errors from ozpcenter.models import Notification from ozpcenter.models import NotificationMailBox from ozpcenter.models import Profile from ozpcenter.models import Agency from ozpcenter.models import Listing from ozpcenter.models import ApplicationLibraryEntry from ozpcenter.models import Subscription import ozpcenter.model_access as generic_model_access logger = logging.getLogger('ozp-center.' 
+ str(__name__)) permission_dict = { 'APPS_MALL_STEWARD': [ 'add_system_notification', 'change_system_notification', 'delete_system_notification', 'add_agency_notification', 'change_agency_notification', 'delete_agency_notification', 'add_listing_notification', 'change_listing_notification', 'delete_listing_notification', 'add_peer_notification', 'change_peer_notification', 'delete_peer_notification', 'add_peer_bookmark_notification', 'add_restore_bookmark_notification', 'change_peer_bookmark_notification', 'delete_peer_bookmark_notification', 'add_subscription_notification', 'change_subscription_notification', 'delete_subscription_notification' ], 'ORG_STEWARD': [ 'add_system_notification', 'change_system_notification', 'delete_system_notification', 'add_agency_notification', 'change_agency_notification', 'delete_agency_notification', 'add_listing_notification', 'change_listing_notification', 'delete_listing_notification', 'add_peer_notification', 'change_peer_notification', 'delete_peer_notification', 'add_peer_bookmark_notification', 'add_restore_bookmark_notification', 'change_peer_bookmark_notification', 'delete_peer_bookmark_notification', 'add_subscription_notification', 'change_subscription_notification', 'delete_subscription_notification' ], 'USER': [ 'add_listing_notification', 'change_listing_notification', 'delete_listing_notification', 'add_peer_notification', 'change_peer_notification', 'delete_peer_notification', 'add_peer_bookmark_notification', 'add_restore_bookmark_notification', 'change_peer_bookmark_notification', 'delete_peer_bookmark_notification', 'add_subscription_notification', 'change_subscription_notification', 'delete_subscription_notification' ] } # Method is decorated with @transaction.atomic to ensure all logic is executed in a single transaction @transaction.atomic def bulk_notifications_saver(notification_instances): # Loop over each store and invoke save() on each entry for notification_instance in notification_instances: 
notification_instance.save() def check_notification_permission(profile_instance, action, notification_type): """ Check to see if user has permission Args: profile_instance(Profile): Profile Instance action(string): add/change/delete notification_type(string): notification type Return: True or PermissionDenied Exception """ profile_role = profile_instance.highest_role() assert (profile_role in permission_dict), 'Profile group {} not found in permissions'.format(profile_role) user_action = '{}_{}_notification'.format(action, notification_type) profile_permission_list = permission_dict[profile_role] if user_action not in profile_permission_list: raise errors.PermissionDenied('Profile does not have [{}] permissions'.format(user_action)) return True class NotificationBase(object): """ Process: Init NotificationBase Super Class Object Set sender_profile and entities list Validate sender_profile and entities list Do Global Permission Check Notify Validate expires_date, message """ def set_sender_and_entity(self, sender_profile_username, entity_dict): """ Set Sender Profile, entity object, metadata Args: sender_profile_username(string): Sender's Profile username (normally the request profile) entity(object): """ assert (sender_profile_username is not None), 'Sender Profile Username is necessary' self.sender_profile_username = sender_profile_username self.sender_profile = generic_model_access.get_profile(sender_profile_username) self.entity_dict = entity_dict def check_local_permission(self, entity): return True def permission_check(self): """ Global and Local check """ check_notification_permission(self.sender_profile, 'add', self.get_notification_db_type()) self.check_local_permission(self.entity_dict) def generate_model(self, expires_date, message): notification = Notification() notification.expires_date = expires_date notification.message = message notification.author = self.sender_profile notification.notification_type = self.get_notification_db_type() 
notification.notification_subtype = self.get_notification_db_subtype() notification.entity_id = self.get_entity_id() notification.group_target = self.get_group_target() return notification def get_notification_db_type(self): raise RuntimeError('Not Implemented') def get_notification_db_subtype(self): return None def get_entity_id(self): return None def get_group_target(self): return Notification.ALL def get_target_list(self): raise RuntimeError('Not Implemented') def modify_notification_before_save(self, notification_object): pass def notify(self, expires_date, message): assert (expires_date is not None), 'Expires Date is necessary' assert (message is not None), 'Message is necessary' self.permission_check() notification = self.generate_model(expires_date, message) self.modify_notification_before_save(notification) notification.save() target_list = self.get_target_list() bulk_notification_list = [] for target_profile in target_list: notificationMailBox = NotificationMailBox() notificationMailBox.target_profile = target_profile notificationMailBox.notification = notification # All the flags default to false notificationMailBox.emailed_status = False bulk_notification_list.append(notificationMailBox) if len(bulk_notification_list) >= 2000: bulk_notifications_saver(bulk_notification_list) bulk_notification_list = [] if bulk_notification_list: bulk_notifications_saver(bulk_notification_list) return notification class SystemWideNotification(NotificationBase): """ issue: AMLNG-395 notification_type: System Wide description: As a user, I want to receive System-Wide Notifications target: All Users permission constraint: Only APP_MALL_STEWARDs can send notifications invoked: Directly test_case: todo """ def get_notification_db_type(self): return Notification.SYSTEM def get_notification_db_subtype(self): return None def get_target_list(self): return Profile.objects.all() class AgencyWideNotification(NotificationBase): """ issue: AMLNG-398 notification_type: Agency Wide 
description: As a user, I want to receive Agency-Wide Notifications target: Get all profiles that belongs to an organization and get all stewards for that organization permission constraint: Only APP_MALL_STEWARDs, ORG_STEWARDs can send notifications invoked: Directly test_case: todo """ def modify_notification_before_save(self, notification_object): notification_object.agency = self.entity_dict['agency'] def get_notification_db_type(self): return Notification.AGENCY def get_notification_db_subtype(self): return None def get_entity_id(self): return self.entity_dict['agency'].id def get_target_list(self): agency = self.entity_dict['agency'] return Profile.objects.filter(Q(organizations__in=[agency]) | Q(stewarded_organizations__in=[agency])).all() class AgencyWideBookmarkNotification(NotificationBase): """ issue: AMLNG-398 notification_type: AgencyWide Bookmark description: As a user, I want to receive Agency-Wide Notifications target: Get all profiles that belongs to an organization and get all stewards for that organization permission constraint: Only APP_MALL_STEWARDs, ORG_STEWARDs can send notifications invoked: Directly test_case: todo """ def modify_notification_before_save(self, notification_object): notification_object.agency = self.entity_dict['agency'] def get_notification_db_type(self): return Notification.AGENCY_BOOKMARK def get_notification_db_subtype(self): return None def get_entity_id(self): return self.entity_dict['agency'].id def get_target_list(self): agency = self.entity_dict['agency'] return Profile.objects.filter(Q(organizations__in=[agency]) | Q(stewarded_organizations__in=[agency])).all() class ListingNotification(NotificationBase): """ issue: AMLNG-396 notification_type: Listing Notifications description: Send a notification to all the users that bookmark given listing target: All users that bookmarked listing permission constraint: Only APP_MALL_STEWARDs and ORG_STEWARDs or owners of listing can send notifications invoked: Directly 
test_case: todo """ def modify_notification_before_save(self, notification_object): notification_object.listing = self.entity_dict['listing'] def get_notification_db_type(self): return Notification.LISTING def get_notification_db_subtype(self): return None def get_entity_id(self): return self.entity_dict['listing'].id def get_target_list(self): owner_id_list = ApplicationLibraryEntry.objects.filter(listing__in=[self.entity_dict['listing']], listing__isnull=False, listing__approval_status=Listing.APPROVED, listing__is_deleted=False).values_list('owner', flat=True).distinct() return Profile.objects.filter(id__in=owner_id_list, listing_notification_flag=True).all() def check_local_permission(self, entity): if self.sender_profile.highest_role() in ['APPS_MALL_STEWARD', 'ORG_STEWARD']: return True if self.sender_profile not in self.entity_dict['listing'].owners.all(): raise errors.PermissionDenied('Cannot create a notification for a listing you do not own') else: return True return False class PeerNotification(NotificationBase): """ issue: AMLNG-381 notification_type: Peer description: As a user, I want to receive notification when someone send a message to me target: User Given Target permission constraint: ? 
test_case: todo """ def modify_notification_before_save(self, notification_object): notification_object.peer = self.entity_dict['peer'] def get_notification_db_type(self): return Notification.PEER def get_notification_db_subtype(self): return None def get_group_target(self): return Notification.USER def get_target_list(self): # entity: 'peer_profile': peer_profile, # metadata: 'peer': peer, entities_id = [entity.id for entity in [self.entity_dict['peer_profile']]] return Profile.objects.filter(id__in=entities_id).all() class PeerBookmarkNotification(NotificationBase): """ issue: AMLNG-381 notification_type: PeerBookmark description: As a user, I want to receive notification when someone shares a folder with me target: User Given Target permission constraint: Must be owner of shared folder to send test_case: Logged on as jones Shared a folder with aaronson Logged on as aaronson RESULTS: aaronson has a new notification added to the notification count.Add folder button is present and adds the shared folder to HuD screen. """ def modify_notification_before_save(self, notification_object): notification_object.peer = self.entity_dict['peer'] def get_notification_db_type(self): return Notification.PEER_BOOKMARK def get_notification_db_subtype(self): return None def get_group_target(self): return Notification.USER def get_target_list(self): # entity: 'peer_profile': peer_profile, # metadata: 'peer': peer, entities_id = [entity.id for entity in [self.entity_dict['peer_profile']]] return Profile.objects.filter(id__in=entities_id).all() class RestoreBookmarkNotification(NotificationBase): """ issue: AMLNG-700 notification_type: Restore Bookmark Folder description: As a user, I would like the ability to be able to recover a deleted folder full of bookmarked listings with an undo delete function. 
target: User Given Target permission constraint: Must be owner of folder test_case: Logged on as jones Delete a folder Restore folder from Notifications RESULTS: Restore folder button is present and readds the folder to HuD screen. """ def modify_notification_before_save(self, notification_object): notification_object.peer = self.entity_dict['peer'] def get_notification_db_type(self): return Notification.RESTORE_BOOKMARK def get_notification_db_subtype(self): return None def get_group_target(self): return Notification.USER def get_target_list(self): entities_id = [entity.id for entity in [self.entity_dict['peer_profile']]] return Profile.objects.filter(id__in=entities_id).all() class ListingReviewNotification(NotificationBase): # Not Verified """ issue: AMLNG-377 notification_type: Listing Review description: As an owner or CS, I want to receive notification of user rating and reviews target: Users that ___ invoked: In-directly test_case: Description - Verify the CS and listing owner receives a notification when the review is added or modified. *Pre-req*- Add aaronson as listing owner to Airmail. Log on as syme (minipax) Deleted, Added and Modified review on Airmail ( minitru) Log on as wsmith (minitru-org steward) EXPECTED RESULTS - At least two notifications should display for wsmith. Log on as aaronson EXPECTED RESULTS - At least two notifications should display for aaronson. 
""" def modify_notification_before_save(self, notification_object): notification_object.listing = self.entity_dict['listing'] def get_notification_db_type(self): return Notification.LISTING def get_notification_db_subtype(self): return Notification.LISTING_REVIEW def get_group_target(self): return Notification.USER def get_entity_id(self): return self.entity_dict['listing'].id def get_target_list(self): current_listing = self.entity_dict['listing'] target_set = set() for owner in current_listing.owners.filter(listing_notification_flag=True).all(): target_set.add(owner) current_listing_agency_id = current_listing.agency.id for steward in Profile.objects.filter(stewarded_organizations__in=[current_listing_agency_id], listing_notification_flag=True).all(): target_set.add(steward) return list(target_set) class ListingOwnerNotification(NotificationBase): # Not Verified """ issue: AMLNG-??? notification_type: Listing Owner As an user, I want to send a notification to the owners and org_steward for that listing's agency target: owners and org_steward for that listing's agency invoked: Directly """ def modify_notification_before_save(self, notification_object): notification_object.listing = self.entity_dict['listing'] def get_entity_id(self): return self.entity_dict['listing'].id def get_notification_db_type(self): return Notification.LISTING def get_group_target(self): return Notification.USER def get_target_list(self): current_listing = self.entity_dict['listing'] target_set = set() for owner in current_listing.owners.filter(listing_notification_flag=True).all(): target_set.add(owner) current_listing_agency_id = current_listing.agency.id for steward in Profile.objects.filter(stewarded_organizations__in=[current_listing_agency_id], listing_notification_flag=True).all(): target_set.add(steward) return list(target_set) class ListingPrivateStatusNotification(NotificationBase): """ issue: AMLNG-383 notification_type: ListingPrivateStatus description: As a owner, I want to 
notify users who have bookmarked my listing when the listing is changed from public to private and vice-versa permission constraint: Only APP_MALL_STEWARDs and ORG_STEWARDs or owners of listing can target: Users that bookmarked listing invoked: In-directly test_case: Bookmarked an app listing in my own org Went to Bookmarked App Listing Quick View Modal | Send Notifications | Sent a notification RESULTS - I received the notification Bookmarked an app listing that did not belong to the org I was in Went to Bookmarked App Listing Quick View Modal | Send Notifications | Sent a notification RESULTS - I received the notification """ def modify_notification_before_save(self, notification_object): notification_object.listing = self.entity_dict['listing'] def get_entity_id(self): return self.entity_dict['listing'].id def get_notification_db_type(self): return Notification.LISTING def get_notification_db_subtype(self): return Notification.LISTING_PRIVATE_STATUS def get_target_list(self): owner_id_list = ApplicationLibraryEntry.objects.filter(listing__in=[self.entity_dict['listing']], listing__isnull=False, listing__approval_status=Listing.APPROVED, listing__is_enabled=True, listing__is_deleted=False).values_list('owner', flat=True).distinct() return Profile.objects.filter(id__in=owner_id_list, listing_notification_flag=True).all() def check_local_permission(self, entity): if self.sender_profile.highest_role() in ['APPS_MALL_STEWARD', 'ORG_STEWARD']: return True if self.sender_profile not in self.entity_dict['listing'].owners.all(): raise errors.PermissionDenied('Cannot create a notification for a listing you do not own') else: return True return False class PendingDeletionToOwnerNotification(NotificationBase): # Not Verified """ issue: AMLNG-170 notification_type: PendingDeletionToOwner description: As an Owner I want to receive notice of whether my deletion request has been rejected target: Users that ___ invoked: In-directly event_occurs: Listing DELETED - Steward 
approved deletion PENDING_DELETION --> DELETED User undeleted the listing - Steward rejects deletion PENDING_DELETION --> PENDING test_case: Logged on as jones Set Test Notification Listing to Pend for Deletion state Logged on as minitrue Org Content Steward- julia Approved the deletion Logged on as jones RESULTS The notification launched = Test Notification Listing listing was approved for deletion by steward """ def modify_notification_before_save(self, notification_object): notification_object.listing = self.entity_dict['listing'] def get_entity_id(self): return self.entity_dict['listing'].id def get_notification_db_type(self): return Notification.LISTING def get_notification_db_subtype(self): return Notification.PENDING_DELETION_TO_OWNER def get_target_list(self): current_listing = self.entity_dict['listing'] return current_listing.owners.filter(listing_notification_flag=True).all().distinct() class PendingDeletionToStewardNotification(NotificationBase): # Not Verified """ issue: AMLNG-173, AMLOS-490 notification_type: PendingDeletionToSteward As an cs I want a notification if an owner has cancelled an app that was pending deletion as a cs, I want a notification if an owner pends a listing for deletion event_occurs: This event occurs when User undeleted the listing PENDING_DELETION --> PENDING User pends a listing for deletion ANY --> PENDING_DELETION test_case: Set Test Notification Listing to Pend for Deletion Status RESULTS - Notificaiton launched = Listing Owner has requested the deletion of Test Notification Listing listing Logged on as jones (owner of <Test Notification> Listing) Undeleted the Test Notification Listing Logged on as Org Content Steward - julia RESULTS - Notificaiton launched = Listing Owner cancelled deletion of Test Notification Listing listing """ def modify_notification_before_save(self, notification_object): notification_object.listing = self.entity_dict['listing'] def get_entity_id(self): return self.entity_dict['listing'].id def 
get_notification_db_type(self): return Notification.LISTING def get_notification_db_subtype(self): return Notification.PENDING_DELETION_TO_STEWARD def get_target_list(self): current_listing = self.entity_dict['listing'] current_listing_agency_id = current_listing.agency.id target_set = set() for steward in Profile.objects.filter(stewarded_organizations__in=[current_listing_agency_id], listing_notification_flag=True).all(): target_set.add(steward) for admin in Profile.objects.filter(user__groups__name='APPS_MALL_STEWARD', listing_notification_flag=True).all(): target_set.add(admin) return list(target_set) class PendingDeletionApprovedNotification(NotificationBase): """ notification_type: PendingDeletionApproved As a listing owner I want a notification if a steward has approved the deletion of the listing event_occurs: This event occurs when Steward approves listing PENDING_DELETION --> DELETED test_case: Set Test Notification Listing to Pend for Deletion Status RESULTS - Notificaiton launched = Listing Owner has requested the deletion of Test Notification Listing listing Logged on as Org Content Steward - julia Approve the deletion requested Logged on as owner - jones RESULTS - Notificaiton launched = Listing Owner cancelled deletion of Test Notification Listing listing """ def modify_notification_before_save(self, notification_object): notification_object.listing = self.entity_dict['listing'] def get_entity_id(self): return self.entity_dict['listing'].id def get_notification_db_type(self): return Notification.LISTING def get_notification_db_subtype(self): return Notification.PENDING_DELETION_APPROVED def get_target_list(self): current_listing = self.entity_dict['listing'] current_listing_agency_id = current_listing.agency.id target_set = set() for steward in Profile.objects.filter(stewarded_organizations__in=[current_listing_agency_id], listing_notification_flag=True).all(): target_set.add(steward) for admin in 
Profile.objects.filter(user__groups__name='APPS_MALL_STEWARD', listing_notification_flag=True).all(): target_set.add(admin) for owner in current_listing.owners.filter(listing_notification_flag=True).all().distinct(): target_set.add(owner) return list(target_set) class ListingSubmissionNotification(NotificationBase): """ issue: AMLNG-376 notification_type: ListingSubmission description: As a CS, I want to receive notification of Listings submitted for my organization target: Listing Agency ORG_STEWARDs invoked: In-directly event_occurs: This event occurs when User Submitted Listings IN_PROGRESS --> PENDING a = Listing.objects.last(); a.approval_status = Listing.IN_PROGRESS; a.save() test_case: Logged into Apps Mall as jones ( minitrue) Submitted a new listing using org minitrue. Logged into Apps mall as CS - julia (minitrue) RESULTS - Notification displays = Test Notification Listing listing was submitted """ def modify_notification_before_save(self, notification_object): notification_object.listing = self.entity_dict['listing'] def get_entity_id(self): return self.entity_dict['listing'].id def get_group_target(self): return Notification.ORG_STEWARD def get_notification_db_type(self): return Notification.LISTING def get_notification_db_subtype(self): return Notification.LISTING_NEW def get_target_list(self): current_listing = self.entity_dict['listing'] current_listing_agency_id = current_listing.agency.id return Profile.objects.filter(stewarded_organizations__in=[current_listing_agency_id], listing_notification_flag=True).all().distinct() class TagSubscriptionNotification(NotificationBase): # Not Verified """ issue: AMLNG-392 notification_type: TagSubscription As a user, I want to receive notification when a Listing is added to a subscribed tag target: Users that ___ invoked: In-directly """ def modify_notification_before_save(self, notification_object): notification_object.listing = self.entity_dict['listing'] def get_entity_id(self): return 
self.entity_dict['listing'].id def get_notification_db_type(self): return Notification.SUBSCRIPTION def get_notification_db_subtype(self): return Notification.SUBSCRIPTION_TAG def get_target_list(self): # entity: listing, metadata: entities subscription_entries = Subscription.objects.filter(entity_type='tag', entity_id__in=list(self.entity_dict['entities'])) target_profiles = set() for subscription_entry in subscription_entries: target_profile = subscription_entry.target_profile if target_profile.subscription_notification_flag: target_profiles.add(target_profile) return list(target_profiles) class CategorySubscriptionNotification(NotificationBase): # Not Verified """ issue: AMLNG-380 notification_type: CategorySubscription description: As a user, I want to receive notification when a Listing is added to a subscribed category target: Users that are subscribed to category invoked: In-directly event_occurs: Should occur when a user submits a listing with a category and listing gets approved, it should send out notifications for users that have that category subscribed and has the Subscription Preference Flag to True Should occur when a published listing add new category, it should send out notifications for users that have that category subscribed and has the Subscription Preference Flag to True test_case: Logged on as jones Subscribed to Finance Logged on as big brother Add any Listing to Finance Logged on as jones RESULTS- Notification "A new listing in category Finance" """ def modify_notification_before_save(self, notification_object): notification_object.listing = self.entity_dict['listing'] def get_entity_id(self): return self.entity_dict['listing'].id def get_notification_db_type(self): return Notification.SUBSCRIPTION def get_notification_db_subtype(self): return Notification.SUBSCRIPTION_CATEGORY def get_target_list(self): # entity: listing, metadata: entities subscription_entries = Subscription.objects.filter(entity_type='category', 
entity_id__in=list(self.entity_dict['entities'])) target_profiles = set() for subscription_entry in subscription_entries: target_profile = subscription_entry.target_profile if target_profile.subscription_notification_flag: target_profiles.add(target_profile) return list(target_profiles) class StewardAppNotification(NotificationBase): """ issue: AMLNG-745 notification_type: StewardAppNotification description: A process to notify and instruct content stewards to review and update, as needed, current listing information for all listing in their organization. An admin has the ability to send out a notification to all Content Stewards, notifying them to review their org's Listings and make any necessary changes. Content Steward updates Listing as needed via Create New Listing Form (the usual means of updating listings) Content Steward and listing owner(s) are notified when listing has been updated and approve target: All ORG_STEWARDS permission constraint: Only APP_MALL_STEWARDs can send notifications invoked: Directly test_case: As bigbrother, Go to Center Settings (from the dropdown menu) Go to the Notifications Tab Select the 'Update Request' type See that the Notification text is populated with a default message (you can change this message if you like) Set expires on date Press send Login as david, julia, obrien and wsmith to make sure that they have received the message and that it has the link as detailed in acceptance criteria bigbrother should not receive the notification hodor should not receive the notification jones should not receive the notification """ def modify_notification_before_save(self, notification_object): pass def get_entity_id(self): pass def get_notification_db_type(self): return Notification.SYSTEM def get_notification_db_subtype(self): return Notification.REVIEW_REQUEST def get_group_target(self): return Notification.ORG_STEWARD def get_target_list(self): target_set = set() agencies = Agency.objects.all() for steward in 
Profile.objects.filter(stewarded_organizations__in=agencies, listing_notification_flag=True).all(): target_set.add(steward) return list(target_set)
35.680717
162
0.693718
3,861
33,861
5.851334
0.104377
0.019919
0.028506
0.030984
0.637925
0.596273
0.568741
0.556303
0.548601
0.53603
0
0.002339
0.242314
33,861
948
163
35.718354
0.878235
0.362807
0
0.739024
0
0
0.107779
0.069055
0
0
0
0.005274
0.009756
1
0.246341
false
0.007317
0.034146
0.139024
0.52439
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
null
0
0
0
0
0
1
0
0
0
1
1
0
0
4
b0c6156493e7d1d103288c926c4356cf627ef31d
471
py
Python
Inmortal.py/inmortal.py
AnaLopezP/Ejercicios-de-Agregacion-y-Composicion-de-POO
d078f894a50380abcd505a3670b5caabc64e19b0
[ "Apache-2.0" ]
null
null
null
Inmortal.py/inmortal.py
AnaLopezP/Ejercicios-de-Agregacion-y-Composicion-de-POO
d078f894a50380abcd505a3670b5caabc64e19b0
[ "Apache-2.0" ]
null
null
null
Inmortal.py/inmortal.py
AnaLopezP/Ejercicios-de-Agregacion-y-Composicion-de-POO
d078f894a50380abcd505a3670b5caabc64e19b0
[ "Apache-2.0" ]
null
null
null
#Si quitamos el yin.yang = yang (así como el print(yin.yang is yang), para que no dé error), el mensaje aparece antes de la interrogación. #Esto ocurre porque, al hacer esa asignación, creamos dos variables apuntando al mismo objeto. #Entonces, al destruirlo, se destruye una de las dos variables, pero no el objeto, que se destruye automáticamente al final de la ejecución. #Otra manera de evitar esto es llamar al destructor con las dos variables antes de la interrogación.
117.75
137
0.796178
79
471
4.746835
0.620253
0.032
0.048
0.117333
0
0
0
0
0
0
0
0
0.161359
471
4
138
117.75
0.949367
0.985138
0
null
0
null
0
0
null
0
0
0
null
1
null
true
0
0
null
null
null
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
1
0
1
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
0
0
0
0
0
4
b0c93205c960de1aa8c5c1b64d04772df691457d
20
py
Python
data/studio21_generated/introductory/4415/starter_code.py
vijaykumawat256/Prompt-Summarization
614f5911e2acd2933440d909de2b4f86653dc214
[ "Apache-2.0" ]
null
null
null
data/studio21_generated/introductory/4415/starter_code.py
vijaykumawat256/Prompt-Summarization
614f5911e2acd2933440d909de2b4f86653dc214
[ "Apache-2.0" ]
null
null
null
data/studio21_generated/introductory/4415/starter_code.py
vijaykumawat256/Prompt-Summarization
614f5911e2acd2933440d909de2b4f86653dc214
[ "Apache-2.0" ]
null
null
null
def proc_arr(arr):
10
18
0.7
4
20
3.25
0.75
0
0
0
0
0
0
0
0
0
0
0
0.15
20
2
19
10
0.764706
0
0
0
0
0
0
0
0
0
0
0
0
0
null
null
0
0
null
null
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
1
0
0
0
0
0
0
0
0
4
b0d602836cace314da75aa57f03b79d0e46a8cdd
165
py
Python
basic/05_slice.py
onezens/python
73cdc22901a006751338d0145b6e120e55fdf80f
[ "MIT" ]
null
null
null
basic/05_slice.py
onezens/python
73cdc22901a006751338d0145b6e120e55fdf80f
[ "MIT" ]
null
null
null
basic/05_slice.py
onezens/python
73cdc22901a006751338d0145b6e120e55fdf80f
[ "MIT" ]
null
null
null
#!usr/bin/python #encoding=utf8 #下标 name = "xiaoming" print(name[0]) print(name[2]) #切边 print(name[0:6]) print(name[0:6:2]) print(name[6:]) print(name[-1::-1])
9.705882
19
0.630303
31
165
3.354839
0.451613
0.519231
0.288462
0.211538
0
0
0
0
0
0
0
0.07483
0.109091
165
16
20
10.3125
0.632653
0.193939
0
0
0
0
0.062016
0
0
0
0
0
0
1
0
false
0
0
0
0
0.857143
0
0
0
null
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
1
0
4
b0fa7caa65a6e46aaa0595efe90c8648776ad0dc
27
py
Python
bernard/__init__.py
leviroth/bernard
f4d766d45ffbde0ff5d91ce8e3c66d2728d6c2aa
[ "MIT" ]
2
2017-03-10T19:13:04.000Z
2017-06-19T05:11:32.000Z
bernard/__init__.py
leviroth/bernard
f4d766d45ffbde0ff5d91ce8e3c66d2728d6c2aa
[ "MIT" ]
7
2017-10-18T23:25:04.000Z
2018-06-26T02:44:53.000Z
bernard/__init__.py
leviroth/bernard
f4d766d45ffbde0ff5d91ce8e3c66d2728d6c2aa
[ "MIT" ]
1
2017-04-12T20:38:51.000Z
2017-04-12T20:38:51.000Z
"""The bernard package."""
13.5
26
0.62963
3
27
5.666667
1
0
0
0
0
0
0
0
0
0
0
0
0.111111
27
1
27
27
0.708333
0.740741
0
null
0
null
0
0
null
0
0
0
null
1
null
true
0
0
null
null
null
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
0
0
0
0
0
4
9fe0e7d55431467d269deeeeca040a2a620f238c
7,642
py
Python
python/bad_copyrights.py
lachlanorr/gaen
325b4604ac8a8a366aa93cca9d8393ccdbc682fa
[ "Zlib" ]
2
2015-05-07T21:03:25.000Z
2017-10-11T11:17:28.000Z
python/bad_copyrights.py
lachlanorr/gaen
325b4604ac8a8a366aa93cca9d8393ccdbc682fa
[ "Zlib" ]
4
2016-06-25T21:41:50.000Z
2016-12-26T19:35:19.000Z
python/bad_copyrights.py
lachlanorr/gaen
325b4604ac8a8a366aa93cca9d8393ccdbc682fa
[ "Zlib" ]
1
2016-08-27T18:05:49.000Z
2016-08-27T18:05:49.000Z
#!/usr/bin/env python #------------------------------------------------------------------------------- # bad_copyrights.py - Check for copyright headers in source files # # Gaen Concurrency Engine - http://gaen.org # Copyright (c) 2014-2021 Lachlan Orr # # This software is provided 'as-is', without any express or implied # warranty. In no event will the authors be held liable for any damages # arising from the use of this software. # # Permission is granted to anyone to use this software for any purpose, # including commercial applications, and to alter it and redistribute it # freely, subject to the following restrictions: # # 1. The origin of this software must not be misrepresented; you must not # claim that you wrote the original software. If you use this software # in a product, an acknowledgment in the product documentation would be # appreciated but is not required. # # 2. Altered source versions must be plainly marked as such, and must not be # misrepresented as being the original software. # # 3. This notice may not be removed or altered from any source # distribution. #------------------------------------------------------------------------------- ''' Prints out source file paths that are missing standard gaen copyright/license message. ''' import os import posixpath import re import datetime as dt SCRIPT_HEADER = (r"#-------------------------------------------------------------------------------\n" r"# %s - .+[^\.]\n" r"#\n" r"# Gaen Concurrency Engine - http://gaen\.org\n" r"# Copyright \(c\) 2014-%d Lachlan Orr\n" r"#\n" r"# This software is provided 'as-is', without any express or implied\n" r"# warranty\. In no event will the authors be held liable for any damages\n" r"# arising from the use of this software\.\n" r"#\n" r"# Permission is granted to anyone to use this software for any purpose,\n" r"# including commercial applications, and to alter it and redistribute it\n" r"# freely, subject to the following restrictions:\n" r"#\n" r"# 1\. 
The origin of this software must not be misrepresented; you must not\n" r"# claim that you wrote the original software\. If you use this software\n" r"# in a product, an acknowledgment in the product documentation would be\n" r"# appreciated but is not required\.\n" r"#\n" r"# 2\. Altered source versions must be plainly marked as such, and must not be\n" r"# misrepresented as being the original software\.\n" r"#\n" r"# 3\. This notice may not be removed or altered from any source\n" r"# distribution\.\n" r"#-------------------------------------------------------------------------------\n") C_HEADER = (r"/\*------------------------------------------------------------------------------\n" r"%s - .+[^\.]\n" r"\n" r"Gaen Concurrency Engine - http://gaen\.org\n" r"Copyright \(c\) 2014-%d Lachlan Orr\n" r"\n" r"This software is provided 'as-is', without any express or implied\n" r"warranty\. In no event will the authors be held liable for any damages\n" r"arising from the use of this software\.\n" r"\n" r"Permission is granted to anyone to use this software for any purpose,\n" r"including commercial applications, and to alter it and redistribute it\n" r"freely, subject to the following restrictions:\n" r"\n" r" 1\. The origin of this software must not be misrepresented; you must not\n" r" claim that you wrote the original software\. If you use this software\n" r" in a product, an acknowledgment in the product documentation would be\n" r" appreciated but is not required\.\n" r"\n" r" 2\. Altered source versions must be plainly marked as such, and must not be\n" r" misrepresented as being the original software\.\n" r"\n" r" 3\. 
This notice may not be removed or altered from any source\n" r" distribution\.\n" r"------------------------------------------------------------------------------\*/\n") CPP_HEADER = (r"//------------------------------------------------------------------------------\n" r"// %s - .+[^\.]\n" r"//\n" r"// Gaen Concurrency Engine - http://gaen\.org\n" r"// Copyright \(c\) 2014-%d Lachlan Orr\n" r"//\n" r"// This software is provided 'as-is', without any express or implied\n" r"// warranty\. In no event will the authors be held liable for any damages\n" r"// arising from the use of this software\.\n" r"//\n" r"// Permission is granted to anyone to use this software for any purpose,\n" r"// including commercial applications, and to alter it and redistribute it\n" r"// freely, subject to the following restrictions:\n" r"//\n" r"// 1\. The origin of this software must not be misrepresented; you must not\n" r"// claim that you wrote the original software\. If you use this software\n" r"// in a product, an acknowledgment in the product documentation would be\n" r"// appreciated but is not required\.\n" r"//\n" r"// 2\. Altered source versions must be plainly marked as such, and must not be\n" r"// misrepresented as being the original software\.\n" r"//\n" r"// 3\. 
This notice may not be removed or altered from any source\n" r"// distribution\.\n" r"//------------------------------------------------------------------------------\n") def current_year(): return dt.datetime.now().year HEADERS = [(re.compile(r'^.*\.(h|c|cpp|cmp|m|mm)$'), CPP_HEADER), (re.compile(r'^.*\.(y|l)$'), C_HEADER), (re.compile(r'^.*\.py$'), SCRIPT_HEADER), (re.compile(r'^.*(CMakeLists.txt|\.cmake)$'), SCRIPT_HEADER), ] EXCLUDE_DIR_RE = re.compile(r'^.*(/external/|/build/|/python/templates/|/compose/compose_(parser|scanner)\.).*$') def expected_header(path): fname = posixpath.split(path)[1] for pattern, header in HEADERS: if re.match(pattern, path): return header % (fname, current_year()) return None def verify_header(path, exphdr): with open(path, 'r') as f: data = f.read() return re.search(exphdr, data, re.MULTILINE) def checkfile(path): exphdr = expected_header(path) if (exphdr is not None): return verify_header(path, exphdr) return True def checkdir(path): for root, dirs, files in os.walk(path): for f in files: fullpath = posixpath.join(root.replace('\\', '/'), f) if not re.match(EXCLUDE_DIR_RE, fullpath): if not checkfile(fullpath): print fullpath def checkdirs(): scriptdir = os.path.split(os.path.abspath(__file__))[0].replace('\\', '/') gaendir = posixpath.split(scriptdir)[0] checkdir(gaendir) if __name__=='__main__': checkdirs()
46.315152
113
0.531013
955
7,642
4.216754
0.182199
0.035759
0.015644
0.017879
0.737522
0.730817
0.703253
0.695555
0.695555
0.695555
0
0.006407
0.285135
7,642
164
114
46.597561
0.730734
0.149176
0
0.156522
0
0
0.566369
0.09826
0
0
0
0
0
0
null
null
0
0.034783
null
null
0.008696
0
0
0
null
0
0
0
0
1
1
0
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
1
0
0
0
0
0
0
0
0
4
9fff321ab9b32b6c9bc67b646ae1cdeebb9df8be
191
py
Python
BaseKnowledge/tensor/inv.py
Kose-i/python_test
d7b031aa33d699aeb9fe196fe0a6d216aa006f0d
[ "Unlicense" ]
null
null
null
BaseKnowledge/tensor/inv.py
Kose-i/python_test
d7b031aa33d699aeb9fe196fe0a6d216aa006f0d
[ "Unlicense" ]
null
null
null
BaseKnowledge/tensor/inv.py
Kose-i/python_test
d7b031aa33d699aeb9fe196fe0a6d216aa006f0d
[ "Unlicense" ]
null
null
null
import numpy as np a = np.matrix([[1,2],[3,4]]) print(a) print(np.linalg.inv(a)) b = np.matrix([[1,2],[2,4]]) print(np.linalg.det(b)) if np.linalg.det(b) != 0: print(np.linalg.inv(b))
13.642857
28
0.591623
41
191
2.756098
0.414634
0.283186
0.345133
0.176991
0
0
0
0
0
0
0
0.054217
0.13089
191
13
29
14.692308
0.626506
0
0
0
0
0
0
0
0
0
0
0
0
1
0
false
0
0.125
0
0.125
0.5
0
0
0
null
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
1
0
4
b0071ff7de4c25ece92898782fc3a853cb9654ac
468
py
Python
sample_app/app.py
BkrmDahal/kubernetes_flaskapp
7690796250298104621e00e2d26b245fb71280e0
[ "MIT" ]
1
2021-12-10T05:26:42.000Z
2021-12-10T05:26:42.000Z
sample_app/app.py
BkrmDahal/kubernetes_flaskapp
7690796250298104621e00e2d26b245fb71280e0
[ "MIT" ]
null
null
null
sample_app/app.py
BkrmDahal/kubernetes_flaskapp
7690796250298104621e00e2d26b245fb71280e0
[ "MIT" ]
null
null
null
import os from flask import Flask from flask import request import flask import redis import time import json from flask import Response, stream_with_context app = Flask(__name__) @app.route('/api/') def cluster(): return "welcome to cluster /api/" @app.route('/api/<welcome>') def cluster_2(welcome): return "welcome to cluster /api/ " + welcome @app.route('/') def cluster_1(): return "welcome to cluster" if __name__ == "__main__": app.run()
16.714286
48
0.707265
66
468
4.772727
0.393939
0.139683
0.142857
0.209524
0.15873
0
0
0
0
0
0
0.005155
0.17094
468
27
49
17.333333
0.806701
0
0
0
0
0
0.202991
0
0
0
0
0
0
1
0.15
false
0
0.4
0.15
0.7
0
0
0
0
null
0
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
1
1
0
0
4
b024207829c40ad643325b825b27bbf88f69fbe2
521
py
Python
tests/examples/process/current.py
looking-for-a-job/elapsed.py
72089f6d24b907d0b0171358cc543b8841a1bc17
[ "Unlicense" ]
3
2020-10-02T23:18:20.000Z
2020-10-21T01:21:14.000Z
tests/examples/process/current.py
looking-for-a-job/elapsed.py
72089f6d24b907d0b0171358cc543b8841a1bc17
[ "Unlicense" ]
1
2022-01-27T00:01:10.000Z
2022-03-08T19:26:57.000Z
tests/examples/process/current.py
looking-for-a-job/elapsed.py
72089f6d24b907d0b0171358cc543b8841a1bc17
[ "Unlicense" ]
null
null
null
#!/usr/bin/env python import time import elapsed time.sleep(2) print("elapsed.get: %s" % elapsed.get()) print("elapsed.get.seconds: %s" % elapsed.get().seconds) print("elapsed.get.minutes: %s" % elapsed.get().minutes) print("elapsed.get.hours: %s" % elapsed.get().hours) print("elapsed.get.days: %s" % elapsed.get().days) print("") print("elapsed.seconds(): %s" % elapsed.seconds()) print("elapsed.minutes: %s" % elapsed.minutes()) print("elapsed.hours: %s" % elapsed.hours()) print("elapsed.days: %s" % elapsed.days())
32.5625
56
0.681382
74
521
4.797297
0.216216
0.28169
0.211268
0
0
0
0
0
0
0
0
0.002101
0.086372
521
15
57
34.733333
0.743697
0.038388
0
0
0
0
0.35
0
0
0
0
0
0
1
0
true
0
0.153846
0
0.153846
0.769231
0
0
0
null
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
0
0
0
1
0
4
b025f193f7a03b55064cf51c8d3c569cf88ed577
100
py
Python
__init__.py
JustTang/pyexp
671aeb0b2c64ce9294035ac5a680109a1efe87eb
[ "MIT" ]
null
null
null
__init__.py
JustTang/pyexp
671aeb0b2c64ce9294035ac5a680109a1efe87eb
[ "MIT" ]
null
null
null
__init__.py
JustTang/pyexp
671aeb0b2c64ce9294035ac5a680109a1efe87eb
[ "MIT" ]
null
null
null
import os,sys #add dll path in pyexp to sys.path sys.path.append(os.path.dirname(__file__)+'\\dll')
25
50
0.74
19
100
3.684211
0.631579
0.2
0
0
0
0
0
0
0
0
0
0
0.1
100
3
51
33.333333
0.777778
0.33
0
0
0
0
0.075758
0
0
0
0
0
0
1
0
true
0
0.5
0
0.5
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
0
0
4
b033f9baa755266488d6325fe33ece256b29d310
21
py
Python
jupyterlabpymolpysnips/Help/helpDocs.py
MooersLab/pymolpysnips
50a89c85adf8006d85c1d6cd3f8aad7e440a0b92
[ "MIT" ]
null
null
null
jupyterlabpymolpysnips/Help/helpDocs.py
MooersLab/pymolpysnips
50a89c85adf8006d85c1d6cd3f8aad7e440a0b92
[ "MIT" ]
null
null
null
jupyterlabpymolpysnips/Help/helpDocs.py
MooersLab/pymolpysnips
50a89c85adf8006d85c1d6cd3f8aad7e440a0b92
[ "MIT" ]
null
null
null
cmd.do('help(help)')
10.5
20
0.619048
4
21
3.25
0.75
0
0
0
0
0
0
0
0
0
0
0
0.047619
21
1
21
21
0.65
0
0
0
0
0
0.47619
0
0
0
0
0
0
1
0
true
0
0
0
0
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
0
0
0
0
0
4
b056711b954bf645df425e92a1d1ec13e9ac3abf
4,910
py
Python
pyvolt/models/permissions.py
Gael-devv/Pyvolt
1d84ba95f1fd3f959a933051c25f8a3e60500c5d
[ "MIT" ]
null
null
null
pyvolt/models/permissions.py
Gael-devv/Pyvolt
1d84ba95f1fd3f959a933051c25f8a3e60500c5d
[ "MIT" ]
null
null
null
pyvolt/models/permissions.py
Gael-devv/Pyvolt
1d84ba95f1fd3f959a933051c25f8a3e60500c5d
[ "MIT" ]
null
null
null
from __future__ import annotations from .flags import BaseFlags, flag_value, fill_with_flags __all__ = ( "ChannelPermissions", "ServerPermissions" ) # Channel permissions # # View = 0b00000000000000000000000000000001 // 1 # SendMessage = 0b00000000000000000000000000000010 // 2 # ManageMessages = 0b00000000000000000000000000000100 // 4 # ManageChannel = 0b00000000000000000000000000001000 // 8 # VoiceCall = 0b00000000000000000000000000010000 // 16 # InviteOthers = 0b00000000000000000000000000100000 // 32 # EmbedLinks = 0b00000000000000000000000001000000 // 64 # UploadFiles = 0b00000000000000000000000010000000 // 128 # Server permissions # # View = 0b00000000000000000000000000000001 // 1 # ManageRoles = 0b00000000000000000000000000000010 // 2 # ManageChannels = 0b00000000000000000000000000000100 // 4 # ManageServer = 0b00000000000000000000000000001000 // 8 # KickMembers = 0b00000000000000000000000000010000 // 16 # BanMembers = 0b00000000000000000000000000100000 // 32 # ChangeNickname = 0b00000000000000000001000000000000 // 4096 # ManageNicknames = 0b00000000000000000010000000000000 // 8192 # ChangeAvatar = 0b00000000000000000100000000000000 // 16382 # RemoveAvatars = 0b00000000000000001000000000000000 // 32768 @fill_with_flags() class ChannelPermissions(BaseFlags): """Represents the channel permissions for a role as seen in channel settings.""" __slots__ = () def __init__(self, permissions: int = 0, **kwargs: bool): if not isinstance(permissions, int): raise TypeError(f"Expected int parameter, received {permissions.__class__.__name__} instead.") self.value = permissions for key, value in kwargs.items(): if key not in self.VALID_FLAGS: raise TypeError(f"{key!r} is not a valid permission name.") setattr(self, key, value) @classmethod def none(cls) -> ChannelPermissions: """A factory method that creates a :class:`ChannelPermissions` with all permissions set to ``False``. 
""" return cls(0) @classmethod def all(cls) -> ChannelPermissions: """A factory method that creates a :class:`ChannelPermissions` with all permissions set to ``True``. """ return cls(0b11111011) @classmethod def text(cls) -> ChannelPermissions: """A factory method that creates a :class:`ChannelPermissions` with all "Text" channel permissions from the Revolt api set to ``True``. """ return cls(0b11000011) @flag_value def view_channel(self) -> int: return 1 << 0 @flag_value def send_message(self) -> int: return 1 << 1 @flag_value def manage_messages(self) -> int: return 1 << 2 @flag_value def manage_channel(self) -> int: return 1 << 3 @flag_value def connect(self) -> int: return 1 << 4 @flag_value def invite_others(self) -> int: return 1 << 5 @flag_value def embed_links(self) -> int: return 1 << 6 @flag_value def upload_files(self) -> int: return 1 << 7 @fill_with_flags() class ServerPermissions(BaseFlags): """Represents the server permissions for a role as seen in server settings.""" __slots__ = () def __init__(self, permissions: int = 0, **kwargs: bool): if not isinstance(permissions, int): raise TypeError(f"Expected int parameter, received {permissions.__class__.__name__} instead.") self.value = permissions for key, value in kwargs.items(): if key not in self.VALID_FLAGS: raise TypeError(f"{key!r} is not a valid permission name.") setattr(self, key, value) @classmethod def none(cls) -> ServerPermissions: return cls(0) @classmethod def all(cls) -> ServerPermissions: return cls(0b1111000000111111) @classmethod def moderator(cls) -> ServerPermissions: return cls(0b1111000000101111) @flag_value def view_server(self) -> int: return 1 << 0 @flag_value def manage_roles(self) -> int: return 1 << 1 @flag_value def manage_channels(self) -> int: return 1 << 2 @flag_value def manage_server(self) -> int: return 1 << 3 @flag_value def kick_members(self) -> int: return 1 << 4 @flag_value def ban_members(self) -> int: return 1 << 5 @flag_value def change_nicknames(self) -> int: 
return 1 << 12 @flag_value def manage_nicknames(self) -> int: return 1 << 13 @flag_value def change_avatar(self) -> int: return 1 << 14 @flag_value def remove_avatars(self) -> int: return 1 << 15
27.740113
106
0.639104
497
4,910
6.144869
0.285714
0.055992
0.070727
0.082515
0.470203
0.431893
0.431893
0.395547
0.32482
0.281598
0
0.203291
0.269654
4,910
176
107
27.897727
0.648355
0.327902
0
0.58
0
0
0.081512
0.019988
0
0
0
0
0
1
0.26
false
0
0.02
0.21
0.56
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
1
1
0
0
4
b062493d7e021cec3d76931477f2fcf5762473c7
395
py
Python
python/8kyu/fundamentails_return.py
Sigmanificient/codewars
b34df4bf55460d312b7ddf121b46a707b549387a
[ "MIT" ]
3
2021-06-08T01:57:13.000Z
2021-06-26T10:52:47.000Z
python/8kyu/fundamentails_return.py
Sigmanificient/codewars
b34df4bf55460d312b7ddf121b46a707b549387a
[ "MIT" ]
null
null
null
python/8kyu/fundamentails_return.py
Sigmanificient/codewars
b34df4bf55460d312b7ddf121b46a707b549387a
[ "MIT" ]
2
2021-06-10T21:20:13.000Z
2021-06-30T10:13:26.000Z
"""Kata url: https://www.codewars.com/kata/55a5befdf16499bffb00007b.""" def add(a: int, b: int) -> int: return a + b def multiply(a: int, b: int) -> int: return a * b def divide(a: int, b: int) -> float: return a / b def mod(a: int, b: int) -> int: return a % b def exponent(a: int, b: int) -> int: return a ** b def subt(a: int, b: int) -> int: return a - b
15.192308
71
0.556962
68
395
3.235294
0.279412
0.109091
0.136364
0.218182
0.486364
0.486364
0.486364
0.486364
0.4
0
0
0.044828
0.265823
395
25
72
15.8
0.713793
0.164557
0
0
0
0
0
0
0
0
0
0
0
1
0.5
false
0
0
0.5
1
0
0
0
0
null
0
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
1
1
0
0
4