hexsha
string
size
int64
ext
string
lang
string
max_stars_repo_path
string
max_stars_repo_name
string
max_stars_repo_head_hexsha
string
max_stars_repo_licenses
list
max_stars_count
int64
max_stars_repo_stars_event_min_datetime
string
max_stars_repo_stars_event_max_datetime
string
max_issues_repo_path
string
max_issues_repo_name
string
max_issues_repo_head_hexsha
string
max_issues_repo_licenses
list
max_issues_count
int64
max_issues_repo_issues_event_min_datetime
string
max_issues_repo_issues_event_max_datetime
string
max_forks_repo_path
string
max_forks_repo_name
string
max_forks_repo_head_hexsha
string
max_forks_repo_licenses
list
max_forks_count
int64
max_forks_repo_forks_event_min_datetime
string
max_forks_repo_forks_event_max_datetime
string
content
string
avg_line_length
float64
max_line_length
int64
alphanum_fraction
float64
qsc_code_num_words_quality_signal
int64
qsc_code_num_chars_quality_signal
float64
qsc_code_mean_word_length_quality_signal
float64
qsc_code_frac_words_unique_quality_signal
float64
qsc_code_frac_chars_top_2grams_quality_signal
float64
qsc_code_frac_chars_top_3grams_quality_signal
float64
qsc_code_frac_chars_top_4grams_quality_signal
float64
qsc_code_frac_chars_dupe_5grams_quality_signal
float64
qsc_code_frac_chars_dupe_6grams_quality_signal
float64
qsc_code_frac_chars_dupe_7grams_quality_signal
float64
qsc_code_frac_chars_dupe_8grams_quality_signal
float64
qsc_code_frac_chars_dupe_9grams_quality_signal
float64
qsc_code_frac_chars_dupe_10grams_quality_signal
float64
qsc_code_frac_chars_replacement_symbols_quality_signal
float64
qsc_code_frac_chars_digital_quality_signal
float64
qsc_code_frac_chars_whitespace_quality_signal
float64
qsc_code_size_file_byte_quality_signal
float64
qsc_code_num_lines_quality_signal
float64
qsc_code_num_chars_line_max_quality_signal
float64
qsc_code_num_chars_line_mean_quality_signal
float64
qsc_code_frac_chars_alphabet_quality_signal
float64
qsc_code_frac_chars_comments_quality_signal
float64
qsc_code_cate_xml_start_quality_signal
float64
qsc_code_frac_lines_dupe_lines_quality_signal
float64
qsc_code_cate_autogen_quality_signal
float64
qsc_code_frac_lines_long_string_quality_signal
float64
qsc_code_frac_chars_string_length_quality_signal
float64
qsc_code_frac_chars_long_word_length_quality_signal
float64
qsc_code_frac_lines_string_concat_quality_signal
float64
qsc_code_cate_encoded_data_quality_signal
float64
qsc_code_frac_chars_hex_words_quality_signal
float64
qsc_code_frac_lines_prompt_comments_quality_signal
float64
qsc_code_frac_lines_assert_quality_signal
float64
qsc_codepython_cate_ast_quality_signal
float64
qsc_codepython_frac_lines_func_ratio_quality_signal
float64
qsc_codepython_cate_var_zero_quality_signal
bool
qsc_codepython_frac_lines_pass_quality_signal
float64
qsc_codepython_frac_lines_import_quality_signal
float64
qsc_codepython_frac_lines_simplefunc_quality_signal
float64
qsc_codepython_score_lines_no_logic_quality_signal
float64
qsc_codepython_frac_lines_print_quality_signal
float64
qsc_code_num_words
int64
qsc_code_num_chars
int64
qsc_code_mean_word_length
int64
qsc_code_frac_words_unique
null
qsc_code_frac_chars_top_2grams
int64
qsc_code_frac_chars_top_3grams
int64
qsc_code_frac_chars_top_4grams
int64
qsc_code_frac_chars_dupe_5grams
int64
qsc_code_frac_chars_dupe_6grams
int64
qsc_code_frac_chars_dupe_7grams
int64
qsc_code_frac_chars_dupe_8grams
int64
qsc_code_frac_chars_dupe_9grams
int64
qsc_code_frac_chars_dupe_10grams
int64
qsc_code_frac_chars_replacement_symbols
int64
qsc_code_frac_chars_digital
int64
qsc_code_frac_chars_whitespace
int64
qsc_code_size_file_byte
int64
qsc_code_num_lines
int64
qsc_code_num_chars_line_max
int64
qsc_code_num_chars_line_mean
int64
qsc_code_frac_chars_alphabet
int64
qsc_code_frac_chars_comments
int64
qsc_code_cate_xml_start
int64
qsc_code_frac_lines_dupe_lines
int64
qsc_code_cate_autogen
int64
qsc_code_frac_lines_long_string
int64
qsc_code_frac_chars_string_length
int64
qsc_code_frac_chars_long_word_length
int64
qsc_code_frac_lines_string_concat
null
qsc_code_cate_encoded_data
int64
qsc_code_frac_chars_hex_words
int64
qsc_code_frac_lines_prompt_comments
int64
qsc_code_frac_lines_assert
int64
qsc_codepython_cate_ast
int64
qsc_codepython_frac_lines_func_ratio
int64
qsc_codepython_cate_var_zero
int64
qsc_codepython_frac_lines_pass
int64
qsc_codepython_frac_lines_import
int64
qsc_codepython_frac_lines_simplefunc
int64
qsc_codepython_score_lines_no_logic
int64
qsc_codepython_frac_lines_print
int64
effective
string
hits
int64
e847b9863c33aeff3540db859decc8eba52b32b2
14
py
Python
try.py
yenwel/DAT208x
83113394bc04214763ba6f7a39f26285628d389c
[ "Unlicense" ]
null
null
null
try.py
yenwel/DAT208x
83113394bc04214763ba6f7a39f26285628d389c
[ "Unlicense" ]
null
null
null
try.py
yenwel/DAT208x
83113394bc04214763ba6f7a39f26285628d389c
[ "Unlicense" ]
null
null
null
print( 4 + 5 )
14
14
0.5
3
14
2.333333
1
0
0
0
0
0
0
0
0
0
0
0.2
0.285714
14
1
14
14
0.5
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
0
0
0
1
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
0
0
0
1
0
5
e859fe02fc865d6a6542284e6fe490acb6dc5cb5
97
py
Python
code/dk-iris-pipeline/airflow_home/src/explore/__init__.py
dushyantkhosla/airflow4ds
b5ae213f7169c54d31f4eca58d235ec6b09fd56f
[ "MIT" ]
13
2019-01-15T14:42:58.000Z
2021-11-10T07:29:36.000Z
code/dk-iris-pipeline/airflow_home/src/explore/__init__.py
dushyantkhosla/airflow4ds
b5ae213f7169c54d31f4eca58d235ec6b09fd56f
[ "MIT" ]
1
2018-10-03T12:52:56.000Z
2018-10-03T12:52:56.000Z
code/dk-iris-pipeline/airflow_home/src/explore/__init__.py
dushyantkhosla/airflow4ds
b5ae213f7169c54d31f4eca58d235ec6b09fd56f
[ "MIT" ]
3
2020-04-21T14:55:39.000Z
2021-04-29T15:42:00.000Z
from .make_plots import class_imbalance, variance_by_group, anova_results from .run import anova
32.333333
73
0.85567
15
97
5.2
0.8
0
0
0
0
0
0
0
0
0
0
0
0.103093
97
2
74
48.5
0.896552
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
e889f1be64a1396ce2a2ac855aa3ad3bd640114c
4,464
py
Python
trajectories.py
itrosen/hall-solver
70ca5364b6c16bf62b7faa69ac30d14f972d7320
[ "MIT" ]
null
null
null
trajectories.py
itrosen/hall-solver
70ca5364b6c16bf62b7faa69ac30d14f972d7320
[ "MIT" ]
null
null
null
trajectories.py
itrosen/hall-solver
70ca5364b6c16bf62b7faa69ac30d14f972d7320
[ "MIT" ]
null
null
null
""" Created on Dec 16 2021 @author: Ilan Rosen Trajectories in (s_xy, s_xx) space functions output lambda = s_xy / s_xx all conductivities in units e2/h resistivities in units ohms/sq RG flow semicircle trajectory: sxx ** 2 + (sxy - 0.5) ** 2 = 0.25 """ import numpy as np def lambda_sxy_const(lower = 100, upper = 5000, n = 10): ''' trajectory along sxy = 1 Args: lower : first value of p_xx upper : final value of p_xx n : number of points Returns: lmbda : lambda along trajectory ''' pxx = np.linspace(lower, upper, n)/25812 sxx = pxx / ( pxx**2 + 1 ) sxy = 1 / ( pxx**2 + 1 ) return sxy / sxx def pxx_sxy_const(lower = 100, upper = 5000, n = 10): ''' trajectory along sxy = 1 Args: lower : first value of p_xx upper : final value of p_xx n : number of points Returns: pxx : pxx along trajectory ''' pxx = np.linspace(lower, upper, n) return pxx def semicircle_x2y(x): ''' converts sxx to sxy along RG flow trajectory Args: x : sxx. Must be between 0 and 0.5 Returns: y : sxy ''' if np.any(x > 0.5) or np.any(x < 0): raise Exception ('x out of bounds') return np.sqrt(0.25 - x**2) + 0.5 def semicircle_y2x(y): ''' converts sxx to sxy along RG flow trajectory Args: x : sxx Returns: y : sxy ''' return np.sqrt(0.25 - (y - 0.5)**2) def lambda_semicircle(lower = 100, upper = 5000, n = 10): ''' trajectory along RG semicircle Args: lower : first value of p_xx upper : final value of p_xx n : number of points Returns: lambda : sxy/sxx along trajectory ''' if lower < 0.1 or lower > 10000: raise Exception ('lower limit out of bounds') if upper < 0.1 or upper > 10000: raise Exception ('upper limit out of bounds') if lower >= upper: raise Exception ('lower limit exceeds upper limit') # estimate sxx lower and sxx final sxx_lower = 25812 * lower / (lower**2 + 25812**2) sxx_upper = 25812 * upper / (upper**2 + 25812**2) # make sxx linearly spaced sxx = np.linspace(sxx_lower, sxx_upper, n) # solve for sxy sxy = semicircle_x2y(sxx) return sxy / sxx def sxx_semicircle(lower = 100, upper = 5000, n = 
10): ''' trajectory along RG semicircle Args: lower : first value of p_xx upper : final value of p_xx n : number of points Returns: lambda : sxy/sxx along trajectory ''' if lower < 0.1 or lower > 10000: raise Exception ('lower limit out of bounds') if upper < 0.1 or upper > 10000: raise Exception ('upper limit out of bounds') if lower >= upper: raise Exception ('lower limit exceeds upper limit') # estimate sxx lower and sxx final sxx_lower = 25812 * lower / (lower**2 + 25812**2) sxx_upper = 25812 * upper / (upper**2 + 25812**2) # make sxx linearly spaced sxx = np.linspace(sxx_lower, sxx_upper, n) # solve for sxy sxy = semicircle_x2y(sxx) return sxx def sxy_semicircle(lower = 100, upper = 5000, n = 10): ''' trajectory along RG semicircle Args: lower : first value of p_xx upper : final value of p_xx n : number of points Returns: lambda : sxy/sxx along trajectory ''' if lower < 0.1 or lower > 10000: raise Exception ('lower limit out of bounds') if upper < 0.1 or upper > 10000: raise Exception ('upper limit out of bounds') if lower >= upper: raise Exception ('lower limit exceeds upper limit') # estimate sxx lower and sxx final sxx_lower = 25812 * lower / (lower**2 + 25812**2) sxx_upper = 25812 * upper / (upper**2 + 25812**2) # make sxx linearly spaced sxx = np.linspace(sxx_lower, sxx_upper, n) # solve for sxy sxy = semicircle_x2y(sxx) return sxy def pxx_semicircle(lower = 100, upper = 5000, n = 10): ''' trajectory along RG semicircle Args: lower : first value of p_xx upper : final value of p_xx n : number of points Returns: pxx : pxx along trajectory ''' if lower < 0.1 or lower > 10000: raise Exception ('lower limit out of bounds') if upper < 0.1 or upper > 10000: raise Exception ('upper limit out of bounds') if lower >= upper: raise Exception ('lower limit exceeds upper limit') # estimate sxx lower and sxx final sxx_lower = (lower / 25812) / (1 + (lower/25812)**2) sxx_upper = (upper / 25812) / (0.95 + (upper/25812)**2) # this is super crude but whatever # make sxx 
linearly spaced sxx = np.linspace(sxx_lower, sxx_upper, n) # solve for sxy sxy = semicircle_x2y(sxx) # convert to resistance units pxx = 25812 * sxx / (sxx**2 + sxy**2) return pxx def lambda2pxx(lmbda): return 25812 / lmbda
23.129534
91
0.664427
719
4,464
4.061196
0.139082
0.062329
0.032877
0.041096
0.79726
0.786986
0.786986
0.786986
0.765411
0.765411
0
0.078505
0.226703
4,464
193
92
23.129534
0.767381
0.421371
0
0.692308
0
0
0.129439
0
0
0
0
0
0
1
0.115385
false
0
0.012821
0.012821
0.24359
0
0
0
0
null
0
0
0
0
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
5
e8c5a40f8fbef46746fdeb34b7ebffda3519454a
904
py
Python
politicians/migrations/0003_auto_20191230_1715.py
zinaukarenku/zkr-platform
8daf7d1206c482f1f8e0bcd54d4fde783e568774
[ "Apache-2.0" ]
2
2018-11-16T21:45:17.000Z
2019-02-03T19:55:46.000Z
politicians/migrations/0003_auto_20191230_1715.py
zinaukarenku/zkr-platform
8daf7d1206c482f1f8e0bcd54d4fde783e568774
[ "Apache-2.0" ]
13
2018-08-17T19:12:11.000Z
2022-03-11T23:27:41.000Z
politicians/migrations/0003_auto_20191230_1715.py
zinaukarenku/zkr-platform
8daf7d1206c482f1f8e0bcd54d4fde783e568774
[ "Apache-2.0" ]
null
null
null
# Generated by Django 2.2.9 on 2019-12-30 17:15 from django.db import migrations class Migration(migrations.Migration): dependencies = [ ('politicians', '0002_remove_politicians_slug'), ] operations = [ migrations.RemoveField( model_name='politicians', name='created_by', ), migrations.RemoveField( model_name='politicians', name='updated_by', ), migrations.RemoveField( model_name='promiseaction', name='created_by', ), migrations.RemoveField( model_name='promiseaction', name='updated_by', ), migrations.RemoveField( model_name='promises', name='created_by', ), migrations.RemoveField( model_name='promises', name='updated_by', ), ]
23.789474
56
0.542035
76
904
6.25
0.394737
0.265263
0.328421
0.378947
0.658947
0.658947
0.549474
0
0
0
0
0.032368
0.350664
904
37
57
24.432432
0.776831
0.049779
0
0.774194
1
0
0.190198
0.032672
0
0
0
0
0
1
0
false
0
0.032258
0
0.129032
0
0
0
0
null
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
1
1
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
5
e8cd1c9f65ef95738e3b8625cd9fe883c4cb125d
754
py
Python
tests/packages/CommandCasing/plugin.py
Thom1729/st_package_reviewer
71e4eaad60fe3391b0a4d39405b784ec84ea58bc
[ "MIT" ]
8
2017-06-07T07:52:32.000Z
2021-04-26T23:46:36.000Z
tests/packages/CommandCasing/plugin.py
Thom1729/st_package_reviewer
71e4eaad60fe3391b0a4d39405b784ec84ea58bc
[ "MIT" ]
26
2017-05-29T21:11:10.000Z
2021-05-16T20:58:23.000Z
tests/packages/CommandCasing/plugin.py
Thom1729/st_package_reviewer
71e4eaad60fe3391b0a4d39405b784ec84ea58bc
[ "MIT" ]
8
2017-05-31T21:16:49.000Z
2021-03-20T16:43:26.000Z
from sublime_plugin import ApplicationCommand # The print calls show what sublime_plugin.Command.name # translates the class names to. class not_pascal_case_command(ApplicationCommand): def run(self): print("not_pascal_case") class not_PascalCaseCommand(ApplicationCommand): def run(self): print("not__pascal_case") class not_PASCALcase_command(ApplicationCommand): def run(self): print("not__pASCALcase") class NotPascalCase_command(ApplicationCommand): def run(self): print("not_pascal_case") class NotPASCALCaseCommand(ApplicationCommand): def run(self): print("not_pASCALCase") class PascalCaseCommand(ApplicationCommand): def run(self): print("pascal_case")
18.390244
55
0.730769
83
754
6.39759
0.313253
0.237288
0.271186
0.316384
0.65725
0.65725
0.531073
0.517891
0.3258
0.3258
0
0
0.183024
754
40
56
18.85
0.862013
0.111406
0
0.421053
0
0
0.128936
0
0
0
0
0
0
1
0.315789
false
0
0.052632
0
0.684211
0.315789
0
0
0
null
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
0
1
0
0
5
fa635e15ee478fc1a7823a2191b4b2ec52f1308a
99
py
Python
enthought/enable/tools/viewport_pan_tool.py
enthought/etsproxy
4aafd628611ebf7fe8311c9d1a0abcf7f7bb5347
[ "BSD-3-Clause" ]
3
2016-12-09T06:05:18.000Z
2018-03-01T13:00:29.000Z
enthought/enable/tools/viewport_pan_tool.py
enthought/etsproxy
4aafd628611ebf7fe8311c9d1a0abcf7f7bb5347
[ "BSD-3-Clause" ]
1
2020-12-02T00:51:32.000Z
2020-12-02T08:48:55.000Z
enthought/enable/tools/viewport_pan_tool.py
enthought/etsproxy
4aafd628611ebf7fe8311c9d1a0abcf7f7bb5347
[ "BSD-3-Clause" ]
null
null
null
# proxy module from __future__ import absolute_import from enable.tools.viewport_pan_tool import *
24.75
44
0.848485
14
99
5.5
0.785714
0
0
0
0
0
0
0
0
0
0
0
0.111111
99
3
45
33
0.875
0.121212
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
fa63ce8a99a272002e6d6e7c564021a228ac7d22
90
py
Python
sicp/sicp/__init__.py
akshitdewan/cs61a-apps
155f2afe98b238fb4b1c4ca1c79610ec55e826e6
[ "MIT" ]
5
2020-09-10T01:45:09.000Z
2022-01-10T23:24:03.000Z
sicp/sicp/__init__.py
akshitdewan/cs61a-apps
155f2afe98b238fb4b1c4ca1c79610ec55e826e6
[ "MIT" ]
424
2020-08-24T06:22:59.000Z
2021-10-10T02:36:11.000Z
sicp/sicp/__init__.py
akshitdewan/cs61a-apps
155f2afe98b238fb4b1c4ca1c79610ec55e826e6
[ "MIT" ]
7
2020-08-28T22:05:10.000Z
2022-03-04T12:47:05.000Z
import sys import pathlib sys.path.append(str(pathlib.Path(__file__).parent.absolute()))
18
62
0.788889
13
90
5.153846
0.692308
0
0
0
0
0
0
0
0
0
0
0
0.066667
90
4
63
22.5
0.797619
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
0.666667
0
0.666667
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
fa6ed702fdff872ae94fc94199f30c88237532b5
56
py
Python
models/UploadRowModel/__init__.py
Marti2203/DBL_HTI
4b1d9176d31880159d496dbb2393ad6fb55f03d3
[ "MIT" ]
null
null
null
models/UploadRowModel/__init__.py
Marti2203/DBL_HTI
4b1d9176d31880159d496dbb2393ad6fb55f03d3
[ "MIT" ]
null
null
null
models/UploadRowModel/__init__.py
Marti2203/DBL_HTI
4b1d9176d31880159d496dbb2393ad6fb55f03d3
[ "MIT" ]
null
null
null
from .UploadRow import generate_model,generate_relations
56
56
0.910714
7
56
7
0.857143
0
0
0
0
0
0
0
0
0
0
0
0.053571
56
1
56
56
0.924528
0
0
0
1
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
1
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
0
0
5
3afa1bb2be25bc6149fb6b0b67b5e8671bbcbd00
98
py
Python
libs/core/cms/api/admin.py
myog-io/WebDjangular
73d3c40aa449eec5acc59d4493ee94059bddabbd
[ "MIT" ]
1
2018-09-14T15:17:19.000Z
2018-09-14T15:17:19.000Z
libs/core/cms/api/admin.py
MyOwnGamesLLC/WebDjangular
73d3c40aa449eec5acc59d4493ee94059bddabbd
[ "MIT" ]
41
2018-12-16T16:58:54.000Z
2019-02-22T20:08:58.000Z
libs/core/cms/api/admin.py
MyOwnGamesLLC/WebDjangular
73d3c40aa449eec5acc59d4493ee94059bddabbd
[ "MIT" ]
1
2018-10-02T16:45:46.000Z
2018-10-02T16:45:46.000Z
from django.contrib import admin from .models.Page import Page admin.site.register(Page)
14
33
0.744898
14
98
5.214286
0.642857
0
0
0
0
0
0
0
0
0
0
0
0.183673
98
6
34
16.333333
0.9125
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
0.666667
0
0.666667
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
3afbbe732d4f4ad927a25c643755212500ee7862
59
py
Python
corehq/messaging/smsbackends/turn/exceptions.py
roboton/commcare-hq
3ccbe59508d98dd1963ca87cf249dd2df8af8ecc
[ "BSD-3-Clause" ]
null
null
null
corehq/messaging/smsbackends/turn/exceptions.py
roboton/commcare-hq
3ccbe59508d98dd1963ca87cf249dd2df8af8ecc
[ "BSD-3-Clause" ]
1
2021-06-02T04:45:16.000Z
2021-06-02T04:45:16.000Z
corehq/messaging/smsbackends/turn/exceptions.py
roboton/commcare-hq
3ccbe59508d98dd1963ca87cf249dd2df8af8ecc
[ "BSD-3-Clause" ]
null
null
null
class WhatsAppTemplateStringException(Exception): pass
19.666667
49
0.830508
4
59
12.25
1
0
0
0
0
0
0
0
0
0
0
0
0.118644
59
2
50
29.5
0.942308
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0.5
0
0
0.5
0
1
0
1
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
1
0
0
0
0
0
5
d73148fdbe47267bda45b775047853c8e70bfdfe
32
py
Python
simulator/__init__.py
lucassm02/fiap-cptm
cd2d5d7ce0390f2d5a745ca8d1cc96dc7d6f3351
[ "MIT" ]
null
null
null
simulator/__init__.py
lucassm02/fiap-cptm
cd2d5d7ce0390f2d5a745ca8d1cc96dc7d6f3351
[ "MIT" ]
null
null
null
simulator/__init__.py
lucassm02/fiap-cptm
cd2d5d7ce0390f2d5a745ca8d1cc96dc7d6f3351
[ "MIT" ]
null
null
null
from .__main__ import Simulator
16
31
0.84375
4
32
5.75
1
0
0
0
0
0
0
0
0
0
0
0
0.125
32
1
32
32
0.821429
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
0
0
5
d733b1a882b1851ddaff82470b2e7c72c87c36be
265
py
Python
openamundsen/errors/__init__.py
openamundsen/openamundsen
2ac09eb34b0c72c84c421a0dac08d114a05b7b1c
[ "MIT" ]
3
2021-05-28T06:46:36.000Z
2021-06-14T13:39:25.000Z
openamundsen/errors/__init__.py
openamundsen/openamundsen
2ac09eb34b0c72c84c421a0dac08d114a05b7b1c
[ "MIT" ]
22
2021-04-28T12:31:58.000Z
2022-03-09T18:29:12.000Z
openamundsen/errors/__init__.py
openamundsen/openamundsen
2ac09eb34b0c72c84c421a0dac08d114a05b7b1c
[ "MIT" ]
1
2021-06-01T12:48:54.000Z
2021-06-01T12:48:54.000Z
class YamlReaderError(Exception): pass class ConfigurationError(Exception): pass class RasterFileError(Exception): pass class MeteoDataError(Exception): pass class CategoryError(Exception): pass class RuntimeError(Exception): pass
11.521739
36
0.735849
24
265
8.125
0.375
0.4
0.461538
0
0
0
0
0
0
0
0
0
0.196226
265
22
37
12.045455
0.915493
0
0
0.5
0
0
0
0
0
0
0
0
0
1
0
true
0.5
0
0
0.5
0
1
0
0
null
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
1
0
0
0
0
0
5
d74d80538ae8728fdeac198134a5e69365499422
108
py
Python
scratch_bot/scratch_bot.py
fo432/good
4f9f6f10ac9b8cf887616cf96c902d18acc381eb
[ "MIT" ]
13
2019-05-25T20:25:51.000Z
2022-03-19T13:36:23.000Z
scratch_bot/scratch_bot.py
fo432/good
4f9f6f10ac9b8cf887616cf96c902d18acc381eb
[ "MIT" ]
53
2019-06-07T13:31:59.000Z
2022-03-28T22:53:47.000Z
scratch_bot/scratch_bot.py
fo432/good
4f9f6f10ac9b8cf887616cf96c902d18acc381eb
[ "MIT" ]
78
2019-06-30T08:42:13.000Z
2022-03-23T20:11:42.000Z
from rlbot.agents.base_scratch_agent import BaseScratchAgent class ScratchBot(BaseScratchAgent): pass
18
60
0.833333
12
108
7.333333
0.916667
0
0
0
0
0
0
0
0
0
0
0
0.12037
108
5
61
21.6
0.926316
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0.333333
0.333333
0
0.666667
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
1
1
0
0
0
0
5
ad0c4ff78110d0ac25fb08990a47e6423dbce13b
58
py
Python
test/entrypoint.py
LaudateCorpus1/tf-super-resolution
440aa39fd93a0a5daabd29e9c95c9e0b766c709a
[ "Apache-2.0" ]
6
2019-12-02T17:15:52.000Z
2022-01-02T15:49:13.000Z
test/entrypoint.py
deepai-org/tf-super-resolution
440aa39fd93a0a5daabd29e9c95c9e0b766c709a
[ "Apache-2.0" ]
1
2022-02-24T02:28:46.000Z
2022-02-24T02:28:46.000Z
test/entrypoint.py
LaudateCorpus1/tf-super-resolution
440aa39fd93a0a5daabd29e9c95c9e0b766c709a
[ "Apache-2.0" ]
1
2022-02-24T02:28:36.000Z
2022-02-24T02:28:36.000Z
from inference import initialize_model initialize_model()
19.333333
38
0.87931
7
58
7
0.714286
0.612245
0
0
0
0
0
0
0
0
0
0
0.086207
58
3
39
19.333333
0.924528
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
0.5
0
0.5
0
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
0
0
5
ad1817e900f023886b2940b85336b358d51a60c7
190
py
Python
CPMel(2)/__init__.py
2921251087/CPMel-1
09afe43052f646e9e9bbd5c876614db211b1c42b
[ "Apache-2.0" ]
1
2021-08-01T12:49:50.000Z
2021-08-01T12:49:50.000Z
CPMel(3)/__init__.py
2921251087/CPMel-1
09afe43052f646e9e9bbd5c876614db211b1c42b
[ "Apache-2.0" ]
null
null
null
CPMel(3)/__init__.py
2921251087/CPMel-1
09afe43052f646e9e9bbd5c876614db211b1c42b
[ "Apache-2.0" ]
null
null
null
#!/usr/bin/python #-*-coding:gbk -*- import tool import createNodeMod from pymel.core import * from outputMaya import * def Update(): reload ( createNodeMod ) reload ( tool )
21.111111
29
0.668421
22
190
5.772727
0.681818
0
0
0
0
0
0
0
0
0
0
0
0.210526
190
9
30
21.111111
0.846667
0.173684
0
0
0
0
0
0
0
0
0
0
0
1
0.142857
true
0
0.571429
0
0.714286
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
ad2f2a6cfcb435cedc8c8937832d201960fc5a94
194
py
Python
src/outCards.py
Danni4real/Poker-Python
42429218e7d281894c5e6a2a7ee4c9219ee9c9a3
[ "MIT" ]
null
null
null
src/outCards.py
Danni4real/Poker-Python
42429218e7d281894c5e6a2a7ee4c9219ee9c9a3
[ "MIT" ]
null
null
null
src/outCards.py
Danni4real/Poker-Python
42429218e7d281894c5e6a2a7ee4c9219ee9c9a3
[ "MIT" ]
null
null
null
import cards class OutCards(Cards): def setPattern(self): pass def __lt__(self, otherOutCards): pass def __gt__(self, otherOutCards): pass
12.125
36
0.56701
19
194
5.368421
0.578947
0.137255
0.411765
0
0
0
0
0
0
0
0
0
0.360825
194
15
37
12.933333
0.822581
0
0
0.375
0
0
0
0
0
0
0
0
0
1
0.375
false
0.375
0.125
0
0.625
0
1
0
0
null
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
1
0
0
1
0
0
5
ad794e01f1478844104cfe638e7ed6e7d4099dae
5,494
py
Python
app/tests/v1/test_meetup_records.py
SolomonMacharia/Questioner
b81499e3375d447e91029da144ab704587b8e963
[ "MIT" ]
1
2019-03-01T11:00:40.000Z
2019-03-01T11:00:40.000Z
app/tests/v1/test_meetup_records.py
SolomonMacharia/Questioner
b81499e3375d447e91029da144ab704587b8e963
[ "MIT" ]
25
2019-01-04T11:40:06.000Z
2019-01-18T13:43:23.000Z
app/tests/v1/test_meetup_records.py
SolomonMacharia/Questioner
b81499e3375d447e91029da144ab704587b8e963
[ "MIT" ]
1
2019-01-10T20:36:36.000Z
2019-01-10T20:36:36.000Z
import os from datetime import datetime import pytest import unittest import json from app import create_app from app.api.v1.models.meetup_record_models import MeetupRecord from ...api.v1.views.meetup_record_views import meetup class TestQuestionModels(unittest.TestCase): def setUp(self): self.app = create_app() self.client = self.app.test_client() self.meetup = { "meetupId": "meetupId", "topic": "topic", "location": "The location", "images": "images", "tags": "tags" } def tearDown(self): del meetup.all_meetup_records[:] def test_api_can_create_a_meetup_record(self): res = self.client.post('/api/v1/meetups', data=json.dumps(self.meetup), content_type='application/json') self.assertEqual(res.status_code, 201) self.assertIn("The location", str(self.meetup)) self.assertIn("images", str(self.meetup)) def test_api_cannot_post_data_without_images(self): payload = {'location': 'location', 'tags': 'tags', 'topic': 'topic'} response = self.client.post('/api/v1/meetups', data=json.dumps(payload), content_type='application/json') self.assertEqual(response.status_code, 400) result = json.loads(response.data) self.assertEqual(result['errors'], ["images required"]) def test_api_cannot_post_data_without_location(self): payload = {'images': 'images', 'tags': 'tags', 'topic': 'topic'} response = self.client.post('/api/v1/meetups', data=json.dumps(payload), content_type='application/json') self.assertEqual(response.status_code, 400) result = json.loads(response.data) self.assertEqual(result['errors'], ["location required"]) def test_api_cannot_post_data_without_tags(self): payload = {'images': 'images', 'location': 'location', 'topic': 'topic'} response = self.client.post('/api/v1/meetups', data=json.dumps(payload), content_type='application/json') self.assertEqual(response.status_code, 400) result = json.loads(response.data) self.assertEqual(result['errors'], ["tags required"]) def test_api_cannot_post_data_without_topic(self): payload = {'images': 'images', 'location': 'location', 
'tags': 'tags'} response = self.client.post('/api/v1/meetups', data=json.dumps(payload), content_type='application/json') self.assertEqual(response.status_code, 400) result = json.loads(response.data) self.assertEqual(result['errors'], ["topic required"]) def test_api_cannot_post_data_without_image_data(self): payload = {'images': '', 'topic': 'topic', 'location': 'location', 'tags': 'tags'} response = self.client.post('/api/v1/meetups', data=json.dumps(payload), content_type='application/json') self.assertEqual(response.status_code, 400) result = json.loads(response.data) self.assertEqual(result['errors'], ["images cannot be empty"]) def test_api_cannot_post_data_without_topic_data(self): payload = {'images': 'images', 'topic': '', 'location': 'location', 'tags': 'tags'} response = self.client.post('/api/v1/meetups', data=json.dumps(payload), content_type='application/json') self.assertEqual(response.status_code, 400) result = json.loads(response.data) self.assertEqual(result['errors'], ["topic cannot be empty"]) def test_api_cannot_post_data_without_location_data(self): payload = {'images': 'images', 'topic': 'topic', 'location': '', 'tags': 'tags'} response = self.client.post('/api/v1/meetups', data=json.dumps(payload), content_type='application/json') self.assertEqual(response.status_code, 400) result = json.loads(response.data) self.assertEqual(result['errors'], ["location cannot be empty"]) def test_api_cannot_post_data_without_tags_data(self): payload = {'images': 'images', 'topic': 'topic', 'location': 'location', 'tags': ''} response = self.client.post('/api/v1/meetups', data=json.dumps(payload), content_type='application/json') self.assertEqual(response.status_code, 400) result = json.loads(response.data) self.assertEqual(result['errors'], ["tags cannot be empty"]) def test_api_can_fetch_all_meetup_records(self): res = self.client.post('/api/v1/meetups', data=json.dumps(self.meetup), content_type='application/json') self.assertEqual(res.status_code, 201) 
res = self.client.get('/api/v1/meetups/upcoming', content_type='application/json') self.assertEqual(res.status_code, 200) self.assertIn("The location", str(self.meetup)) def test_api_can_fetch_single_meetup_record(self): res = self.client.post('/api/v1/meetups', data=json.dumps(self.meetup), content_type='application/json') self.assertEqual(res.status_code, 201) res = self.client.get('/api/v1/meetups/upcoming/1', content_type='application/json') self.assertEqual(res.status_code, 200) def test_api_can_delete_record(self): res = self.client.post('/api/v1/meetups', data=json.dumps(self.meetup), content_type='application/json') self.assertEqual(res.status_code, 201) response = self.client.delete('/api/v1/meetups/upcoming/1/delete', content_type='application/json') self.assertEqual(response.status_code, 404) if __name__ == '__main__': unittest.main()
49.945455
113
0.6751
678
5,494
5.292035
0.10767
0.096154
0.050167
0.108696
0.830546
0.819955
0.777035
0.748328
0.67252
0.656076
0
0.014106
0.17419
5,494
110
114
49.945455
0.776725
0
0
0.391304
0
0
0.200182
0.015105
0
0
0
0
0.282609
1
0.152174
false
0
0.086957
0
0.25
0
0
0
0
null
0
0
0
1
1
1
1
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
5
a8f284307bc49821dfcecac2e32445be49fff298
149
py
Python
division.py
aash-gates/aash-python-babysteps
cb88b02b0d33ac74acb183d4f11f6baad0ad3db9
[ "Unlicense" ]
7
2020-11-16T18:23:21.000Z
2021-12-18T14:08:54.000Z
division.py
00mjk/aash-python-babysteps
c52ffbc2690ea387eaad6639bb9764b9ee015bfd
[ "Unlicense" ]
null
null
null
division.py
00mjk/aash-python-babysteps
c52ffbc2690ea387eaad6639bb9764b9ee015bfd
[ "Unlicense" ]
1
2020-12-21T15:59:44.000Z
2020-12-21T15:59:44.000Z
''' practice qusestion from chapter 1 Module 5 of IBM Digital Nation Courses by Aashik J Krishnan/Aash Gates ''' print(14 // 3) #end-of-the program
18.625
72
0.738255
25
149
4.4
0.96
0
0
0
0
0
0
0
0
0
0
0.040323
0.167785
149
8
73
18.625
0.846774
0.825503
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
0
0
0
1
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
1
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
0
0
0
1
0
5
a8f7113b49815b299dc420620ae0743cbfdabc51
283
py
Python
pyiw/exceptions.py
zatevaxin/pyiw
0b13611d67b62ab7d3006f0c6b44e3c05790299b
[ "MIT" ]
null
null
null
pyiw/exceptions.py
zatevaxin/pyiw
0b13611d67b62ab7d3006f0c6b44e3c05790299b
[ "MIT" ]
null
null
null
pyiw/exceptions.py
zatevaxin/pyiw
0b13611d67b62ab7d3006f0c6b44e3c05790299b
[ "MIT" ]
null
null
null
# -*- coding: utf-8 -*- class DeviceBusyError(Exception): pass class BlockedByRfKillError(Exception): pass class IncorrectInterfaceError(Exception): pass class IncorrectInterfaceStateError(Exception): pass class IncorrectInterfaceNameError(Exception): pass
15.722222
46
0.75265
23
283
9.26087
0.478261
0.305164
0.338028
0
0
0
0
0
0
0
0
0.004219
0.162544
283
17
47
16.647059
0.894515
0.074205
0
0.5
0
0
0
0
0
0
0
0
0
1
0
true
0.5
0
0
0.5
0
1
0
0
null
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
1
0
0
0
0
0
5
d13e169cfe616dbe04db769cc91c151603823a93
107
py
Python
25/02/oct.py
pylangstudy/201707
c1cc72667f1e0b6e8eef4ee85067d7fa4ca500b6
[ "CC0-1.0" ]
null
null
null
25/02/oct.py
pylangstudy/201707
c1cc72667f1e0b6e8eef4ee85067d7fa4ca500b6
[ "CC0-1.0" ]
46
2017-06-30T22:19:07.000Z
2017-07-31T22:51:31.000Z
25/02/oct.py
pylangstudy/201707
c1cc72667f1e0b6e8eef4ee85067d7fa4ca500b6
[ "CC0-1.0" ]
null
null
null
#oct(x) for i in range(16): print(i, oct(i)) print() for i in range(16): print('{0:2d} {0:>4o}'.format(i))
21.4
53
0.588785
24
107
2.625
0.5
0.126984
0.190476
0.349206
0.571429
0.571429
0
0
0
0
0
0.086022
0.130841
107
4
54
26.75
0.591398
0.056075
0
0
0
0
0.14
0
0
0
0
0
0
1
0
false
0
0
0
0
1
1
0
0
null
0
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
1
0
5
d14fa5b4a9461c2217f57feea75e2a71cac23808
135
py
Python
setup.py
hans00/ThriftPy2-HTTPX-Client
e94944218915bcec6b2e0c00200f5d5e6f823053
[ "MIT" ]
1
2021-05-21T16:34:24.000Z
2021-05-21T16:34:24.000Z
setup.py
hans00/ThriftPy2-HTTPX-Client
e94944218915bcec6b2e0c00200f5d5e6f823053
[ "MIT" ]
5
2021-07-13T13:56:17.000Z
2022-03-02T02:43:46.000Z
setup.py
aiudirog/Aiuti
ef3aec72d8b42074f9f4b84b30631745b09bde89
[ "MIT" ]
2
2021-07-13T06:08:59.000Z
2022-03-16T22:15:57.000Z
import versioneer from setuptools import setup setup( version=versioneer.get_version(), cmdclass=versioneer.get_cmdclass(), )
16.875
39
0.77037
15
135
6.8
0.533333
0.254902
0
0
0
0
0
0
0
0
0
0
0.140741
135
7
40
19.285714
0.87931
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
0.333333
0
0.333333
0
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
0
0
5
d16263231d677ef9daf4c1024b8c4bd824ee2671
609
py
Python
server/datawake/util/authentication/user.py
Sotera/Datawake
ae63058f74afb476ecc42a0d88c007049980db2e
[ "Apache-2.0" ]
21
2015-02-24T17:43:31.000Z
2021-04-04T12:11:01.000Z
server/datawake/util/authentication/user.py
Sotera/Datawake
ae63058f74afb476ecc42a0d88c007049980db2e
[ "Apache-2.0" ]
16
2015-01-07T17:56:24.000Z
2015-10-06T12:49:43.000Z
server/datawake/util/authentication/user.py
Sotera/Datawake
ae63058f74afb476ecc42a0d88c007049980db2e
[ "Apache-2.0" ]
7
2015-01-29T21:03:19.000Z
2017-09-03T10:24:19.000Z
class User(object): def __init__(self, user_name, user_id, email): self.user_name = user_name self.user_id = user_id self.email = email self.teams = [] def get_user_name(self): return self.user_name def get_user_id(self): return self.user_id def get_teams(self): return self.teams # TODO, remove this method and add support for multiple teams for each user. def get_org(self): return self.teams[0]['name'] def get_email(self): return self.email def set_teams(self, teams): self.teams = teams
24.36
80
0.622332
88
609
4.079545
0.284091
0.111421
0.194986
0.089136
0
0
0
0
0
0
0
0.002309
0.288998
609
25
81
24.36
0.82679
0.121511
0
0
0
0
0.007491
0
0
0
0
0.04
0
1
0.388889
false
0
0
0.277778
0.722222
0
0
0
0
null
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
1
0
0
1
0
0
0
1
1
0
0
5
d168f97baeee5b4e7b53fba525a6da8409eee752
199
py
Python
src/coming_soon/admin.py
tsotetsi/django-coming-soon
292eaa2df328d0c810389487c9206978829bcfd8
[ "MIT" ]
null
null
null
src/coming_soon/admin.py
tsotetsi/django-coming-soon
292eaa2df328d0c810389487c9206978829bcfd8
[ "MIT" ]
null
null
null
src/coming_soon/admin.py
tsotetsi/django-coming-soon
292eaa2df328d0c810389487c9206978829bcfd8
[ "MIT" ]
null
null
null
from django.contrib import admin from coming_soon.models import PrelaunchSignUp class PrelaunchSignUpAdmin(admin.ModelAdmin): pass admin.site.register(PrelaunchSignUp, PrelaunchSignUpAdmin)
18.090909
58
0.834171
21
199
7.857143
0.714286
0
0
0
0
0
0
0
0
0
0
0
0.110553
199
10
59
19.9
0.932203
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0.2
0.4
0
0.6
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
1
1
0
1
0
0
5
66fd7f055a736a316cc8ae0078d1162ed09fede2
90
py
Python
aoc2015/day25/day25_part2.py
GetPastTheMonkey/advent-of-code
db80be6d87baba4d5315cc69276905c55762da86
[ "MIT" ]
1
2019-09-15T16:37:24.000Z
2019-09-15T16:37:24.000Z
aoc2015/day25/day25_part2.py
GetPastTheMonkey/advent-of-code
db80be6d87baba4d5315cc69276905c55762da86
[ "MIT" ]
null
null
null
aoc2015/day25/day25_part2.py
GetPastTheMonkey/advent-of-code
db80be6d87baba4d5315cc69276905c55762da86
[ "MIT" ]
null
null
null
print("Merry christmas! You found the 50th star at the bottom of the weather machine :)")
45
89
0.755556
15
90
4.533333
0.866667
0
0
0
0
0
0
0
0
0
0
0.026667
0.166667
90
1
90
90
0.88
0
0
0
0
0
0.888889
0
0
0
0
0
0
1
0
true
0
0
0
0
1
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
1
0
null
0
0
0
0
0
0
1
0
0
0
0
1
0
5
0f1d24e169d5553a08017717f2dfd012bc94d5a0
128
wsgi
Python
PyQuM/ver(1.0)/pyqum/web.wsgi
takehuge/PYQUM
bfc9d9b1c2f4246c7aac3a371baaf587c99f8069
[ "MIT" ]
null
null
null
PyQuM/ver(1.0)/pyqum/web.wsgi
takehuge/PYQUM
bfc9d9b1c2f4246c7aac3a371baaf587c99f8069
[ "MIT" ]
null
null
null
PyQuM/ver(1.0)/pyqum/web.wsgi
takehuge/PYQUM
bfc9d9b1c2f4246c7aac3a371baaf587c99f8069
[ "MIT" ]
null
null
null
import sys sys.path.insert(0, 'C:/Users/ASQUM/Documents/GitHub/PYQUM/TEST/FACE') from pyqum.apache import app as application
25.6
69
0.773438
21
128
4.714286
0.857143
0
0
0
0
0
0
0
0
0
0
0.008696
0.101563
128
5
70
25.6
0.852174
0
0
0
0
0
0.364341
0.364341
0
0
0
0
0
1
0
true
0
0.666667
0
0.666667
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
0f21ecf1117ccf4150413b98fbd369d5782f1207
505
py
Python
Curso de Python 3 - Mundo 1 - Curso em Video/ex009.py
HebertFB/Curso-de-Python-Mundo-1-Curso-em-Video
885e3efe4227dd610a70f13b2260612249b48865
[ "MIT" ]
null
null
null
Curso de Python 3 - Mundo 1 - Curso em Video/ex009.py
HebertFB/Curso-de-Python-Mundo-1-Curso-em-Video
885e3efe4227dd610a70f13b2260612249b48865
[ "MIT" ]
null
null
null
Curso de Python 3 - Mundo 1 - Curso em Video/ex009.py
HebertFB/Curso-de-Python-Mundo-1-Curso-em-Video
885e3efe4227dd610a70f13b2260612249b48865
[ "MIT" ]
null
null
null
"""Faça um programa que leia um número Inteiro qualquer e mostre na tela a sua tabuada""" num = int(input('Informe um número: ')) print(f'=' * 11) print(f'{num} X {1:2} = {num*1}') print(f'{num} X {2:2} = {num*2}') print(f'{num} X {3:2} = {num*3}') print(f'{num} X {4:2} = {num*4}') print(f'{num} X {5:2} = {num*5}') print(f'{num} X {6:2} = {num*6}') print(f'{num} X {7:2} = {num*7}') print(f'{num} X {8:2} = {num*8}') print(f'{num} X {9:2} = {num*9}') print(f'{num} X {10} = {num*10}') print(f'=' * 11)
31.5625
89
0.536634
107
505
2.53271
0.308411
0.265683
0.332103
0.369004
0
0
0
0
0
0
0
0.08216
0.156436
505
15
90
33.666667
0.553991
0.164356
0
0.153846
0
0
0.603365
0
0
0
0
0
0
1
0
false
0
0
0
0
0.923077
0
0
0
null
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
null
0
0
0
0
0
0
0
0
0
0
0
1
0
5
0f3ddd1d46f09e30883217447a69b4d09a282a9d
75
py
Python
contact/models/__init__.py
rkisdp/rkisdp.django.backend
771481cdeea6a101305c4819b06b839266ce6921
[ "MIT" ]
null
null
null
contact/models/__init__.py
rkisdp/rkisdp.django.backend
771481cdeea6a101305c4819b06b839266ce6921
[ "MIT" ]
null
null
null
contact/models/__init__.py
rkisdp/rkisdp.django.backend
771481cdeea6a101305c4819b06b839266ce6921
[ "MIT" ]
null
null
null
from .social_links import SocialLink from .contact_form import ContactForm
25
37
0.866667
10
75
6.3
0.8
0
0
0
0
0
0
0
0
0
0
0
0.106667
75
2
38
37.5
0.940299
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
0f60214991b0ed14cdbc3964aee15356c6aaf2aa
288
py
Python
mmdet/models/roi_heads/roi_extractors/__init__.py
Brym-Gyimah/mmdetection
d5d749afe57c77e2ec4500395faed3566fdfedae
[ "Apache-2.0" ]
20,190
2018-09-10T01:11:53.000Z
2022-03-31T22:31:33.000Z
mmdet/models/roi_heads/roi_extractors/__init__.py
Joker-co/mmdet_pro
96abfd90cf0e38c5ce398795f949e9328eb85c1b
[ "Apache-2.0" ]
6,736
2018-09-17T09:45:51.000Z
2022-03-31T22:54:10.000Z
mmdet/models/roi_heads/roi_extractors/__init__.py
Joker-co/mmdet_pro
96abfd90cf0e38c5ce398795f949e9328eb85c1b
[ "Apache-2.0" ]
7,837
2018-09-11T02:58:23.000Z
2022-03-31T22:31:38.000Z
# Copyright (c) OpenMMLab. All rights reserved. from .base_roi_extractor import BaseRoIExtractor from .generic_roi_extractor import GenericRoIExtractor from .single_level_roi_extractor import SingleRoIExtractor __all__ = ['BaseRoIExtractor', 'SingleRoIExtractor', 'GenericRoIExtractor']
41.142857
75
0.847222
29
288
8.034483
0.586207
0.154506
0.23176
0
0
0
0
0
0
0
0
0
0.086806
288
6
76
48
0.885932
0.15625
0
0
0
0
0.219917
0
0
0
0
0
0
1
0
false
0
0.75
0
0.75
0
1
0
0
null
0
1
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
1
0
0
5
0f6a72fb178bdd796874e3181fbead174d5276f1
49
py
Python
refData/mlpy/mlpy-3.5.0/mlpy/hcluster/__init__.py
xrick/DTW-Tutorial
bbbce1c2beff91384cdcb7dbf503f93ad2fa285c
[ "MIT" ]
null
null
null
refData/mlpy/mlpy-3.5.0/mlpy/hcluster/__init__.py
xrick/DTW-Tutorial
bbbce1c2beff91384cdcb7dbf503f93ad2fa285c
[ "MIT" ]
null
null
null
refData/mlpy/mlpy-3.5.0/mlpy/hcluster/__init__.py
xrick/DTW-Tutorial
bbbce1c2beff91384cdcb7dbf503f93ad2fa285c
[ "MIT" ]
null
null
null
from hc import * import hc __all__ = hc.__all__
9.8
20
0.734694
8
49
3.5
0.5
0.357143
0
0
0
0
0
0
0
0
0
0
0.204082
49
4
21
12.25
0.717949
0
0
0
0
0
0
0
0
0
0
0
0
1
0
false
0
0.666667
0
0.666667
0
1
1
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
0
0
0
5
7e3f9c46d696a4d37463a58e7f796ed893d6c647
338
py
Python
game/views/__init__.py
SavagePastaMan/pyweek-2021
f5f0fc57d23507f065f98548663e3defe54b1f79
[ "MIT" ]
8
2021-03-27T21:20:28.000Z
2021-03-31T08:09:26.000Z
game/views/__init__.py
SavagePastaMan/pyweek-2021
f5f0fc57d23507f065f98548663e3defe54b1f79
[ "MIT" ]
49
2021-03-27T21:18:08.000Z
2021-04-03T02:53:53.000Z
game/views/__init__.py
SavagePastaMan/flawless
f5f0fc57d23507f065f98548663e3defe54b1f79
[ "MIT" ]
1
2021-04-02T21:58:39.000Z
2021-04-02T21:58:39.000Z
from .base_view import BaseView from .menu_view import MenuView, MenuField from .credits_view import CreditsView from .settings_view import SettingsView from .main_menu_view import MainMenuView from .pause_view import PauseView from .win_view import WinView from .instructions_view import InstructionsView from .game_view import GameView
33.8
47
0.860947
47
338
5.978723
0.468085
0.320285
0.099644
0
0
0
0
0
0
0
0
0
0.109467
338
9
48
37.555556
0.933555
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
0
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
7e9f5c1d9d653a25f0961323da5f75f09f7dd6d9
57
py
Python
edsnlp/pipelines/misc/reason/__init__.py
MohamedBsh/edsnlp
a58b31d62e14b029ed390364a7e15d99c1decd16
[ "BSD-3-Clause" ]
32
2022-03-08T16:45:09.000Z
2022-03-31T15:21:00.000Z
edsnlp/pipelines/misc/reason/__init__.py
MohamedBsh/edsnlp
a58b31d62e14b029ed390364a7e15d99c1decd16
[ "BSD-3-Clause" ]
19
2022-03-09T11:44:43.000Z
2022-03-31T14:32:06.000Z
edsnlp/pipelines/misc/reason/__init__.py
MohamedBsh/edsnlp
a58b31d62e14b029ed390364a7e15d99c1decd16
[ "BSD-3-Clause" ]
1
2022-03-11T16:14:21.000Z
2022-03-11T16:14:21.000Z
from .patterns import reasons from .reason import Reason
19
29
0.824561
8
57
5.875
0.625
0
0
0
0
0
0
0
0
0
0
0
0.140351
57
2
30
28.5
0.959184
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
0e4345006ec71e632217661d8268a444846a0f82
14,168
py
Python
experiments/transformers/model.py
baajur/transfer-nlp
85515b73165c299b7a9b96d3608bd4e8ee567154
[ "MIT" ]
307
2019-03-26T11:06:41.000Z
2022-02-20T00:34:25.000Z
experiments/transformers/model.py
baajur/transfer-nlp
85515b73165c299b7a9b96d3608bd4e8ee567154
[ "MIT" ]
30
2019-04-14T01:35:48.000Z
2021-04-15T20:11:59.000Z
experiments/transformers/model.py
baajur/transfer-nlp
85515b73165c299b7a9b96d3608bd4e8ee567154
[ "MIT" ]
19
2019-04-13T20:36:51.000Z
2020-07-05T15:19:17.000Z
""" This file contains models presented in the Transfer Learning for NLP Tutorial at NAACL 2019 Models are adapted from https://colab.research.google.com/drive/1iDHCYIrWswIKp-n-pOg69xLoZO09MEgf#scrollTo=_FfRT6GTjHhC&forceEdit=true&offline=true&sandboxMode=true This is a WIP document and work is needed so that we don't have to replicate so many transformer classes Ideally we'd like to have flexible transformer classes from which we can easily add task-dependent heads and add adapter tools, e.g. freezing the backbone and add residual connexion between layers. """ import torch from pytorch_pretrained_bert import BertTokenizer from transfer_nlp.plugins.config import register_plugin @register_plugin class Transformer(torch.nn.Module): def __init__(self, embed_dim: int, hidden_dim: int, num_embeddings: int, num_max_positions: int, num_heads: int, num_layers: int, dropout: float, causal: bool): super().__init__() self.causal: bool = causal self.tokens_embeddings: torch.nn.Embedding = torch.nn.Embedding(num_embeddings, embed_dim) self.position_embeddings: torch.nn.Embedding = torch.nn.Embedding(num_max_positions, embed_dim) self.dropout: torch.nn.Dropout = torch.nn.Dropout(dropout) self.attentions, self.feed_forwards = torch.nn.ModuleList(), torch.nn.ModuleList() self.layer_norms_1, self.layer_norms_2 = torch.nn.ModuleList(), torch.nn.ModuleList() for _ in range(num_layers): self.attentions.append(torch.nn.MultiheadAttention(embed_dim, num_heads, dropout=dropout)) self.feed_forwards.append(torch.nn.Sequential(torch.nn.Linear(embed_dim, hidden_dim), torch.nn.ReLU(), torch.nn.Linear(hidden_dim, embed_dim))) self.layer_norms_1.append(torch.nn.LayerNorm(embed_dim, eps=1e-12)) self.layer_norms_2.append(torch.nn.LayerNorm(embed_dim, eps=1e-12)) self.attn_mask = None self.tokenizer = BertTokenizer.from_pretrained('bert-base-cased', do_lower_case=False) def forward(self, x): """ x has shape [batch, seq length]""" padding_mask = (x == self.tokenizer.vocab['[PAD]']) x = 
x.transpose(0, 1).contiguous() positions = torch.arange(len(x), device=x.device).unsqueeze(-1) h = self.tokens_embeddings(x) h = h + self.position_embeddings(positions).expand_as(h) h = self.dropout(h) attn_mask = None if self.causal: attn_mask = torch.full((len(x), len(x)), -float('Inf'), device=h.device, dtype=h.dtype) attn_mask = torch.triu(attn_mask, diagonal=1) for layer_norm_1, attention, layer_norm_2, feed_forward in zip(self.layer_norms_1, self.attentions, self.layer_norms_2, self.feed_forwards): h = layer_norm_1(h) x, _ = attention(h, h, h, attn_mask=attn_mask, need_weights=False, key_padding_mask=padding_mask) x = self.dropout(x) h = x + h h = layer_norm_2(h) x = feed_forward(h) x = self.dropout(x) h = x + h return h @register_plugin class TransformerWithLMHead(torch.nn.Module): def __init__(self, embed_dim: int, hidden_dim: int, num_max_positions: int, num_heads: int, num_layers: int, dropout: float, causal: bool, initializer_range: float): """ Transformer with a language modeling head on top (tied weights) """ super().__init__() tokenizer = BertTokenizer.from_pretrained('bert-base-cased', do_lower_case=False) num_embeddings = len(tokenizer.vocab) self.initializer_range = initializer_range self.transformer = Transformer(embed_dim, hidden_dim, num_embeddings, num_max_positions, num_heads, num_layers, dropout, causal=causal) self.lm_head = torch.nn.Linear(embed_dim, num_embeddings, bias=False) self.apply(self.init_weights) self.tie_weights() def tie_weights(self): self.lm_head.weight = self.transformer.tokens_embeddings.weight def init_weights(self, module): """ initialize weights - nn.MultiheadAttention is already initalized by PyTorch (xavier) """ if isinstance(module, (torch.nn.Linear, torch.nn.Embedding, torch.nn.LayerNorm)): module.weight.data.normal_(mean=0.0, std=self.initializer_range) if isinstance(module, (torch.nn.Linear, torch.nn.LayerNorm)) and module.bias is not None: module.bias.data.zero_() def forward(self, x): """ x has shape [batch, seq 
length]""" hidden_states = self.transformer(x) logits = self.lm_head(hidden_states) return logits @register_plugin class LMLoss: def __init__(self, causal: bool): self.causal: bool = causal def __call__(self, input, target): input = input.transpose(0, 1).contiguous() shift_logits = input[:-1] if self.causal else input shift_labels = target[1:] if self.causal else target loss_fct = torch.nn.CrossEntropyLoss(ignore_index=-1) loss = loss_fct(shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1)) return loss @register_plugin class TransformerWithClfHead(torch.nn.Module): def __init__(self, embed_dim: int, hidden_dim: int, num_max_positions: int, num_heads: int, num_layers: int, dropout: float, causal: bool, initializer_range: float, num_classes: int): super().__init__() self.tokenizer = BertTokenizer.from_pretrained('bert-base-cased', do_lower_case=False) num_embeddings = len(self.tokenizer.vocab) self.initializer_range = initializer_range self.transformer = Transformer(embed_dim, hidden_dim, num_embeddings, num_max_positions, num_heads, num_layers, dropout, causal=causal) self.classification_head = torch.nn.Linear(embed_dim, num_classes) self.apply(self.init_weights) def init_weights(self, module): if isinstance(module, (torch.nn.Linear, torch.nn.Embedding, torch.nn.LayerNorm)): module.weight.data.normal_(mean=0.0, std=self.initializer_range) if isinstance(module, (torch.nn.Linear, torch.nn.LayerNorm)) and module.bias is not None: module.bias.data.zero_() def forward(self, x): # x = x.transpose(0, 1).contiguous().to('cpu') clf_tokens_mask = (x.transpose(0, 1).contiguous().to('cpu') == self.tokenizer.vocab['[CLS]']) hidden_states = self.transformer(x) msk = clf_tokens_mask.unsqueeze(-1).float() clf_tokens_states = (hidden_states * msk).sum(dim=0) clf_logits = self.classification_head(clf_tokens_states) return clf_logits @register_plugin class FineTuningLoss: def __call__(self, input, target): loss_fct = torch.nn.CrossEntropyLoss(ignore_index=-1) loss = 
loss_fct(input.view(-1, input.size(-1)), target.view(-1)) return loss class TransformerWithAdapters(Transformer): def __init__(self, adapters_dim, embed_dim, hidden_dim, num_embeddings, num_max_positions, num_heads, num_layers, dropout, causal): """ Transformer with adapters (small bottleneck layers) """ super().__init__(embed_dim, hidden_dim, num_embeddings, num_max_positions, num_heads, num_layers, dropout, causal) self.adapters_1 = torch.nn.ModuleList() self.adapters_2 = torch.nn.ModuleList() for _ in range(num_layers): self.adapters_1.append(torch.nn.Sequential(torch.nn.Linear(embed_dim, adapters_dim), torch.nn.ReLU(), torch.nn.Linear(adapters_dim, embed_dim))) self.adapters_2.append(torch.nn.Sequential(torch.nn.Linear(embed_dim, adapters_dim), torch.nn.ReLU(), torch.nn.Linear(adapters_dim, embed_dim))) def forward(self, x): """ x has shape [batch, seq length]""" padding_mask = (x == self.tokenizer.vocab['[PAD]']) x = x.transpose(0, 1).contiguous() positions = torch.arange(len(x), device=x.device).unsqueeze(-1) h = self.tokens_embeddings(x) h = h + self.position_embeddings(positions).expand_as(h) h = self.dropout(h) attn_mask = None if self.causal: attn_mask = torch.full((len(x), len(x)), -float('Inf'), device=h.device, dtype=h.dtype) attn_mask = torch.triu(attn_mask, diagonal=1) for (layer_norm_1, attention, adapter_1, layer_norm_2, feed_forward, adapter_2) \ in zip(self.layer_norms_1, self.attentions, self.adapters_1, self.layer_norms_2, self.feed_forwards, self.adapters_2): h = layer_norm_1(h) x, _ = attention(h, h, h, attn_mask=attn_mask, need_weights=False, key_padding_mask=padding_mask) x = self.dropout(x) x = adapter_1(x) + x # Add an adapter with a skip-connection after attention module h = x + h h = layer_norm_2(h) x = feed_forward(h) x = self.dropout(x) x = adapter_2(x) + x # Add an adapter with a skip-connection after feed-forward module h = x + h return h @register_plugin class TransformerWithClfHeadAndAdapters(torch.nn.Module): def __init__(self, 
adapters_dim: int, embed_dim: int, hidden_dim: int, num_max_positions: int, num_heads: int, num_layers: int, dropout: float, causal: bool, initializer_range: float, num_classes: int): """ Transformer with a classification head and adapters. """ super().__init__() self.initializer_range: float = initializer_range self.tokenizer = BertTokenizer.from_pretrained('bert-base-cased', do_lower_case=False) num_embeddings = len(self.tokenizer.vocab) self.num_layers = num_layers self.transformer: TransformerWithAdapters = TransformerWithAdapters(adapters_dim, embed_dim, hidden_dim, num_embeddings, num_max_positions, num_heads, num_layers, dropout, causal=causal) self.classification_head = torch.nn.Linear(embed_dim, num_classes) self.apply(self.init_weights) def init_weights(self, module): if isinstance(module, (torch.nn.Linear, torch.nn.Embedding, torch.nn.LayerNorm)): module.weight.data.normal_(mean=0.0, std=self.initializer_range) if isinstance(module, (torch.nn.Linear, torch.nn.LayerNorm)) and module.bias is not None: module.bias.data.zero_() def forward(self, x): clf_tokens_mask = (x.transpose(0, 1).contiguous().to('cpu') == self.tokenizer.vocab['[CLS]']) hidden_states = self.transformer(x) clf_tokens_states = (hidden_states * clf_tokens_mask.unsqueeze(-1).float()).sum(dim=0) clf_logits = self.classification_head(clf_tokens_states) return clf_logits @register_plugin class TransformerWithClfHeadAndLMHead(torch.nn.Module): def __init__(self, embed_dim: int, hidden_dim: int, num_max_positions: int, num_heads: int, num_layers: int, dropout: float, causal: bool, initializer_range: float, num_classes: int): super().__init__() self.initializer_range: float = initializer_range self.tokenizer = BertTokenizer.from_pretrained('bert-base-cased', do_lower_case=False) num_embeddings = len(self.tokenizer.vocab) self.num_layers = num_layers self.transformer = Transformer(embed_dim, hidden_dim, num_embeddings, num_max_positions, num_heads, num_layers, dropout, causal=causal) self.lm_head 
= torch.nn.Linear(embed_dim, num_embeddings, bias=False) self.classification_head = torch.nn.Linear(embed_dim, num_classes) self.apply(self.init_weights) self.tie_weights() def tie_weights(self): self.lm_head.weight = self.transformer.tokens_embeddings.weight def init_weights(self, module): if isinstance(module, (torch.nn.Linear, torch.nn.Embedding, torch.nn.LayerNorm)): module.weight.data.normal_(mean=0.0, std=self.initializer_range) if isinstance(module, (torch.nn.Linear, torch.nn.LayerNorm)) and module.bias is not None: module.bias.data.zero_() def forward(self, x): """ x and clf_tokens_mask have shape [seq length, batch] padding_mask has shape [batch, seq length] """ clf_tokens_mask = (x.transpose(0, 1).contiguous().to('cpu') == self.tokenizer.vocab['[CLS]']) hidden_states = self.transformer(x) lm_logits = self.lm_head(hidden_states) clf_tokens_states = (hidden_states * clf_tokens_mask.unsqueeze(-1).float()).sum(dim=0) clf_logits = self.classification_head(clf_tokens_states) return lm_logits, clf_logits @register_plugin class MultiTaskLoss: def __init__(self, causal: bool): self.causal: bool = causal def __call__(self, lm_logits, clf_logits, lm_labels, clf_labels): lm_logits = lm_logits.transpose(0, 1).contiguous() loss_fct = torch.nn.CrossEntropyLoss(ignore_index=-1) loss_clf = loss_fct(clf_logits.view(-1, clf_logits.size(-1)), clf_labels.view(-1)) shift_logits = lm_logits[:-1] if self.causal else lm_logits shift_labels = lm_labels[1:] if self.causal else lm_labels loss_fct = torch.nn.CrossEntropyLoss(ignore_index=-1) loss_lm = loss_fct(shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1)) return loss_lm, loss_clf
45.55627
164
0.644481
1,809
14,168
4.807629
0.124931
0.049097
0.028401
0.016557
0.778774
0.748764
0.724273
0.711395
0.694377
0.660803
0
0.009016
0.248447
14,168
311
165
45.55627
0.807757
0.083498
0
0.710407
0
0
0.008895
0
0
0
0
0
0
1
0.104072
false
0
0.013575
0
0.199095
0
0
0
0
null
0
0
0
0
1
1
1
0
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
5
0e4a60de959371d5bb34873a97c1691c09cc90c7
433
py
Python
Hierarchical Tags (1835859645) by Patrice Neff/__init__.py
kb1900/Anki-Addons
3b764af8657065c369d404025a3f11c964192a33
[ "MIT" ]
1
2019-06-23T04:46:24.000Z
2019-06-23T04:46:24.000Z
Hierarchical Tags (1835859645) by Patrice Neff/__init__.py
kb1900/Anki-Addons
3b764af8657065c369d404025a3f11c964192a33
[ "MIT" ]
null
null
null
Hierarchical Tags (1835859645) by Patrice Neff/__init__.py
kb1900/Anki-Addons
3b764af8657065c369d404025a3f11c964192a33
[ "MIT" ]
null
null
null
# -*- coding: utf-8 -*- # # Entry point for the add-on into Anki # Please do not edit this if you do not know what you are doing. # Hierarchical Tags Addon: https://github.com/pneff/anki-hierarchical-tags # Copyright: 2014(c) Patrice Neff http://patrice.ch/> # Ported to Anki 2.1 by Frodo 07/10/2017 # This addon is licensed under the same license as Anki itself (GNU Affero General Public License 3) from . import hierarchical_tags
43.3
100
0.741339
74
433
4.324324
0.797297
0.15
0
0
0
0
0
0
0
0
0
0.044077
0.161663
433
10
101
43.3
0.837466
0.886836
0
0
1
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
1
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
0e652ecfaead0fd3fcc5bf2bc1ba76e23ca4705a
92
py
Python
config_gen/admin.py
lkmhaqer/gtools-python
cff6d80525b78a4fadfb686566489fbe1687d889
[ "MIT" ]
5
2016-10-31T17:46:17.000Z
2022-02-02T00:40:49.000Z
config_gen/admin.py
lkmhaqer/gtools-python
cff6d80525b78a4fadfb686566489fbe1687d889
[ "MIT" ]
33
2018-05-09T06:07:50.000Z
2021-09-22T17:39:56.000Z
config_gen/admin.py
lkmhaqer/gtools-python
cff6d80525b78a4fadfb686566489fbe1687d889
[ "MIT" ]
1
2020-05-14T21:44:25.000Z
2020-05-14T21:44:25.000Z
# file: config_gen/admin.py from django.contrib import admin # Register your models here.
15.333333
32
0.771739
14
92
5
0.928571
0
0
0
0
0
0
0
0
0
0
0
0.152174
92
5
33
18.4
0.897436
0.565217
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
0eac811b768a8999fcc600aafc6895b564af02b9
99
py
Python
wsgi.py
mohamed-aziz/todoapp-vuejs
65ce9470e17ece659dd549fe0d93071d39773473
[ "MIT" ]
null
null
null
wsgi.py
mohamed-aziz/todoapp-vuejs
65ce9470e17ece659dd549fe0d93071d39773473
[ "MIT" ]
1
2017-03-06T10:48:50.000Z
2017-03-06T12:32:38.000Z
wsgi.py
mohamed-aziz/todoapp-vuejs
65ce9470e17ece659dd549fe0d93071d39773473
[ "MIT" ]
null
null
null
from todoapp import create_app from todoapp.config import prodConfig app = create_app(prodConfig)
19.8
37
0.838384
14
99
5.785714
0.5
0.271605
0
0
0
0
0
0
0
0
0
0
0.121212
99
4
38
24.75
0.931034
0
0
0
0
0
0
0
0
0
0
0
0
1
0
false
0
0.666667
0
0.666667
0
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
1
0
0
5
7ebe1f3dbf1ef5ee47596a3bd5bd9bff6d1c0ff7
137
py
Python
tests/test_tgwf_datasette.py
thegreenwebfoundation/dataset-browser
961a25dc8a9004dbd79beb5c9bfbf3bc33026fa5
[ "Apache-2.0" ]
null
null
null
tests/test_tgwf_datasette.py
thegreenwebfoundation/dataset-browser
961a25dc8a9004dbd79beb5c9bfbf3bc33026fa5
[ "Apache-2.0" ]
4
2021-02-18T09:22:09.000Z
2021-04-28T13:36:44.000Z
tests/test_tgwf_datasette.py
thegreenwebfoundation/dataset-browser
961a25dc8a9004dbd79beb5c9bfbf3bc33026fa5
[ "Apache-2.0" ]
null
null
null
from tgwf_datasette import __version__ def test_version(): assert __version__ == "0.1.0" def test_new_thing(): assert 1 == 1
13.7
38
0.693431
20
137
4.15
0.6
0.168675
0
0
0
0
0
0
0
0
0
0.045872
0.20438
137
9
39
15.222222
0.715596
0
0
0
0
0
0.036496
0
0
0
0
0
0.4
1
0.4
true
0
0.2
0
0.6
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
1
0
0
0
1
0
0
5
7ef10b3ca8e359ee832010960b3b9e219513e37a
1,490
py
Python
src/mhw_armor_edit/editor/__init__.py
CarlosFdez/mhw_armor_edit
5c4fd8db33d16c4ea34acd0ada000cdc5ffc626c
[ "Unlicense" ]
42
2018-09-17T05:34:31.000Z
2021-07-14T08:32:26.000Z
src/mhw_armor_edit/editor/__init__.py
spallSPELLW/mhw_armor_edit
88781d32f76a43867141ad3a287fb1a82bc654c5
[ "Unlicense" ]
20
2018-10-28T15:45:48.000Z
2020-07-27T01:43:29.000Z
src/mhw_armor_edit/editor/__init__.py
spallSPELLW/mhw_armor_edit
88781d32f76a43867141ad3a287fb1a82bc654c5
[ "Unlicense" ]
16
2018-10-05T02:20:15.000Z
2020-12-05T13:55:15.000Z
# coding: utf-8 from mhw_armor_edit.editor.arm_up_editor import ArmUpPlugin from mhw_armor_edit.editor.armor_editor import AmDatPlugin from mhw_armor_edit.editor.bbtbl_editor import BbtblPlugin from mhw_armor_edit.editor.crafting_editor import EqCrtPlugin from mhw_armor_edit.editor.eqcus_editor import EqCusPlugin from mhw_armor_edit.editor.gmd_editor import GmdPlugin from mhw_armor_edit.editor.itm_editor import ItmPlugin from mhw_armor_edit.editor.kire_editor import KirePlugin from mhw_armor_edit.editor.lbm_base_editor import LbmBasePlugin from mhw_armor_edit.editor.lbm_skill_editor import LbmSkillPlugin from mhw_armor_edit.editor.mkex_editor import MkexPlugin from mhw_armor_edit.editor.mkit_editor import MkitPlugin from mhw_armor_edit.editor.otomo_armor_editor import OtomoArmorEditorPlugin from mhw_armor_edit.editor.sgpa_editor import SgpaPlugin from mhw_armor_edit.editor.shell_table_editor import ShlTblPlugin from mhw_armor_edit.editor.skill_data_editor import SklDatPlugin from mhw_armor_edit.editor.skill_point_data_editor import SklPtDatPlugin from mhw_armor_edit.editor.weapon_editor import WpDatPlugin from mhw_armor_edit.editor.weapon_gun_editor import WpDatGPlugin from mhw_armor_edit.editor.wep_glan_editor import WepGlanPlugin from mhw_armor_edit.editor.wep_wsl_editor import WepWslPlugin from mhw_armor_edit.editor.wep_saxe_editor import WepSaxePlugin from mhw_armor_edit.editor.sed_editor import SedPlugin from mhw_armor_edit.editor.stmp_editor import StmpPlugin
57.307692
75
0.899329
231
1,490
5.437229
0.25974
0.133758
0.229299
0.305732
0.449841
0.187102
0
0
0
0
0
0.000719
0.066443
1,490
25
76
59.6
0.902229
0.008725
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
0
0
0
null
0
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
7d032d67f95b054d7b28a46beb9488b034acc670
599
py
Python
python/spdm/geometry/Surface.py
simpla-fusion/spdb
be6667eb6c7d464f68b0fd51ca2a8f021581eb84
[ "MIT" ]
null
null
null
python/spdm/geometry/Surface.py
simpla-fusion/spdb
be6667eb6c7d464f68b0fd51ca2a8f021581eb84
[ "MIT" ]
null
null
null
python/spdm/geometry/Surface.py
simpla-fusion/spdb
be6667eb6c7d464f68b0fd51ca2a8f021581eb84
[ "MIT" ]
null
null
null
from .GeoObject import GeoObject class Surface(GeoObject): def __init__(self, *args, **kwargs) -> None: super().__init__(*args, **kwargs) def points(self, *args, **kwargs): return super().points(*args, **kwargs) def map(self, u, *args, **kwargs): return NotImplemented def derivative(self, u, *args, **kwargs): return NotImplemented def dl(self, u, *args, **kwargs): return NotImplemented def pullback(self, func, *args, **kwargs): return NotImplemented def make_one_form(self, func): return NotImplemented
23.96
48
0.622705
67
599
5.41791
0.358209
0.220386
0.220386
0.330579
0.404959
0.31405
0.31405
0
0
0
0
0
0.24374
599
24
49
24.958333
0.801325
0
0
0.3125
0
0
0
0
0
0
0
0
0
1
0.4375
false
0
0.0625
0.375
0.9375
0
0
0
0
null
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
1
0
0
0
5
7d0690673ace500374d03a9b7b351053f24ab990
101
py
Python
app/questions/__init__.py
ThembiNsele/ClimateMind-Backend
0e418000b2a0141a1e4a7c11dbe3564082a3f4bb
[ "MIT" ]
6
2020-08-20T10:49:59.000Z
2022-01-24T16:49:46.000Z
app/questions/__init__.py
ThembiNsele/ClimateMind-Backend
0e418000b2a0141a1e4a7c11dbe3564082a3f4bb
[ "MIT" ]
95
2020-07-24T22:32:34.000Z
2022-03-05T15:01:16.000Z
app/questions/__init__.py
ThembiNsele/ClimateMind-Backend
0e418000b2a0141a1e4a7c11dbe3564082a3f4bb
[ "MIT" ]
5
2020-07-30T17:29:09.000Z
2021-01-10T19:46:15.000Z
from flask import Blueprint bp = Blueprint("questions", __name__) from app.questions import routes
16.833333
37
0.792079
13
101
5.846154
0.692308
0
0
0
0
0
0
0
0
0
0
0
0.138614
101
5
38
20.2
0.873563
0
0
0
0
0
0.089109
0
0
0
0
0
0
1
0
false
0
0.666667
0
0.666667
0.666667
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
1
1
0
5
adb6eb989bcf246f7f163dea7a5776854a352629
1,902
py
Python
tests/test_response.py
hexatester/dapodik
d89c0fb899c89e866527f6b7b57f741abd6444ea
[ "MIT" ]
4
2021-02-01T15:19:35.000Z
2022-01-26T02:47:21.000Z
tests/test_response.py
hexatester/dapodik
d89c0fb899c89e866527f6b7b57f741abd6444ea
[ "MIT" ]
3
2020-01-08T17:07:15.000Z
2020-01-08T18:05:12.000Z
tests/test_response.py
hexatester/dapodik
d89c0fb899c89e866527f6b7b57f741abd6444ea
[ "MIT" ]
2
2021-08-04T13:48:08.000Z
2021-12-25T02:36:49.000Z
from dapodik.peserta_didik import PesertaDidik from dapodik.response import DapodikResponse def test_dapodik_response(): data = """{ 'success' : true, 'message' : 'Berhasil mengupdate PesertaDidik', 'rows' : {"peserta_didik_id":"a5a3ccff-53e2-4228-b321-541f59324a9e","nama":"ADAM","jenis_kelamin":"L","nisn":null,"nik":"1111222233334444","no_kk":"1111222233334444","tempat_lahir":"INDONESIA","tanggal_lahir":"1945-8-17","agama_id":1,"kebutuhan_khusus_id":0,"alamat_jalan":"Indonesia","rt":"1","rw":"1","nama_dusun":"JAKARTA","desa_kelurahan":"KALIREJO","kode_wilayah":"123456","kode_pos":"10000","lintang":"0","bujur":"0","jenis_tinggal_id":"1","alat_transportasi_id":"1","nik_ayah":"1111222233334444","nik_ibu":"1111222233334444","anak_keberapa":"1","nik_wali":null,"nomor_telepon_rumah":null,"nomor_telepon_seluler":null,"email":null,"penerima_kps":"0","no_kps":null,"layak_pip":"0","penerima_kip":"0","no_kip":null,"nm_kip":"0","no_kks":null,"reg_akta_lahir":null,"id_layak_pip":null,"id_bank":null,"rekening_bank":null,"nama_kcp":null,"rekening_atas_nama":null,"status_data":0,"nama_ayah":"ADAM SENIOR","tahun_lahir_ayah":"1945","jenjang_pendidikan_ayah":"6","pekerjaan_id_ayah":6,"penghasilan_id_ayah":13,"kebutuhan_khusus_id_ayah":0,"nama_ibu_kandung":"MAMA ADAM","tahun_lahir_ibu":"1945","jenjang_pendidikan_ibu":"6","penghasilan_id_ibu":13,"pekerjaan_id_ibu":6,"kebutuhan_khusus_id_ibu":0,"nama_wali":null,"tahun_lahir_wali":null,"jenjang_pendidikan_wali":"0","pekerjaan_id_wali":0,"penghasilan_id_wali":0,"kewarganegaraan":"ID","pekerjaan_id":0,"create_date":"2021-08-08 11:26:30","last_update":"2021-08-07 20:10:41","soft_delete":"0","last_sync":"2021-08-07 19:40:41","updater_id":"69374600-ca7f-4f4b-8d50-128fb3825220"} }""" response = DapodikResponse.from_str(data, PesertaDidik) assert isinstance(response, DapodikResponse) assert isinstance(response.rows, PesertaDidik)
190.2
1,618
0.758149
280
1,902
4.839286
0.453571
0.032472
0.037638
0
0
0
0
0
0
0
0
0.111714
0.030494
1,902
9
1,619
211.333333
0.623102
0
0
0
0
0.142857
0.841746
0.797581
0
0
0
0
0.285714
1
0.142857
false
0
0.285714
0
0.428571
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
1
1
0
0
0
0
0
0
1
1
null
0
0
0
0
0
0
0
0
0
0
0
0
0
5
add0a472ae5633384e82ff3215139e143eb08372
106
py
Python
src/typeDefs/section_1_12/section_1_12.py
dheerajgupta0001/wrldc_mis_monthly_report_generator
dd5ae6f28ec6bf8e6532820fd71dd63f8b223f0b
[ "MIT" ]
null
null
null
src/typeDefs/section_1_12/section_1_12.py
dheerajgupta0001/wrldc_mis_monthly_report_generator
dd5ae6f28ec6bf8e6532820fd71dd63f8b223f0b
[ "MIT" ]
null
null
null
src/typeDefs/section_1_12/section_1_12.py
dheerajgupta0001/wrldc_mis_monthly_report_generator
dd5ae6f28ec6bf8e6532820fd71dd63f8b223f0b
[ "MIT" ]
null
null
null
from typing import TypedDict, List class ISection_1_12(TypedDict): num_plts_sec_inter_regional: int
21.2
37
0.811321
16
106
5
0.9375
0
0
0
0
0
0
0
0
0
0
0.032967
0.141509
106
5
37
21.2
0.846154
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
0.333333
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
addcad082e5c4d1cf9c82a58a123dea9139939a5
31
py
Python
templates/base/datasets/my_datasets.py
timojl/tralo
90b928c0cb38dbc2a324d8761bce1b2a422f5e31
[ "MIT" ]
null
null
null
templates/base/datasets/my_datasets.py
timojl/tralo
90b928c0cb38dbc2a324d8761bce1b2a422f5e31
[ "MIT" ]
null
null
null
templates/base/datasets/my_datasets.py
timojl/tralo
90b928c0cb38dbc2a324d8761bce1b2a422f5e31
[ "MIT" ]
null
null
null
# put your custom datasets here
31
31
0.806452
5
31
5
1
0
0
0
0
0
0
0
0
0
0
0
0.16129
31
1
31
31
0.961538
0.935484
0
null
0
null
0
0
null
0
0
0
null
1
null
true
0
0
null
null
null
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
1
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
0
0
0
0
0
5
70d5e0661518aee73380187b20a040dd3eb19dc7
33
py
Python
src/lib/advanced/advanced_compute.py
eamonboyle/abacus
75ae8a0b8b5213c33949f68e2cdf112ec9392a4e
[ "MIT" ]
null
null
null
src/lib/advanced/advanced_compute.py
eamonboyle/abacus
75ae8a0b8b5213c33949f68e2cdf112ec9392a4e
[ "MIT" ]
1
2020-01-23T09:29:28.000Z
2020-01-23T09:48:19.000Z
src/lib/advanced/advanced_compute.py
eamonboyle/abacus
75ae8a0b8b5213c33949f68e2cdf112ec9392a4e
[ "MIT" ]
null
null
null
#Add methods for sin, tan and cos
33
33
0.757576
7
33
3.571429
1
0
0
0
0
0
0
0
0
0
0
0
0.181818
33
1
33
33
0.925926
0.969697
0
null
0
null
0
0
null
0
0
0
null
1
null
true
0
0
null
null
null
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
1
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
0
0
0
0
0
5
70d8f0694756bbac93e629cc5979a2ab35112fcc
242
py
Python
Zad_Bridge/Biblioteka.py
Paarzivall/Wzorce-Projektowe
aa4136f140ad02c0fc0de45709b5a01ca42b417f
[ "MIT" ]
null
null
null
Zad_Bridge/Biblioteka.py
Paarzivall/Wzorce-Projektowe
aa4136f140ad02c0fc0de45709b5a01ca42b417f
[ "MIT" ]
null
null
null
Zad_Bridge/Biblioteka.py
Paarzivall/Wzorce-Projektowe
aa4136f140ad02c0fc0de45709b5a01ca42b417f
[ "MIT" ]
null
null
null
from abc import ABC, abstractmethod class Biblioteka(ABC): def __init__(self): pass @abstractmethod def rysujLinie(self, x1, y1, x2, y2): pass @abstractmethod def rysujOkrag(self, x, y, z): pass
17.285714
41
0.61157
29
242
4.965517
0.655172
0.25
0.291667
0
0
0
0
0
0
0
0
0.023529
0.297521
242
14
42
17.285714
0.823529
0
0
0.5
0
0
0
0
0
0
0
0
0
1
0.3
false
0.3
0.1
0
0.5
0
1
0
0
null
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
1
0
0
0
0
0
5
cb0b11ba483c4595abc25bb18a0de40feb23133a
160
py
Python
Aula_55/dao/autor_dao.py
Mateus-Silva11/AulasPython
d34dc4f62ade438e68b0a80e0baac4d6ec0d378e
[ "MIT" ]
null
null
null
Aula_55/dao/autor_dao.py
Mateus-Silva11/AulasPython
d34dc4f62ade438e68b0a80e0baac4d6ec0d378e
[ "MIT" ]
null
null
null
Aula_55/dao/autor_dao.py
Mateus-Silva11/AulasPython
d34dc4f62ade438e68b0a80e0baac4d6ec0d378e
[ "MIT" ]
null
null
null
from Aula_55.dao.base_dao import BaseDao from Aula_55.model.autor import Autor class AutorDao(BaseDao): def __init__(self): super().__init__(Autor)
26.666667
40
0.75
24
160
4.541667
0.625
0.146789
0.183486
0
0
0
0
0
0
0
0
0.02963
0.15625
160
6
41
26.666667
0.777778
0
0
0
0
0
0
0
0
0
0
0
0
1
0.2
false
0
0.4
0
0.8
0
1
0
0
null
0
1
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
1
0
0
5
cb162ef60fe85ec5979e9c0957bf3955718f87e3
1,571
py
Python
tests/test_others.py
mwouts/world_bank_data
79fc0c429c3f64c7a9bea2b90951c1c1e73b7b3d
[ "MIT" ]
91
2019-04-07T06:52:37.000Z
2022-03-24T09:02:24.000Z
tests/test_others.py
mwouts/world_bank_data
79fc0c429c3f64c7a9bea2b90951c1c1e73b7b3d
[ "MIT" ]
16
2019-04-08T12:17:17.000Z
2020-12-17T12:51:52.000Z
tests/test_others.py
mwouts/world_bank_data
79fc0c429c3f64c7a9bea2b90951c1c1e73b7b3d
[ "MIT" ]
29
2019-06-02T04:40:37.000Z
2022-02-06T21:52:46.000Z
from world_bank_data import get_lendingtypes, get_incomelevels, get_sources, get_topics from .tools import assert_numeric_or_string def test_lending_types(): df = get_lendingtypes() assert df.index.names == ['id'] assert set(df.columns) == set(['iso2code', 'value']) assert_numeric_or_string(df) def test_income_levels(): df = get_incomelevels() assert df.index.names == ['id'] assert set(df.columns) == set(['iso2code', 'value']) assert_numeric_or_string(df) def test_topics(): df = get_topics() assert df.index.names == ['id'] assert set(df.columns) == set(['value', 'sourceNote']) assert_numeric_or_string(df) def test_sources(): df = get_sources() assert df.index.names == ['id'] assert set(df.columns) == set(['lastupdated', 'name', 'code', 'description', 'url', 'dataavailability', 'metadataavailability', 'concepts']) assert_numeric_or_string(df) def test_sources_int(): df = get_sources(11) assert df.index.names == ['id'] assert set(df.columns) == set(['lastupdated', 'name', 'code', 'description', 'url', 'dataavailability', 'metadataavailability', 'concepts']) assert_numeric_or_string(df) def test_sources_two_int(): df = get_sources([11, 36]) assert df.index.names == ['id'] assert set(df.columns) == set(['lastupdated', 'name', 'code', 'description', 'url', 'dataavailability', 'metadataavailability', 'concepts']) assert_numeric_or_string(df)
32.729167
107
0.634628
184
1,571
5.179348
0.228261
0.095488
0.110178
0.15425
0.767051
0.731375
0.731375
0.731375
0.69255
0.69255
0
0.006462
0.211967
1,571
47
108
33.425532
0.763328
0
0
0.571429
0
0
0.180777
0
0
0
0
0
0.542857
1
0.171429
false
0
0.057143
0
0.228571
0
0
0
0
null
0
0
0
0
1
1
1
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
1
0
0
0
0
0
0
0
0
0
5
cb342d43295e929e4cf69d6ed970929f204a2b59
45,929
py
Python
tests/sc/test_analysis.py
Rogdham/pyTenable
79f3f7360f8ef31b964f1db99d0c7b8a0bc25d7a
[ "MIT" ]
1
2022-03-01T17:17:19.000Z
2022-03-01T17:17:19.000Z
tests/sc/test_analysis.py
Rogdham/pyTenable
79f3f7360f8ef31b964f1db99d0c7b8a0bc25d7a
[ "MIT" ]
25
2021-11-16T18:41:36.000Z
2022-03-25T05:43:31.000Z
tests/sc/test_analysis.py
Rogdham/pyTenable
79f3f7360f8ef31b964f1db99d0c7b8a0bc25d7a
[ "MIT" ]
2
2022-03-02T12:24:40.000Z
2022-03-29T05:12:04.000Z
''' test file for testing various scenarios in analysis ''' import pytest from tenable.errors import UnexpectedValueError from tenable.sc.analysis import AnalysisResultsIterator from ..checker import check def test_analysis_constructor_type_error(security_center): ''' test analysis constructor for type error ''' with pytest.raises(TypeError): getattr(security_center.analysis, '_analysis')(tool=1, type='type', sort_field='field', sort_direction=1) with pytest.raises(TypeError): getattr(security_center.analysis, '_analysis')(tool=1, type='type', sort_field='field', sort_direction='ASC', offset=0, limit='limit') def test_analysis_constructor_success(security_center): ''' test analysis constructor for success ''' analysis = getattr(security_center.analysis, '_analysis')(tool=1, type='type', sort_field='field', sort_direction='ASC', offset=0, payload={'sourceType': 'individual'}) assert isinstance(analysis, AnalysisResultsIterator) def test_analysis_asset_expansion_simple(security_center): ''' test analysis asset expansion simple for success ''' resp = getattr(security_center.analysis, '_combo_expansion')(('or', 1, 2)) assert resp == { 'operator': 'union', 'operand1': {'id': '1'}, 'operand2': {'id': '2'}, } def test_analysis_asset_expansion_complex(security_center): ''' test analysis asset expansion complex for success ''' resp = getattr(security_center.analysis, '_combo_expansion')( ('or', ('and', 1, 2), ('not', ('or', 3, 4)))) assert resp == { 'operator': 'union', 'operand1': { 'operator': 'intersection', 'operand1': {'id': '1'}, 'operand2': {'id': '2'}, }, 'operand2': { 'operator': 'complement', 'operand1': { 'operator': 'union', 'operand1': {'id': '3'}, 'operand2': {'id': '4'} } } } def test_analysis_query_constructor_simple(security_center): ''' test analysis query constructor simple for success ''' resp = getattr(security_center.analysis, '_query_constructor')( ('filter1', 'operator1', 'value1'), ('filter2', 'operator2', 'value2'), tool='tool_test', 
type='type_test') assert resp == { 'tool': 'tool_test', 'query': { 'tool': 'tool_test', 'type': 'type_test', 'filters': [{ 'filterName': 'filter1', 'operator': 'operator1', 'value': 'value1', }, { 'filterName': 'filter2', 'operator': 'operator2', 'value': 'value2' }] } } def test_analysis_query_constructor_replace(security_center): ''' test analysis query constructor replace for success ''' resp = getattr(security_center.analysis, '_query_constructor')( ('filter1', 'operator1', 'badvalue'), ('filter1', 'operator1', 'value1'), ('filter2', 'operator2', 'value2'), tool='tool_test', type='type_test') assert resp == { 'tool': 'tool_test', 'query': { 'tool': 'tool_test', 'type': 'type_test', 'filters': [{ 'filterName': 'filter1', 'operator': 'operator1', 'value': 'value1', }, { 'filterName': 'filter2', 'operator': 'operator2', 'value': 'value2' }] } } def test_analysis_query_constructor_remove(security_center): ''' test analysis query constructor remove for success ''' resp = getattr(security_center.analysis, '_query_constructor')( ('filter3', 'operator1', 'badvalue'), ('filter1', 'operator1', 'value1'), ('filter2', 'operator2', 'value2'), ('filter3', None, None), tool='tool_test', type='type_test') assert resp == { 'tool': 'tool_test', 'query': { 'tool': 'tool_test', 'type': 'type_test', 'filters': [{ 'filterName': 'filter1', 'operator': 'operator1', 'value': 'value1', }, { 'filterName': 'filter2', 'operator': 'operator2', 'value': 'value2' }] } } def test_analysis_query_constructor_asset(security_center): ''' test analysis query constructor asset for success ''' resp = getattr(security_center.analysis, '_query_constructor')(('asset', '~', ('or', 1, 2)), tool='tool', type='type') assert resp == { 'tool': 'tool', 'query': { 'tool': 'tool', 'type': 'type', 'filters': [{ 'filterName': 'asset', 'operator': '~', 'value': { 'operator': 'union', 'operand1': {'id': '1'}, 'operand2': {'id': '2'}, } }] } } def test_analysis_vulns(security_center): ''' test analysis 
vulnerabilities for success ''' vulns = security_center.analysis.vulns(source='cumulative', scan_id=1) assert isinstance(vulns, AnalysisResultsIterator) def test_analysis_scan(security_center): ''' test analysis scan for success ''' scan = security_center.analysis.scan(1) assert isinstance(scan, AnalysisResultsIterator) def test_analysis_events(security_center): ''' test analysis events for success ''' event = security_center.analysis.events(source='archive', silo_id='silo_id') assert isinstance(event, AnalysisResultsIterator) def test_analysis_events_unexpected_value_error(security_center): ''' test analysis events for unexpected value error ''' with pytest.raises(UnexpectedValueError): security_center.analysis.events(source='archive') @pytest.mark.vcr() def test_analysis_vulns_cceipdetail_tool(security_center): ''' test analysis vulnerabilities cceip detail tool for success ''' vulns = security_center.analysis.vulns(tool='cceipdetail', pages=2, limit=5) for vuln in vulns: assert isinstance(vuln, dict) @pytest.mark.vcr() def test_analysis_vulns_cveipdetail_tool(security_center): ''' test analysis vulnerabilities cveip detail tool for success ''' vulns = security_center.analysis.vulns(tool='cveipdetail', pages=2, limit=5) for vuln in vulns: assert isinstance(vuln, dict) check(vuln, 'cveID', str) check(vuln, 'total', str) check(vuln, 'hosts', list) host = vuln['hosts'][0] check(host, 'iplist', list) check(host, 'repositoryID', str) for ip_address in host['iplist']: check(ip_address, 'ip', str) check(ip_address, 'netbiosName', str) check(ip_address, 'dnsName', str) check(ip_address, 'uuid', str) check(ip_address, 'macAddress', str) @pytest.mark.vcr() def test_analysis_vulns_iavmipdetail_tool(security_center): ''' test analysis vulnerabilities iavmip detail tool for success ''' vulns = security_center.analysis.vulns(tool='iavmipdetail', pages=2, limit=5) for vuln in vulns: assert isinstance(vuln, dict) check(vuln, 'iavmID', str) check(vuln, 'total', str) check(vuln, 
'hosts', list) host = vuln['hosts'][0] check(host, 'iplist', list) check(host, 'repositoryID', str) for ip_address in host['iplist']: check(ip_address, 'ip', str) check(ip_address, 'netbiosName', str) check(ip_address, 'dnsName', str) check(ip_address, 'uuid', str) check(ip_address, 'macAddress', str) @pytest.mark.vcr() def test_analysis_vulns_iplist_tool(security_center): '''test to get the iplist''' vulns = getattr(security_center.analysis, 'vulns')(tool='iplist', pages=2, limit=5) assert isinstance(vulns, dict) for vuln in vulns: check(vulns, vuln, str) @pytest.mark.vcr() def test_analysis_vulns_listmailclients_tool(security_center): ''' test analysis vulnerabilities list mail clients tool for success ''' vulns = security_center.analysis.vulns(tool='listmailclients', pages=2, limit=5) for vuln in vulns: assert isinstance(vuln, dict) check(vuln, 'count', str) check(vuln, 'detectionMethod', str) check(vuln, 'name', str) @pytest.mark.vcr() def test_analysis_vulns_listservices_tool(security_center): ''' test analysis vulnerabilities list services tool for success ''' vulns = security_center.analysis.vulns(tool='listservices', pages=2, limit=5) for vuln in vulns: assert isinstance(vuln, dict) check(vuln, 'count', str) check(vuln, 'detectionMethod', str) check(vuln, 'name', str) @pytest.mark.vcr() def test_analysis_vulns_listos_tool(security_center): ''' test analysis vulnerabilities list os tool for success ''' vulns = security_center.analysis.vulns(tool='listos', pages=2, limit=5) for vuln in vulns: assert isinstance(vuln, dict) check(vuln, 'count', str) check(vuln, 'detectionMethod', str) check(vuln, 'name', str) @pytest.mark.vcr() def test_analysis_vulns_listsoftware_tool(security_center): ''' test analysis vulnerabilities list software tool for success ''' vulns = security_center.analysis.vulns(tool='listsoftware', pages=2, limit=5) for vuln in vulns: assert isinstance(vuln, dict) check(vuln, 'count', str) check(vuln, 'detectionMethod', str) check(vuln, 'name', 
str) @pytest.mark.vcr() def test_analysis_vulns_listsshservers_tool(security_center): ''' test analysis vulnerabilities list ssh servers tool for success ''' vulns = security_center.analysis.vulns(tool='listsshservers', pages=2, limit=5) for vuln in vulns: assert isinstance(vuln, dict) check(vuln, 'count', str) check(vuln, 'detectionMethod', str) check(vuln, 'name', str) @pytest.mark.vcr() def test_analysis_vulns_listvuln_tool(security_center): ''' test analysis vulnerabilities list vulnerability tool for success ''' vulns = security_center.analysis.vulns(tool='listvuln', pages=2, limit=5) for vuln in vulns: assert isinstance(vuln, dict) check(vuln, 'macAddress', str) check(vuln, 'uniqueness', str) check(vuln, 'protocol', str) check(vuln, 'severity', dict) check(vuln['severity'], 'description', str) check(vuln['severity'], 'id', str) check(vuln['severity'], 'name', str) check(vuln, 'family', dict) check(vuln['family'], 'type', str) check(vuln['family'], 'id', str) check(vuln['family'], 'name', str) check(vuln, 'pluginInfo', str) check(vuln, 'ip', str) check(vuln, 'netbiosName', str) check(vuln, 'name', str) check(vuln, 'repository', dict) check(vuln['repository'], 'description', str) check(vuln['repository'], 'dataFormat', str) check(vuln['repository'], 'id', str) check(vuln['repository'], 'name', str) check(vuln, 'pluginID', str) check(vuln, 'dnsName', str) check(vuln, 'port', str) check(vuln, 'uuid', str) @pytest.mark.vcr() def test_analysis_vulns_listwebclients_tool(security_center): ''' test analysis vulnerabilities list web clients tool for success ''' vulns = security_center.analysis.vulns(tool='listwebclients', pages=2, limit=5) for vuln in vulns: assert isinstance(vuln, dict) check(vuln, 'count', str) check(vuln, 'detectionMethod', str) check(vuln, 'name', str) @pytest.mark.vcr() def test_analysis_vulns_listwebservers_tool(security_center): ''' test analysis vulnerabilities list web servers tool for success ''' vulns = 
security_center.analysis.vulns(tool='listwebservers', pages=2, limit=5) for vuln in vulns: assert isinstance(vuln, dict) check(vuln, 'count', str) check(vuln, 'detectionMethod', str) check(vuln, 'name', str) @pytest.mark.vcr() def test_analysis_vulns_sumasset_tool(security_center): ''' test analysis vulnerabilities sum asset tool for success ''' vulns = security_center.analysis.vulns(tool='sumasset', pages=2, limit=5) for vuln in vulns: assert isinstance(vuln, dict) check(vuln, 'severityCritical', str) check(vuln, 'severityHigh', str) check(vuln, 'severityMedium', str) check(vuln, 'severityLow', str) check(vuln, 'severityInfo', str) check(vuln, 'total', str) check(vuln, 'score', str) check(vuln, 'asset', dict) check(vuln['asset'], 'status', str) check(vuln['asset'], 'description', str) check(vuln['asset'], 'type', str) check(vuln['asset'], 'id', str) check(vuln['asset'], 'name', str) @pytest.mark.vcr() def test_analysis_vulns_sumcce_tool(security_center): ''' test analysis vulnerabilities sum cce tool for success ''' vulns = security_center.analysis.vulns(tool='sumcce', pages=2, limit=5) for vuln in vulns: assert isinstance(vuln, dict) @pytest.mark.vcr() def test_analysis_vulns_sumclassa_tool(security_center): ''' test analysis vulnerabilities sum class-a tool for success ''' vulns = security_center.analysis.vulns(tool='sumclassa', pages=2, limit=5) for vuln in vulns: assert isinstance(vuln, dict) check(vuln, 'severityCritical', str) check(vuln, 'severityHigh', str) check(vuln, 'severityMedium', str) check(vuln, 'severityLow', str) check(vuln, 'severityInfo', str) check(vuln, 'total', str) check(vuln, 'score', str) check(vuln, 'ip', str) check(vuln, 'repository', dict) check(vuln['repository'], 'dataFormat', str) check(vuln['repository'], 'description', str) check(vuln['repository'], 'id', str) check(vuln['repository'], 'name', str) @pytest.mark.vcr() def test_analysis_vulns_sumclassb_tool(security_center): ''' test analysis vulnerabilities sum class-b tool for 
success ''' vulns = security_center.analysis.vulns(tool='sumclassb', pages=2, limit=5) for vuln in vulns: assert isinstance(vuln, dict) check(vuln, 'severityCritical', str) check(vuln, 'severityHigh', str) check(vuln, 'severityMedium', str) check(vuln, 'severityLow', str) check(vuln, 'severityInfo', str) check(vuln, 'total', str) check(vuln, 'score', str) check(vuln, 'ip', str) check(vuln, 'repository', dict) check(vuln['repository'], 'dataFormat', str) check(vuln['repository'], 'description', str) check(vuln['repository'], 'id', str) check(vuln['repository'], 'name', str) @pytest.mark.vcr() def test_analysis_vulns_sumclassc_tool(security_center): ''' test analysis vulnerabilities sum class-c tool for success ''' vulns = security_center.analysis.vulns(tool='sumclassc', pages=2, limit=5) for vuln in vulns: assert isinstance(vuln, dict) check(vuln, 'severityCritical', str) check(vuln, 'severityHigh', str) check(vuln, 'severityMedium', str) check(vuln, 'severityLow', str) check(vuln, 'severityInfo', str) check(vuln, 'total', str) check(vuln, 'score', str) check(vuln, 'ip', str) check(vuln, 'repository', dict) check(vuln['repository'], 'dataFormat', str) check(vuln['repository'], 'description', str) check(vuln['repository'], 'id', str) check(vuln['repository'], 'name', str) @pytest.mark.vcr() def test_analysis_vulns_sumcve_tool(security_center): ''' test analysis vulnerabilities sum cve detail tool for success ''' vulns = security_center.analysis.vulns(tool='sumcve', pages=2, limit=5) for vuln in vulns: assert isinstance(vuln, dict) check(vuln, 'cveID', str) check(vuln, 'total', str) check(vuln, 'severity', dict) check(vuln['severity'], 'description', str) check(vuln['severity'], 'id', str) check(vuln['severity'], 'name', str) check(vuln, 'hostTotal', str) @pytest.mark.vcr() def test_analysis_vulns_sumdnsname_tool(security_center): ''' test analysis vulnerabilities sum dns name tool for success ''' vulns = security_center.analysis.vulns(tool='sumdnsname', pages=2, 
limit=5) for vuln in vulns: assert isinstance(vuln, dict) check(vuln, 'dnsName', str) check(vuln, 'score', str) check(vuln, 'severityCritical', str) check(vuln, 'severityHigh', str) check(vuln, 'severityMedium', str) check(vuln, 'severityLow', str) check(vuln, 'severityInfo', str) check(vuln, 'repository', dict) check(vuln['repository'], 'dataFormat', str) check(vuln['repository'], 'description', str) check(vuln['repository'], 'id', str) check(vuln['repository'], 'name', str) @pytest.mark.vcr() def test_analysis_vulns_sumfamily_tool(security_center): ''' test analysis vulnerabilities sum family tool for success ''' vulns = security_center.analysis.vulns(tool='sumfamily', pages=2, limit=5) for vuln in vulns: assert isinstance(vuln, dict) check(vuln, 'family', dict) check(vuln['family'], 'type', str) check(vuln['family'], 'id', str) check(vuln['family'], 'name', str) check(vuln, 'score', str) check(vuln, 'total', str) check(vuln, 'severityCritical', str) check(vuln, 'severityHigh', str) check(vuln, 'severityMedium', str) check(vuln, 'severityLow', str) check(vuln, 'severityInfo', str) @pytest.mark.vcr() def test_analysis_vulns_sumiavm_tool(security_center): ''' test analysis vulnerabilities sum iavm tool for success ''' vulns = security_center.analysis.vulns(tool='sumiavm', pages=2, limit=5) for vuln in vulns: assert isinstance(vuln, dict) check(vuln, 'iavmID', str) check(vuln, 'total', str) check(vuln, 'severity', dict) check(vuln['severity'], 'description', str) check(vuln['severity'], 'id', str) check(vuln['severity'], 'name', str) check(vuln, 'hostTotal', str) @pytest.mark.vcr() def test_analysis_vulns_sumid_tool(security_center): ''' test analysis vulnerabilities sum id tool for success ''' vulns = security_center.analysis.vulns(tool='sumid', pages=2, limit=5) for vuln in vulns: assert isinstance(vuln, dict) check(vuln, 'severity', dict) check(vuln['severity'], 'description', str) check(vuln['severity'], 'id', str) check(vuln['severity'], 'name', str) 
check(vuln, 'family', dict) check(vuln['family'], 'type', str) check(vuln['family'], 'id', str) check(vuln['family'], 'name', str) check(vuln, 'hostTotal', str) check(vuln, 'pluginID', str) check(vuln, 'total', str) check(vuln, 'name', str) @pytest.mark.vcr() def test_analysis_vulns_sumip_tool(security_center): ''' test analysis vulnerabilities sum ip tool for success ''' vulns = security_center.analysis.vulns(tool='sumip', pages=2, limit=5) for vuln in vulns: assert isinstance(vuln, dict) check(vuln, 'macAddress', str) check(vuln, 'lastAuthRun', str) check(vuln, 'ip', str) check(vuln, 'severityCritical', str) check(vuln, 'severityHigh', str) check(vuln, 'severityMedium', str) check(vuln, 'severityLow', str) check(vuln, 'severityInfo', str) check(vuln, 'total', str) check(vuln, 'mcafeeGUID', str) check(vuln, 'policyName', str) check(vuln, 'uuid', str) check(vuln, 'osCPE', str) check(vuln, 'uniqueness', str) check(vuln, 'score', str) check(vuln, 'dnsName', str) check(vuln, 'lastUnauthRun', str) check(vuln, 'biosGUID', str) check(vuln, 'tpmID', str) check(vuln, 'pluginSet', str) check(vuln, 'netbiosName', str) check(vuln, 'repository', dict) check(vuln['repository'], 'dataFormat', str) check(vuln['repository'], 'description', str) check(vuln['repository'], 'id', str) check(vuln['repository'], 'name', str) @pytest.mark.vcr() def test_analysis_vulns_summsbulletin_tool(security_center): ''' test analysis vulnerabilities sum ms bulletin tool for success ''' vulns = security_center.analysis.vulns(tool='summsbulletin', pages=2, limit=5) for vuln in vulns: assert isinstance(vuln, dict) check(vuln, 'msbulletinID', str) check(vuln, 'total', str) check(vuln, 'severity', dict) check(vuln['severity'], 'description', str) check(vuln['severity'], 'id', str) check(vuln['severity'], 'name', str) check(vuln, 'hostTotal', str) @pytest.mark.vcr() def test_analysis_vulns_sumport_tool(security_center): ''' test analysis vulnerabilities sum port tool for success ''' vulns = 
security_center.analysis.vulns(tool='sumport', pages=2, limit=5) for vuln in vulns: assert isinstance(vuln, dict) check(vuln, 'port', str) check(vuln, 'score', str) check(vuln, 'total', str) check(vuln, 'severityCritical', str) check(vuln, 'severityHigh', str) check(vuln, 'severityMedium', str) check(vuln, 'severityLow', str) check(vuln, 'severityInfo', str) @pytest.mark.vcr() def test_analysis_vulns_sumprotocol_tool(security_center): ''' test analysis vulnerabilities sum protocol tool for success ''' vulns = security_center.analysis.vulns(tool='sumprotocol', pages=2, limit=5) for vuln in vulns: assert isinstance(vuln, dict) check(vuln, 'protocol', str) check(vuln, 'score', str) check(vuln, 'total', str) check(vuln, 'severityCritical', str) check(vuln, 'severityHigh', str) check(vuln, 'severityMedium', str) check(vuln, 'severityLow', str) check(vuln, 'severityInfo', str) @pytest.mark.vcr() def test_analysis_vulns_sumremediation_tool(security_center): ''' test analysis vulnerabilities sum remediation tool for success ''' vulns = security_center.analysis.vulns(tool='sumremediation', pages=2, limit=5) for vuln in vulns: assert isinstance(vuln, dict) check(vuln, 'hostTotal', str) check(vuln, 'scorePctg', str) check(vuln, 'totalPctg', str) check(vuln, 'msbulletinTotal', str) check(vuln, 'remediationList', str) check(vuln, 'cpe', str) check(vuln, 'cveTotal', str) check(vuln, 'solution', str) check(vuln, 'pluginID', str) check(vuln, 'score', str) check(vuln, 'total', str) @pytest.mark.vcr() def test_analysis_vulns_sumseverity_tool(security_center): ''' test analysis vulnerabilities sum severity tool for success ''' vulns = security_center.analysis.vulns(tool='sumseverity', pages=2, limit=5) for vuln in vulns: assert isinstance(vuln, dict) check(vuln, 'count', str) check(vuln, 'severity', dict) check(vuln['severity'], 'description', str) check(vuln['severity'], 'id', str) check(vuln['severity'], 'name', str) @pytest.mark.vcr() def 
test_analysis_vulns_sum_user_responsibility_tool(security_center): ''' test analysis vulnerabilities sum user responsibility tool for success ''' vulns = security_center.analysis.vulns(tool='sumuserresponsibility', pages=2, limit=5) for vuln in vulns: assert isinstance(vuln, dict) check(vuln, 'score', str) check(vuln, 'total', str) check(vuln, 'severityCritical', str) check(vuln, 'severityHigh', str) check(vuln, 'severityMedium', str) check(vuln, 'severityLow', str) check(vuln, 'severityInfo', str) check(vuln, 'userList', list) for user in vuln['userList']: check(user, 'firstname', str) check(user, 'id', str) check(user, 'lastname', str) check(user, 'status', str) check(user, 'username', str) @pytest.mark.vcr() def test_analysis_vulns_trend_tool(security_center): ''' test analysis vulnerabilities trend tool for success ''' vulns = security_center.analysis.vulns(tool='trend', pages=2, limit=5) for vuln in vulns: assert isinstance(vuln, dict) @pytest.mark.vcr() def test_analysis_vulns_vulndetails_tool(security_center): ''' test analysis vulnerabilities 'vulnerability details' tool for success ''' vulns = security_center.analysis.vulns(tool='vulndetails', pages=2, limit=5) for vuln in vulns: assert isinstance(vuln, dict) check(vuln, 'acceptRisk', str) check(vuln, 'baseScore', str) check(vuln, 'bid', str) check(vuln, 'checkType', str) check(vuln, 'cpe', str) check(vuln, 'cve', str) check(vuln, 'cvssV3BaseScore', str) check(vuln, 'cvssV3TemporalScore', str) check(vuln, 'cvssV3Vector', str) check(vuln, 'cvssVector', str) check(vuln, 'description', str) check(vuln, 'dnsName', str) check(vuln, 'exploitAvailable', str) check(vuln, 'exploitEase', str) check(vuln, 'exploitFrameworks', str) check(vuln, 'family', dict) check(vuln['family'], 'type', str) check(vuln['family'], 'id', str) check(vuln['family'], 'name', str) check(vuln, 'firstSeen', str) check(vuln, 'hasBeenMitigated', str) check(vuln, 'ip', str) check(vuln, 'lastSeen', str) check(vuln, 'macAddress', str) 
check(vuln, 'netbiosName', str) check(vuln, 'patchPubDate', str) check(vuln, 'pluginID', str) check(vuln, 'pluginInfo', str) check(vuln, 'pluginModDate', str) check(vuln, 'pluginName', str) check(vuln, 'pluginPubDate', str) check(vuln, 'pluginText', str) check(vuln, 'port', str) check(vuln, 'protocol', str) check(vuln, 'recastRisk', str) check(vuln, 'repository', dict) check(vuln['repository'], 'dataFormat', str) check(vuln['repository'], 'description', str) check(vuln['repository'], 'id', str) check(vuln['repository'], 'name', str) check(vuln, 'riskFactor', str) check(vuln, 'seeAlso', str) check(vuln, 'severity', dict) check(vuln['severity'], 'description', str) check(vuln['severity'], 'id', str) check(vuln['severity'], 'name', str) check(vuln, 'solution', str) check(vuln, 'stigSeverity', str) check(vuln, 'synopsis', str) check(vuln, 'temporalScore', str) check(vuln, 'uniqueness', str) check(vuln, 'uuid', str) check(vuln, 'version', str) check(vuln, 'vulnPubDate', str) check(vuln, 'xref', str) @pytest.mark.vcr() def test_analysis_vulns_vulnipdetail_tool(security_center): ''' test analysis vulnerabilities 'vulnerability ip detail' tool for success ''' vulns = security_center.analysis.vulns(tool='vulnipdetail', pages=2, limit=5) for vuln in vulns: check(vuln, 'family', dict) check(vuln['family'], 'type', str) check(vuln['family'], 'id', str) check(vuln['family'], 'name', str) check(vuln, 'hosts', list) host = vuln['hosts'][0] check(host, 'iplist', list) check(host, 'repository', dict) check(host['repository'], 'dataFormat', str) check(host['repository'], 'description', str) check(host['repository'], 'id', str) check(host['repository'], 'name', str) for ip_address in host['iplist']: check(ip_address, 'ip', str) check(ip_address, 'netbiosName', str) check(ip_address, 'dnsName', str) check(ip_address, 'uuid', str) check(ip_address, 'macAddress', str) check(vuln, 'name', str) check(vuln, 'pluginDescription', str) check(vuln, 'pluginID', str) check(vuln, 'severity', 
dict) check(vuln['severity'], 'description', str) check(vuln['severity'], 'id', str) check(vuln['severity'], 'name', str) check(vuln, 'total', str) @pytest.mark.vcr() def test_analysis_vulns_vulnipsummary_tool(security_center): ''' test analysis vulnerabilities 'vulnerability ip summary' tool for success ''' vulns = security_center.analysis.vulns(tool='vulnipsummary', pages=2, limit=5) for vuln in vulns: check(vuln, 'family', dict) check(vuln['family'], 'type', str) check(vuln['family'], 'id', str) check(vuln['family'], 'name', str) check(vuln, 'hosts', list) host = vuln['hosts'][0] check(host, 'iplist', str) check(host, 'repository', dict) check(host['repository'], 'dataFormat', str) check(host['repository'], 'description', str) check(host['repository'], 'id', str) check(host['repository'], 'name', str) check(vuln, 'name', str) check(vuln, 'pluginDescription', str) check(vuln, 'pluginID', str) check(vuln, 'severity', dict) check(vuln['severity'], 'description', str) check(vuln['severity'], 'id', str) check(vuln['severity'], 'name', str) check(vuln, 'total', str) @pytest.mark.vcr() def test_analysis_console_logs(security_center): ''' test analysis console logs for success ''' logs = security_center.analysis.console(pages=2, limit=5) for log in logs: assert isinstance(log, dict) check(log, 'initiator', dict) check(log['initiator'], 'username', str) check(log['initiator'], 'firstname', str) check(log['initiator'], 'lastname', str) try: check(log['initiator'], 'id', int) except AssertionError: check(log['initiator'], 'id', str) check(log, 'severity', dict) check(log['severity'], 'description', str) check(log['severity'], 'id', str) check(log['severity'], 'name', str) check(log, 'rawLog', str) check(log, 'module', str) check(log, 'date', 'datetime') check(log, 'organization', dict) check(log['organization'], 'description', str) check(log['organization'], 'id', str) check(log['organization'], 'name', str) check(log, 'message', str) @pytest.mark.vcr() def 
test_analysis_mobile_listvuln(security_center): ''' test analysis mobile list vulnerability for success ''' vulns = security_center.analysis.mobile(tool='listvuln', pages=2, limit=5) for vuln in vulns: assert isinstance(vuln, dict) check(vuln, 'identifier', str) check(vuln, 'pluginID', str) check(vuln, 'pluginName', str) check(vuln, 'repository', dict) check(vuln['repository'], 'description', str) check(vuln['repository'], 'id', str) check(vuln['repository'], 'name', str) check(vuln, 'severity', dict) check(vuln['severity'], 'description', str) check(vuln['severity'], 'id', str) check(vuln['severity'], 'name', str) @pytest.mark.vcr() def test_analysis_mobile_sumdeviceid(security_center): ''' test analysis mobile sum device id for success ''' vulns = security_center.analysis.mobile(tool='sumdeviceid', pages=2, limit=5) for vuln in vulns: assert isinstance(vuln, dict) check(vuln, 'identifier', str) check(vuln, 'model', str) check(vuln, 'repository', dict) check(vuln['repository'], 'description', str) check(vuln['repository'], 'id', str) check(vuln['repository'], 'name', str) check(vuln, 'score', str) check(vuln, 'serial', str) check(vuln, 'severityCritical', str) check(vuln, 'severityHigh', str) check(vuln, 'severityInfo', str) check(vuln, 'severityLow', str) check(vuln, 'severityMedium', str) check(vuln, 'total', str) @pytest.mark.vcr() def test_analysis_mobile_summdmuser(security_center): ''' test analysis mobile sum mdm user for success ''' vulns = security_center.analysis.mobile(tool='summdmuser', pages=2, limit=5) for vuln in vulns: assert isinstance(vuln, dict) check(vuln, 'score', str) check(vuln, 'severityCritical', str) check(vuln, 'severityHigh', str) check(vuln, 'severityInfo', str) check(vuln, 'severityLow', str) check(vuln, 'severityMedium', str) check(vuln, 'total', str) check(vuln, 'user', str) @pytest.mark.vcr() def test_analysis_mobile_summodel(security_center): ''' test analysis mobile sum model for success ''' vulns = 
security_center.analysis.mobile(tool='summodel', pages=2, limit=5) for vuln in vulns: assert isinstance(vuln, dict) check(vuln, 'deviceCount', str) check(vuln, 'model', str) check(vuln, 'score', str) check(vuln, 'severityCritical', str) check(vuln, 'severityHigh', str) check(vuln, 'severityInfo', str) check(vuln, 'severityLow', str) check(vuln, 'severityMedium', str) check(vuln, 'total', str) @pytest.mark.vcr() def test_analysis_mobile_sumoscpe(security_center): ''' test analysis mobile sum oscpe for success ''' vulns = security_center.analysis.mobile(tool='sumoscpe', pages=2, limit=5) for vuln in vulns: assert isinstance(vuln, dict) check(vuln, 'deviceCount', str) check(vuln, 'osCPE', str) check(vuln, 'score', str) check(vuln, 'severityCritical', str) check(vuln, 'severityHigh', str) check(vuln, 'severityInfo', str) check(vuln, 'severityLow', str) check(vuln, 'severityMedium', str) check(vuln, 'total', str) @pytest.mark.vcr() def test_analysis_mobile_sumpluginid(security_center): ''' test analysis mobile sum plugin id for success ''' vulns = security_center.analysis.mobile(tool='sumpluginid', pages=2, limit=5) for vuln in vulns: assert isinstance(vuln, dict) check(vuln, 'name', str) check(vuln, 'pluginID', str) check(vuln, 'severity', dict) check(vuln['severity'], 'description', str) check(vuln['severity'], 'id', str) check(vuln['severity'], 'name', str) check(vuln, 'total', str) @pytest.mark.vcr() def test_analysis_mobile_vulndetails(security_center): ''' test analysis mobile vulnerability details for success ''' vulns = security_center.analysis.mobile(pages=2, limit=5) for vuln in vulns: assert isinstance(vuln, dict) check(vuln, 'baseScore', str) check(vuln, 'bid', str) check(vuln, 'checkType', str) check(vuln, 'cpe', str) check(vuln, 'cve', str) check(vuln, 'cvssVector', str) check(vuln, 'description', str) check(vuln, 'deviceVersion', str) check(vuln, 'exploitAvailable', str) check(vuln, 'exploitEase', str) check(vuln, 'exploitFrameworks', str) check(vuln, 
'identifier', str) check(vuln, 'lastSeen', str) check(vuln, 'mdmType', str) check(vuln, 'model', str) check(vuln, 'osCPE', str) check(vuln, 'patchPubDate', str) check(vuln, 'pluginID', str) check(vuln, 'pluginInfo', str) check(vuln, 'pluginModDate', str) check(vuln, 'pluginName', str) check(vuln, 'pluginOutput', str) check(vuln, 'pluginPubDate', str) check(vuln, 'port', str) check(vuln, 'protocol', str) check(vuln, 'repository', dict) check(vuln['repository'], 'description', str) check(vuln['repository'], 'id', str) check(vuln['repository'], 'name', str) check(vuln, 'riskFactor', str) check(vuln, 'seeAlso', str) check(vuln, 'serial', str) check(vuln, 'severity', dict) check(vuln['severity'], 'description', str) check(vuln['severity'], 'id', str) check(vuln['severity'], 'name', str) check(vuln, 'solution', str) check(vuln, 'stigSeverity', str) check(vuln, 'synopsis', str) check(vuln, 'temporalScore', str) check(vuln, 'user', str) check(vuln, 'version', str) check(vuln, 'vulnPubDate', str) check(vuln, 'xref', str) @pytest.mark.vcr() def test_analysis_events_listdata(security_center): ''' test analysis events list data for success ''' events = security_center.analysis.events(tool='listdata', pages=2, limit=5) for event in events: assert isinstance(event, dict) check(event, 'destination ip', str) check(event, 'destination port', str) check(event, 'event', str) check(event, 'number of vulns', str) check(event, 'protocol', str) check(event, 'sensor', str) check(event, 'source ip', str) check(event, 'time', str) check(event, 'type', str) check(event, 'va/ids', str) @pytest.mark.vcr() def test_analysis_events_sumasset(security_center): ''' test analysis events sum asset for success ''' events = security_center.analysis.events(tool='sumasset', pages=2, limit=5) for event in events: assert isinstance(event, dict) check(event, 'asset', dict) check(event['asset'], 'description', str) check(event['asset'], 'id', str) check(event['asset'], 'name', str) check(event['asset'], 
'status', str) check(event['asset'], 'type', str) try: check(event, 'count', str) except AssertionError: check(event, 'count', int) @pytest.mark.vcr() def test_analysis_events_sumclassa(security_center): ''' test analysis events sum class-a for success ''' events = security_center.analysis.events(tool='sumclassa', pages=2, limit=5) for event in events: assert isinstance(event, dict) check(event, 'class-a', str) check(event, 'count', str) @pytest.mark.vcr() def test_analysis_events_sumclassb(security_center): ''' test analysis events sum class-b for success ''' events = security_center.analysis.events(tool='sumclassb', pages=2, limit=5) for event in events: assert isinstance(event, dict) check(event, 'class-b', str) check(event, 'count', str) @pytest.mark.vcr() def test_analysis_events_sumclassc(security_center): ''' test analysis events sum class-c for success ''' events = security_center.analysis.events(tool='sumclassc', pages=2, limit=5) for event in events: assert isinstance(event, dict) check(event, 'class-c', str) check(event, 'count', str) @pytest.mark.vcr() def test_analysis_events_sumconns(security_center): ''' test analysis events sum conns for success ''' events = security_center.analysis.events(tool='sumconns', pages=2, limit=5) for event in events: assert isinstance(event, dict) check(event, 'count', str) check(event, 'destination ip', str) check(event, 'source ip', str) @pytest.mark.vcr() def test_analysis_events_sumdate(security_center): ''' test analysis events sum date for success ''' events = security_center.analysis.events(tool='sumdate', pages=2, limit=5) for event in events: assert isinstance(event, dict) check(event, '24-hour plot', str) check(event, 'count', str) check(event, 'date', str) check(event, 'time block start', str) check(event, 'time block stop', str) @pytest.mark.vcr() def test_analysis_events_sumdstip(security_center): ''' test analysis events sum ds tip for success ''' events = security_center.analysis.events(tool='sumdstip', 
pages=2, limit=5) for event in events: assert isinstance(event, dict) check(event, 'address', str) check(event, 'count', str) check(event, 'lce', dict) check(event['lce'], 'description', str) check(event['lce'], 'id', str) check(event['lce'], 'name', str) check(event['lce'], 'status', str) @pytest.mark.vcr() def test_analysis_events_sumevent(security_center): ''' test analysis events sum event for success ''' events = security_center.analysis.events(tool='sumevent', pages=2, limit=5) for event in events: assert isinstance(event, dict) check(event, '24-hour plot', str) check(event, 'count', str) check(event, 'description', str) check(event, 'event', str) check(event, 'file', str) @pytest.mark.vcr() def test_analysis_events_sumevent2(security_center): ''' test analysis events sum event for success ''' events = security_center.analysis.events(tool='sumevent2', pages=2, limit=5) for event in events: assert isinstance(event, dict) check(event, '24-hour plot', str) check(event, 'count', str) check(event, 'description', str) check(event, 'event', str) check(event, 'file', str) @pytest.mark.vcr() def test_analysis_events_sumip(security_center): ''' test analysis events sum ip for success ''' events = security_center.analysis.events(tool='sumip', pages=2, limit=5) for event in events: assert isinstance(event, dict) check(event, 'address', str) check(event, 'count', str) check(event, 'lce', dict) check(event['lce'], 'description', str) check(event['lce'], 'id', str) check(event['lce'], 'name', str) check(event['lce'], 'status', str) @pytest.mark.vcr() def test_analysis_events_sumport(security_center): ''' test analysis events sum port for success ''' events = security_center.analysis.events(tool='sumport', pages=2, limit=5) for event in events: assert isinstance(event, dict) check(event, 'count', str) check(event, 'port', str) @pytest.mark.vcr() def test_analysis_events_sumprotocol(security_center): ''' test analysis events sum protocol for success ''' events = 
security_center.analysis.events(tool='sumprotocol', pages=2, limit=5) for event in events: assert isinstance(event, dict) check(event, 'count', str) check(event, 'protocol', str) @pytest.mark.vcr() def test_analysis_events_sumsrcip(security_center): ''' test analysis events sum src ip for success ''' events = security_center.analysis.events(tool='sumsrcip', pages=2, limit=5) for event in events: assert isinstance(event, dict) check(event, 'address', str) check(event, 'count', str) check(event, 'lce', dict) check(event['lce'], 'description', str) check(event['lce'], 'id', str) check(event['lce'], 'name', str) check(event['lce'], 'status', str) @pytest.mark.vcr() def test_analysis_events_sumtime(security_center): ''' test analysis events sum time for success ''' events = security_center.analysis.events(tool='sumtime', pages=2, limit=5) for event in events: assert isinstance(event, dict) check(event, 'count', str) check(event, 'time block start', str) check(event, 'time block stop', str) @pytest.mark.vcr() def test_analysis_events_sumtype(security_center): ''' test analysis events sum type for success ''' events = security_center.analysis.events(tool='sumtype', pages=2, limit=5) for event in events: assert isinstance(event, dict) check(event, '24-hour plot', str) check(event, 'count', str) check(event, 'type', str) @pytest.mark.vcr() def test_analysis_events_sumuser(security_center): ''' test analysis events sum user for success ''' events = security_center.analysis.events(tool='sumuser', pages=2, limit=5) for event in events: assert isinstance(event, dict) check(event, '24-hour plot', str) check(event, 'count', str) check(event, 'user', str) @pytest.mark.vcr() def test_analysis_events_syslog(security_center): ''' test analysis events sys log for success ''' events = security_center.analysis.events(tool='syslog', pages=2, limit=5) for event in events: assert isinstance(event, dict) check(event, 'message', str) check(event, 'sensor', str) check(event, 'time', str) 
check(event, 'type', str)
33.895941
96
0.602909
5,134
45,929
5.30522
0.059018
0.12718
0.151118
0.066821
0.867717
0.845982
0.797261
0.716415
0.650329
0.591658
0
0.006228
0.255307
45,929
1,354
97
33.920975
0.790106
0.080886
0
0.743976
0
0
0.171685
0.00051
0
0
0
0
0.069277
1
0.071285
false
0
0.004016
0
0.075301
0
0
0
0
null
0
0
0
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
5
cb41297dca436bedc8a918fc251676765c15c4e1
80
py
Python
algoneer/dataschema/__init__.py
algoneer/algoneer-py
5f300543116278c91a9cf8c9ef5a1375e3f1e75d
[ "MIT" ]
10
2019-08-05T16:06:12.000Z
2020-12-19T16:40:48.000Z
algoneer/dataschema/__init__.py
algoneer/algoneer-py
5f300543116278c91a9cf8c9ef5a1375e3f1e75d
[ "MIT" ]
null
null
null
algoneer/dataschema/__init__.py
algoneer/algoneer-py
5f300543116278c91a9cf8c9ef5a1375e3f1e75d
[ "MIT" ]
1
2020-04-27T08:50:14.000Z
2020-04-27T08:50:14.000Z
from .dataschema import DataSchema from .attributeschema import AttributeSchema
26.666667
44
0.875
8
80
8.75
0.5
0
0
0
0
0
0
0
0
0
0
0
0.1
80
2
45
40
0.972222
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
cb48a086fdf1115630e1de0b651d18826085adc7
172
py
Python
portal/lib/dart/portal/blueprints/api/__init__.py
plockaby/dart
43f8e471759fb05d9fdfce522ec9976e1e77ee08
[ "Artistic-2.0" ]
2
2021-06-10T19:18:12.000Z
2021-11-03T00:35:44.000Z
portal/lib/dart/portal/blueprints/api/__init__.py
plockaby/dart
43f8e471759fb05d9fdfce522ec9976e1e77ee08
[ "Artistic-2.0" ]
null
null
null
portal/lib/dart/portal/blueprints/api/__init__.py
plockaby/dart
43f8e471759fb05d9fdfce522ec9976e1e77ee08
[ "Artistic-2.0" ]
1
2021-10-05T09:52:34.000Z
2021-10-05T09:52:34.000Z
# the order of imports is important here. "views" uses "api" # so "api" must be imported before "views". from .. import api # noqa: F401 from . import views # noqa: F401
34.4
60
0.686047
27
172
4.37037
0.703704
0.169492
0
0
0
0
0
0
0
0
0
0.043796
0.203488
172
4
61
43
0.817518
0.709302
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
cb600ecef9e3ff49c2a662bfdfb1d328b0ea8011
421
py
Python
ShiftType.py
Tweety-FER/hmo-project
3bbdb858a15c84d4c95ec5a4e4313b3593e20cdc
[ "MIT" ]
null
null
null
ShiftType.py
Tweety-FER/hmo-project
3bbdb858a15c84d4c95ec5a4e4313b3593e20cdc
[ "MIT" ]
null
null
null
ShiftType.py
Tweety-FER/hmo-project
3bbdb858a15c84d4c95ec5a4e4313b3593e20cdc
[ "MIT" ]
null
null
null
# -*- coding: utf-8 -*- class ShiftType(object): def __init__(self, name, time, not_followed_by): self.name = name self.time = time self.not_followed_by = not_followed_by self.cover_requirements = [] def can_follow(self, shift): return self.name not in shift.not_followed_by def can_be_followed_by(self, shift): return shift.name not in self.not_followed_by
28.066667
53
0.660333
60
421
4.316667
0.366667
0.23166
0.250965
0.131274
0
0
0
0
0
0
0
0.003145
0.244656
421
14
54
30.071429
0.811321
0.049881
0
0
0
0
0
0
0
0
0
0
0
1
0.3
false
0
0
0.2
0.6
0
0
0
0
null
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
1
1
0
0
5
cbb123c63c6c8e26c7e85404bba1299868766b1a
356
py
Python
pirates/battle/DistributedPCCannonUD.py
Willy5s/Pirates-Online-Rewritten
7434cf98d9b7c837d57c181e5dabd02ddf98acb7
[ "BSD-3-Clause" ]
81
2018-04-08T18:14:24.000Z
2022-01-11T07:22:15.000Z
pirates/battle/DistributedPCCannonUD.py
Willy5s/Pirates-Online-Rewritten
7434cf98d9b7c837d57c181e5dabd02ddf98acb7
[ "BSD-3-Clause" ]
4
2018-09-13T20:41:22.000Z
2022-01-08T06:57:00.000Z
pirates/battle/DistributedPCCannonUD.py
Willy5s/Pirates-Online-Rewritten
7434cf98d9b7c837d57c181e5dabd02ddf98acb7
[ "BSD-3-Clause" ]
26
2018-05-26T12:49:27.000Z
2021-09-11T09:11:59.000Z
from direct.distributed.DistributedObjectGlobalUD import DistributedObjectGlobalUD from direct.directnotify import DirectNotifyGlobal class DistributedPCCannonUD(DistributedObjectGlobalUD): notify = DirectNotifyGlobal.directNotify.newCategory('DistributedPCCannonUD') def __init__(self, air): DistributedObjectGlobalUD.__init__(self, air)
44.5
82
0.842697
27
356
10.814815
0.555556
0.068493
0.075342
0
0
0
0
0
0
0
0
0
0.098315
356
8
83
44.5
0.909657
0
0
0
0
0
0.058824
0.058824
0
0
0
0
0
1
0.166667
false
0
0.333333
0
0.833333
0
1
0
1
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
1
0
0
5
cbb1641ae533bc40dd4163f41ca76b35032bdc12
45
py
Python
venus/db/yasdl/__init__.py
nagylzs/python-venus-lib
336d20532c32e874ab0a43cf866092b9e55dded5
[ "Apache-2.0" ]
null
null
null
venus/db/yasdl/__init__.py
nagylzs/python-venus-lib
336d20532c32e874ab0a43cf866092b9e55dded5
[ "Apache-2.0" ]
1
2019-02-15T13:40:49.000Z
2019-02-15T13:40:49.000Z
venus/db/yasdl/__init__.py
nagylzs/python-venus-lib
336d20532c32e874ab0a43cf866092b9e55dded5
[ "Apache-2.0" ]
null
null
null
"""Yet Another Schema Definition Language"""
22.5
44
0.755556
5
45
6.8
1
0
0
0
0
0
0
0
0
0
0
0
0.111111
45
1
45
45
0.85
0.844444
0
null
0
null
0
0
null
0
0
0
null
1
null
true
0
0
null
null
null
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
1
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
0
0
0
0
0
5
cbb4e37fae6a10f33af91b1dba0c1f69d2f9ce2a
201
py
Python
pandas/io/json/__init__.py
ivary43/pandas
46adc5b1c2aacb312d72729af72bc0ad600917c0
[ "BSD-3-Clause" ]
5
2019-07-26T15:22:41.000Z
2021-09-28T09:22:17.000Z
pandas/io/json/__init__.py
ivary43/pandas
46adc5b1c2aacb312d72729af72bc0ad600917c0
[ "BSD-3-Clause" ]
null
null
null
pandas/io/json/__init__.py
ivary43/pandas
46adc5b1c2aacb312d72729af72bc0ad600917c0
[ "BSD-3-Clause" ]
3
2019-07-26T10:47:23.000Z
2020-08-10T12:40:32.000Z
from .json import dumps, loads, read_json, to_json # noqa from .normalize import json_normalize # noqa from .table_schema import build_table_schema # noqa del json, normalize, table_schema # noqa
33.5
58
0.781095
30
201
5
0.433333
0.22
0.2
0
0
0
0
0
0
0
0
0
0.159204
201
5
59
40.2
0.887574
0.094527
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
0.75
0
0.75
0
0
0
0
null
1
1
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
0
0
5
cbdc902a58ed07deedea088707d465e3d7879c41
55
py
Python
pythonClient/eXchange/lib/PaxHeaders.76732/nhs-client.py
snowdensb/theexchange-python3-clients
f6724e00494dc1a705f8cb872425416d09b062cf
[ "MIT" ]
16
2019-06-20T23:17:53.000Z
2022-03-10T05:02:26.000Z
pythonClient/eXchange/lib/PaxHeaders.76732/nhs-client.py
snowdensb/theexchange-python3-clients
f6724e00494dc1a705f8cb872425416d09b062cf
[ "MIT" ]
null
null
null
pythonClient/eXchange/lib/PaxHeaders.76732/nhs-client.py
snowdensb/theexchange-python3-clients
f6724e00494dc1a705f8cb872425416d09b062cf
[ "MIT" ]
14
2019-05-23T18:24:21.000Z
2021-02-19T22:52:25.000Z
26 atime=1555436580.41239 27 ctime=1554400728.036541
18.333333
27
0.818182
8
55
5.625
1
0
0
0
0
0
0
0
0
0
0
0.714286
0.109091
55
2
28
27.5
0.204082
0
0
0
0
0
0
0
0
0
0
0
0
0
null
null
0
0
null
null
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
1
0
0
1
0
0
1
0
0
0
0
0
0
0
null
0
0
0
0
1
0
0
0
0
0
0
0
0
5
1dd1155c74c6505143010536604ef30e41260b84
9,705
py
Python
chembl_beaker/beaker/core_apps/rasterImages/views.py
mnowotka/chembl_beaker
1fb87990ac353b0fa06ab7186d99eae8784da13d
[ "Apache-2.0" ]
7
2015-04-02T16:54:16.000Z
2021-04-06T13:16:21.000Z
chembl_beaker/beaker/core_apps/rasterImages/views.py
mnowotka/chembl_beaker
1fb87990ac353b0fa06ab7186d99eae8784da13d
[ "Apache-2.0" ]
null
null
null
chembl_beaker/beaker/core_apps/rasterImages/views.py
mnowotka/chembl_beaker
1fb87990ac353b0fa06ab7186d99eae8784da13d
[ "Apache-2.0" ]
6
2015-03-13T17:31:33.000Z
2020-06-28T18:28:26.000Z
__author__ = 'mnowotka' from bottle import request, response import base64 from chembl_beaker.beaker.utils.io import _parseFlag from chembl_beaker.beaker import app from chembl_beaker.beaker.core_apps.rasterImages.impl import _ctab2image, _smiles2image #----------------------------------------------------------------------------------------------------------------------- def ctab2imageView(data, params): kwargs = dict() kwargs['size'] = int(params.get('size', 200)) separator = params.get('separator', '|') kwargs['legend'] = params.get('legend', '').split(separator) kwargs['sanitize'] = _parseFlag(params.get('sanitize', True)) kwargs['removeHs'] = _parseFlag(params.get('removeHs', True)) kwargs['strictParsing'] = _parseFlag(params.get('strictParsing', True)) kwargs['atomMapNumber'] = _parseFlag(params.get('atomMapNumber', False)) kwargs['computeCoords'] = _parseFlag(params.get('computeCoords', True)) response.content_type = 'image/png' ret = _ctab2image(data, **kwargs) if request.is_ajax: ret = base64.b64encode(ret) return ret #----------------------------------------------------------------------------------------------------------------------- @app.route('/ctab2image/<ctab>', method=['OPTIONS', 'GET'], name="ctab2image") def ctab2image(ctab): """ Converts CTAB to PNG image. CTAB is urlsafe_base64 encoded string containing single molfile or concatenation of multiple molfiles. Size is the optional size of image in pixels (default value is 200 px). Legend is optional label in the bottom of image. 
cURL examples: curl -X GET ${BEAKER_ROOT_URL}ctab2image/$(cat aspirin.mol | base64 -w 0 | tr "+/" "-_") > aspirin.png curl -X GET ${BEAKER_ROOT_URL}ctab2image/$(cat aspirin.mol | base64 -w 0 | tr "+/" "-_")?computeCoords=0 > aspirin.png curl -X GET ${BEAKER_ROOT_URL}ctab2image/$(cat aspirin.mol | base64 -w 0 | tr "+/" "-_")?atomMapNumber=1 > aspirin.png curl -X GET ${BEAKER_ROOT_URL}ctab2image/$(cat aspirin.mol | base64 -w 0 | tr "+/" "-_")?legend=aspirin > aspirin.png curl -X GET "${BEAKER_ROOT_URL}ctab2image/"$(cat mcs.sdf | base64 -w 0 | tr "+/" "-_")"?legend=foo|bar|bla" > out.png curl -X GET "${BEAKER_ROOT_URL}ctab2image/"$(cat mcs.sdf | base64 -w 0 | tr "+/" "-_")"?legend=foo|bar|bla&computeCoords=0" > out.png curl -X GET "${BEAKER_ROOT_URL}ctab2image/"$(cat mcs.sdf | base64 -w 0 | tr "+/" "-_")"?legend=foo|bar|bla" > out.png curl -X GET ${BEAKER_ROOT_URL}ctab2image/$(cat mcs_no_coords.sdf | base64 -w 0 | tr "+/" "-_")?legend=foo > out.png curl -X GET ${BEAKER_ROOT_URL}ctab2image/$(cat aspirin.mol | base64 -w 0 | tr "+/" "-_")?size=400 > aspirin.png """ data = base64.urlsafe_b64decode(ctab) return ctab2imageView(data, request.params) #----------------------------------------------------------------------------------------------------------------------- @app.route('/ctab2image', method=['OPTIONS', 'POST'], name="ctab2image") def ctab2image(): """ Converts CTAB to PNG image. CTAB is either single molfile or SDF file. Size is the optional size of image in pixels (default value is 200 px). Legend is optional label in the bottom of image. 
cURL examples: curl -X POST --data-binary @aspirin.mol ${BEAKER_ROOT_URL}ctab2image > aspirin.png curl -X POST -F "file=@aspirin.mol" -F "computeCoords=0" ${BEAKER_ROOT_URL}ctab2image > aspirin.png curl -X POST -F "file=@aspirin.mol" -F "atomMapNumber=1" ${BEAKER_ROOT_URL}ctab2image > aspirin.png curl -X POST -F "file=@aspirin.mol" -F "legend=aspirin" ${BEAKER_ROOT_URL}ctab2image > aspirin.png curl -X POST -F "file=@mcs.sdf" -F "legend=foo|bar|bla" ${BEAKER_ROOT_URL}ctab2image > out.png curl -X POST -F "file=@mcs.sdf" -F "legend=foo|bar|bla" -F "computeCoords=0" ${BEAKER_ROOT_URL}ctab2image > out.png curl -X POST -F "file=@mcs_no_coords.sdf" -F "legend=foo|bar|bla" ${BEAKER_ROOT_URL}ctab2image > out.png curl -X POST -F "file=@mcs.sdf" -F "legend=foo" ${BEAKER_ROOT_URL}ctab2image > out.png curl -X POST -F "file=@mcs.sdf" -F "legend=foo|bar|bla" -F "size=400" ${BEAKER_ROOT_URL}ctab2image > out.png curl -X POST -F "file=@aspirin.mol" -F "size=400" ${BEAKER_ROOT_URL}ctab2image > aspirin.png """ data = request.files.values()[0].file.read() if len(request.files) else request.body.read() return ctab2imageView(data, request.params) #----------------------------------------------------------------------------------------------------------------------- def smiles2imageView(data, params): kwargs = dict() kwargs['size'] = int(params.get('size', 200)) separator = params.get('separator', '|') kwargs['legend'] = params.get('legend', '').split(separator) kwargs['computeCoords'] = _parseFlag(params.get('computeCoords', True)) kwargs['delimiter'] = params.get('delimiter', ' ') kwargs['smilesColumn'] = int(params.get('smilesColumn', 0)) kwargs['nameColumn'] = int(params.get('nameColumn', 1)) kwargs['sanitize'] = _parseFlag(params.get('sanitize', True)) kwargs['atomMapNumber'] = _parseFlag(params.get('atomMapNumber', False)) if params.get('titleLine') is None and not data.startswith('SMILES Name'): kwargs['titleLine'] = False else: kwargs['titleLine'] = 
_parseFlag(params.get('titleLine', True)) response.content_type = 'image/png' ret = _smiles2image(data, **kwargs) if request.is_ajax: ret = base64.b64encode(ret) return ret #----------------------------------------------------------------------------------------------------------------------- @app.route('/smiles2image/<smiles>', method=['OPTIONS', 'GET'], name="smiles2image") def smiles2image(smiles): """ Converts SMILES to PNG image. This method accepts urlsafe_base64 encoded string containing single or multiple SMILES optionally containing header line, specific to *.smi format. Size is the optional size of image in pixels (default value is 200 px). Legend is optional label in the bottom of image. cURL examples: curl -X GET ${BEAKER_ROOT_URL}smiles2image/$(cat aspirin_with_header.smi | base64 -w 0 | tr "+/" "-_") > aspirin.png curl -X GET ${BEAKER_ROOT_URL}smiles2image/$(cat aspirin_no_header.smi | base64 -w 0 | tr "+/" "-_") > aspirin.png curl -X GET ${BEAKER_ROOT_URL}smiles2image/$(cat aspirin_no_header.smi | base64 -w 0 | tr "+/" "-_")?atomMapNumber=1 > aspirin.png curl -X GET ${BEAKER_ROOT_URL}smiles2image/$(cat aspirin_with_header.smi | base64 -w 0 | tr "+/" "-_")?legend=aspirin > aspirin.png curl -X GET ${BEAKER_ROOT_URL}smiles2image/$(cat aspirin_no_header.smi | base64 -w 0 | tr "+/" "-_")?legend=aspirin > aspirin.png curl -X GET ${BEAKER_ROOT_URL}smiles2image/$(cat aspirin_with_header.smi | base64 -w 0 | tr "+/" "-_")?size=400 > aspirin.png curl -X GET ${BEAKER_ROOT_URL}smiles2image/$(cat aspirin_no_header.smi | base64 -w 0 | tr "+/" "-_")?size=400 > aspirin.png curl -X GET "${BEAKER_ROOT_URL}smiles2image/"$(cat mcs.smi | base64 -w 0 | tr "+/" "-_")"?legend=foo|bar|bla" > out.png curl -X GET "${BEAKER_ROOT_URL}smiles2image/"$(cat mcs_no_header.smi | base64 -w 0 | tr "+/" "-_")"?legend=foo|bar|bla" > out.png curl -X GET ${BEAKER_ROOT_URL}smiles2image/$(cat mcs.smi | base64 -w 0 | tr "+/" "-_")?legend=foo > out.png curl -X GET 
${BEAKER_ROOT_URL}smiles2image/$(cat mcs_no_header.smi | base64 -w 0 | tr "+/" "-_")?legend=foo > out.png """ data = base64.urlsafe_b64decode(smiles) return smiles2imageView(data, request.params) #----------------------------------------------------------------------------------------------------------------------- @app.route('/smiles2image', method=['OPTIONS', 'POST'], name="smiles2image") def smiles2image(): """ Converts SMILES to PNG image. This method accepts single or multiple SMILES or *.smi file. Size is the optional size of image in pixels (default value is 200 px). Legend is optional label in the bottom of image. cURL examples: curl -X POST -F "file=@aspirin_no_header.smi" -F "atomMapNumber=1" ${BEAKER_ROOT_URL}smiles2image > aspirin.png curl -X POST --data-binary @aspirin_with_header.smi ${BEAKER_ROOT_URL}smiles2image > aspirin.png curl -X POST --data-binary @aspirin_no_header.smi ${BEAKER_ROOT_URL}smiles2image > aspirin.png curl -X POST -F "file=@aspirin_with_header.smi" -F "legend=aspirin" ${BEAKER_ROOT_URL}smiles2image > aspirin.png curl -X POST -F "file=@aspirin_no_header.smi" -F "legend=aspirin" ${BEAKER_ROOT_URL}smiles2image > aspirin.png curl -X POST -F "file=@aspirin_with_header.smi" -F "size=400" ${BEAKER_ROOT_URL}smiles2image > aspirin.png curl -X POST -F "file=@aspirin_no_header.smi" -F "size=400" ${BEAKER_ROOT_URL}smiles2image > aspirin.png curl -X POST -F "file=@mcs.smi" -F "legend=foo|bar|bla" ${BEAKER_ROOT_URL}smiles2image > out.png curl -X POST -F "file=@mcs_no_header.smi" -F "legend=foo|bar|bla" ${BEAKER_ROOT_URL}smiles2image > out.png curl -X POST -F "file=@mcs.smi" -F "legend=foo" ${BEAKER_ROOT_URL}smiles2image > out.png curl -X POST -F "file=@mcs_no_header.smi" -F "legend=foo" ${BEAKER_ROOT_URL}smiles2image > out.png curl -X POST -F "file=@mcs.smi" -F "legend=foo|bar|bla" -F "size=400" ${BEAKER_ROOT_URL}smiles2image > out.png curl -X POST -F "file=@mcs_no_header.smi" -F "legend=foo|bar|bla" -F "size=400" ${BEAKER_ROOT_URL}smiles2image 
> out.png """ data = request.files.values()[0].file.read() if len(request.files) else request.body.read() return smiles2imageView(data, request.params) #-----------------------------------------------------------------------------------------------------------------------
61.815287
137
0.632148
1,289
9,705
4.621412
0.100078
0.036092
0.093839
0.100722
0.823401
0.788484
0.763472
0.715964
0.665436
0.662246
0
0.024364
0.137249
9,705
156
138
62.211538
0.687089
0.700979
0
0.491228
0
0
0.182515
0.007663
0
0
0
0
0
1
0.105263
false
0
0.087719
0
0.298246
0
0
0
0
null
0
0
0
1
1
1
1
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
5
380bd9e66df3a2de371fa55a3984fcd483ab1935
153
py
Python
biobb_analysis/gromacs/__init__.py
bioexcel/biobb_analysis
794683daf65eb13ddaaaf6cf3c19da6d1322a949
[ "Apache-2.0" ]
3
2019-05-18T14:52:30.000Z
2020-10-18T06:20:00.000Z
biobb_analysis/gromacs/__init__.py
bioexcel/biobb_analysis
794683daf65eb13ddaaaf6cf3c19da6d1322a949
[ "Apache-2.0" ]
7
2019-03-04T15:04:28.000Z
2021-06-17T10:57:25.000Z
biobb_analysis/gromacs/__init__.py
bioexcel/biobb_analysis
794683daf65eb13ddaaaf6cf3c19da6d1322a949
[ "Apache-2.0" ]
null
null
null
name = "gromacs" __all__ = ["gmx_cluster", "gmx_energy", "gmx_image", "gmx_rgyr", "gmx_rms", "gmx_trjconv_str", "gmx_trjconv_str_ens", "gmx_trjconv_trj"]
76.5
136
0.732026
23
153
4.173913
0.565217
0.3125
0.270833
0
0
0
0
0
0
0
0
0
0.078431
153
2
136
76.5
0.680851
0
0
0
0
0
0.655844
0
0
0
0
0
0
1
0
false
0
0
0
0
0
1
0
0
null
1
1
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
5
383916616349f54fecf47956a9daae97b92aac7f
65
py
Python
spacetimeformer/lstnet_model/__init__.py
Piki1989/spacetimeformer
7e0caf17dd03e5d25e2766c4f7132805779bcc40
[ "MIT" ]
209
2021-09-28T13:59:56.000Z
2022-03-31T23:29:43.000Z
spacetimeformer/lstnet_model/__init__.py
Piki1989/spacetimeformer
7e0caf17dd03e5d25e2766c4f7132805779bcc40
[ "MIT" ]
30
2021-09-30T07:53:38.000Z
2022-03-22T01:13:42.000Z
spacetimeformer/lstnet_model/__init__.py
Piki1989/spacetimeformer
7e0caf17dd03e5d25e2766c4f7132805779bcc40
[ "MIT" ]
49
2021-10-29T22:47:20.000Z
2022-03-30T15:24:56.000Z
from .lstnet_model import LSTNet_Forecaster from . import LSTNet
21.666667
43
0.846154
9
65
5.888889
0.555556
0.45283
0
0
0
0
0
0
0
0
0
0
0.123077
65
2
44
32.5
0.929825
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
0
0
5
69dcb63799fd9ea64d3217d580c398312cb0ebda
72
py
Python
wpa_project/payment/src/__init__.py
s-amundson/wpa_2p1
43deb859123e5ef2eab3652e403c8d2f53d43b77
[ "MIT" ]
1
2022-01-03T02:46:34.000Z
2022-01-03T02:46:34.000Z
wpa_project/payment/src/__init__.py
s-amundson/wpa_2p1
43deb859123e5ef2eab3652e403c8d2f53d43b77
[ "MIT" ]
31
2021-12-29T17:43:06.000Z
2022-03-25T01:03:17.000Z
wpa_project/payment/src/__init__.py
s-amundson/wpa_2p1
43deb859123e5ef2eab3652e403c8d2f53d43b77
[ "MIT" ]
null
null
null
from .email import EmailMessage from .square_helper import SquareHelper
24
39
0.861111
9
72
6.777778
0.777778
0
0
0
0
0
0
0
0
0
0
0
0.111111
72
2
40
36
0.953125
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
69e37a13e58c8d0a5c35299d8543852750acf121
66
py
Python
{{cookiecutter.module_name}}/__init__.py
banjtheman/cookiecutter-zephyr-module
2bcd0b6b90d536e801858772bc1e21386f08949f
[ "Apache-2.0" ]
null
null
null
{{cookiecutter.module_name}}/__init__.py
banjtheman/cookiecutter-zephyr-module
2bcd0b6b90d536e801858772bc1e21386f08949f
[ "Apache-2.0" ]
null
null
null
{{cookiecutter.module_name}}/__init__.py
banjtheman/cookiecutter-zephyr-module
2bcd0b6b90d536e801858772bc1e21386f08949f
[ "Apache-2.0" ]
null
null
null
from .controller import format_input, procces_data, format_output
33
65
0.863636
9
66
6
0.888889
0
0
0
0
0
0
0
0
0
0
0
0.090909
66
1
66
66
0.9
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
0e09477cf89314b94fcf27fa272d1f9ed83920ea
23,490
py
Python
tests/test_packages/test_protocols/test_fipa.py
lrahmani/agents-aea
9bd1d51530fc21bf41b5adea031cda19a94b048b
[ "Apache-2.0" ]
null
null
null
tests/test_packages/test_protocols/test_fipa.py
lrahmani/agents-aea
9bd1d51530fc21bf41b5adea031cda19a94b048b
[ "Apache-2.0" ]
null
null
null
tests/test_packages/test_protocols/test_fipa.py
lrahmani/agents-aea
9bd1d51530fc21bf41b5adea031cda19a94b048b
[ "Apache-2.0" ]
null
null
null
# -*- coding: utf-8 -*- # ------------------------------------------------------------------------------ # # Copyright 2018-2019 Fetch.AI Limited # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # ------------------------------------------------------------------------------ """This module contains the tests for the FIPA protocol.""" import logging from typing import Tuple, cast from unittest import mock import pytest from aea.helpers.search.models import Constraint, ConstraintType, Description, Query from aea.mail.base import Envelope from packages.fetchai.protocols.fipa.dialogues import FipaDialogue, FipaDialogues from packages.fetchai.protocols.fipa.message import FipaMessage from packages.fetchai.protocols.fipa.serialization import FipaSerializer logger = logging.getLogger(__name__) def test_fipa_cfp_serialization(): """Test that the serialization for the 'fipa' protocol works.""" query = Query([Constraint("something", ConstraintType(">", 1))]) msg = FipaMessage( message_id=1, dialogue_reference=(str(0), ""), target=0, performative=FipaMessage.Performative.CFP, query=query, ) msg_bytes = FipaSerializer().encode(msg) envelope = Envelope( to="receiver", sender="sender", protocol_id=FipaMessage.protocol_id, message=msg_bytes, ) envelope_bytes = envelope.encode() actual_envelope = Envelope.decode(envelope_bytes) expected_envelope = envelope assert expected_envelope == actual_envelope actual_msg = FipaSerializer().decode(actual_envelope.message) expected_msg = msg assert 
expected_msg == actual_msg def test_fipa_cfp_serialization_bytes(): """Test that the serialization - deserialization for the 'fipa' protocol works.""" query = Query([Constraint("something", ConstraintType(">", 1))]) msg = FipaMessage( message_id=1, dialogue_reference=(str(0), ""), target=0, performative=FipaMessage.Performative.CFP, query=query, ) msg.counterparty = "sender" msg_bytes = FipaSerializer().encode(msg) envelope = Envelope( to="receiver", sender="sender", protocol_id=FipaMessage.protocol_id, message=msg_bytes, ) envelope_bytes = envelope.encode() actual_envelope = Envelope.decode(envelope_bytes) expected_envelope = envelope assert expected_envelope == actual_envelope actual_msg = FipaSerializer().decode(actual_envelope.message) actual_msg.counterparty = "sender" expected_msg = msg assert expected_msg == actual_msg deserialised_msg = FipaSerializer().decode(envelope.message) deserialised_msg.counterparty = "sender" assert msg.get("performative") == deserialised_msg.get("performative") def test_fipa_propose_serialization(): """Test that the serialization for the 'fipa' protocol works.""" proposal = Description({"foo1": 1, "bar1": 2}) msg = FipaMessage( message_id=1, dialogue_reference=(str(0), ""), target=0, performative=FipaMessage.Performative.PROPOSE, proposal=proposal, ) msg_bytes = FipaSerializer().encode(msg) envelope = Envelope( to="receiver", sender="sender", protocol_id=FipaMessage.protocol_id, message=msg_bytes, ) envelope_bytes = envelope.encode() actual_envelope = Envelope.decode(envelope_bytes) expected_envelope = envelope assert expected_envelope == actual_envelope actual_msg = FipaSerializer().decode(actual_envelope.message) expected_msg = msg p1 = actual_msg.get("proposal") p2 = expected_msg.get("proposal") assert p1.values == p2.values def test_fipa_accept_serialization(): """Test that the serialization for the 'fipa' protocol works.""" msg = FipaMessage( message_id=1, dialogue_reference=(str(0), ""), target=0, 
performative=FipaMessage.Performative.ACCEPT, ) msg.counterparty = "sender" msg_bytes = FipaSerializer().encode(msg) envelope = Envelope( to="receiver", sender="sender", protocol_id=FipaMessage.protocol_id, message=msg_bytes, ) envelope_bytes = envelope.encode() actual_envelope = Envelope.decode(envelope_bytes) expected_envelope = envelope assert expected_envelope == actual_envelope actual_msg = FipaSerializer().decode(actual_envelope.message) actual_msg.counterparty = "sender" expected_msg = msg assert expected_msg == actual_msg def test_performative_match_accept(): """Test the serialization - deserialization of the match_accept performative.""" msg = FipaMessage( message_id=1, dialogue_reference=(str(0), ""), target=0, performative=FipaMessage.Performative.MATCH_ACCEPT, ) msg_bytes = FipaSerializer().encode(msg) envelope = Envelope( to="receiver", sender="sender", protocol_id=FipaMessage.protocol_id, message=msg_bytes, ) msg.counterparty = "sender" envelope_bytes = envelope.encode() actual_envelope = Envelope.decode(envelope_bytes) expected_envelope = envelope assert expected_envelope == actual_envelope deserialised_msg = FipaSerializer().decode(envelope.message) assert msg.get("performative") == deserialised_msg.get("performative") def test_performative_accept_with_inform(): """Test the serialization - deserialization of the accept_with_address performative.""" msg = FipaMessage( message_id=1, dialogue_reference=(str(0), ""), target=0, performative=FipaMessage.Performative.ACCEPT_W_INFORM, info={"address": "dummy_address"}, ) msg_bytes = FipaSerializer().encode(msg) envelope = Envelope( to="receiver", sender="sender", protocol_id=FipaMessage.protocol_id, message=msg_bytes, ) envelope_bytes = envelope.encode() actual_envelope = Envelope.decode(envelope_bytes) expected_envelope = envelope assert expected_envelope == actual_envelope deserialised_msg = FipaSerializer().decode(envelope.message) assert msg.get("performative") == deserialised_msg.get("performative") 
def test_performative_match_accept_with_inform(): """Test the serialization - deserialization of the match_accept_with_address performative.""" msg = FipaMessage( message_id=1, dialogue_reference=(str(0), ""), target=0, performative=FipaMessage.Performative.MATCH_ACCEPT_W_INFORM, info={"address": "dummy_address", "signature": "my_signature"}, ) msg_bytes = FipaSerializer().encode(msg) envelope = Envelope( to="receiver", sender="sender", protocol_id=FipaMessage.protocol_id, message=msg_bytes, ) envelope_bytes = envelope.encode() actual_envelope = Envelope.decode(envelope_bytes) expected_envelope = envelope assert expected_envelope == actual_envelope deserialised_msg = FipaSerializer().decode(envelope.message) assert msg.get("performative") == deserialised_msg.get("performative") def test_performative_inform(): """Test the serialization-deserialization of the inform performative.""" msg = FipaMessage( message_id=1, dialogue_reference=(str(0), ""), target=0, performative=FipaMessage.Performative.INFORM, info={"foo": "bar"}, ) msg_bytes = FipaSerializer().encode(msg) envelope = Envelope( to="receiver", sender="sender", protocol_id=FipaMessage.protocol_id, message=msg_bytes, ) envelope_bytes = envelope.encode() actual_envelope = Envelope.decode(envelope_bytes) expected_envelope = envelope assert expected_envelope == actual_envelope deserialised_msg = FipaSerializer().decode(envelope.message) assert msg.get("performative") == deserialised_msg.get("performative") # def test_unknown_performative(): # """Test that we raise an exception when the performative is unknown during check_consistency.""" # msg = FipaMessage( # message_id=1, # dialogue_reference=(str(0), ""), # target=0, # performative=FipaMessage.Performative.ACCEPT, # ) # with mock.patch.object(FipaMessage.Performative, "__eq__", return_value=False): # assert not msg._is_consistent() def test_performative_string_value(): """Test the string value of the performatives.""" assert str(FipaMessage.Performative.CFP) == 
"cfp", "The str value must be cfp" assert ( str(FipaMessage.Performative.PROPOSE) == "propose" ), "The str value must be propose" assert ( str(FipaMessage.Performative.DECLINE) == "decline" ), "The str value must be decline" assert ( str(FipaMessage.Performative.ACCEPT) == "accept" ), "The str value must be accept" assert ( str(FipaMessage.Performative.MATCH_ACCEPT) == "match_accept" ), "The str value must be match_accept" assert ( str(FipaMessage.Performative.ACCEPT_W_INFORM) == "accept_w_inform" ), "The str value must be accept_w_inform" assert ( str(FipaMessage.Performative.MATCH_ACCEPT_W_INFORM) == "match_accept_w_inform" ), "The str value must be match_accept_w_inform" assert ( str(FipaMessage.Performative.INFORM) == "inform" ), "The str value must be inform" def test_fipa_encoding_unknown_performative(): """Test that we raise an exception when the performative is unknown during encoding.""" msg = FipaMessage( message_id=1, dialogue_reference=(str(0), ""), target=0, performative=FipaMessage.Performative.ACCEPT, ) with pytest.raises(ValueError, match="Performative not valid:"): with mock.patch.object(FipaMessage.Performative, "__eq__", return_value=False): FipaSerializer().encode(msg) def test_fipa_decoding_unknown_performative(): """Test that we raise an exception when the performative is unknown during decoding.""" msg = FipaMessage( message_id=1, dialogue_reference=(str(0), ""), target=0, performative=FipaMessage.Performative.ACCEPT, ) encoded_msg = FipaSerializer().encode(msg) with pytest.raises(ValueError, match="Performative not valid:"): with mock.patch.object(FipaMessage.Performative, "__eq__", return_value=False): FipaSerializer().decode(encoded_msg) class Test_dialogues: """Tests dialogues model from the packages protocols fipa.""" @classmethod def setup_class(cls): """Set up the test.""" cls.client_dialogues = FipaDialogues() cls.seller_dialogues = FipaDialogues() cls.client_addr = "client" cls.seller_addr = "seller" def 
test_create_self_initiated(self): """Test the self initialisation of a dialogue.""" result = self.client_dialogues.create_self_initiated( dialogue_starter_addr=self.client_addr, dialogue_opponent_addr=self.seller_addr, is_seller=True, ) assert isinstance(result, FipaDialogue) assert result.role == FipaDialogue.AgentRole.SELLER, "The role must be seller." def test_create_opponent_initiated(self): """Test the opponent initialisation of a dialogue.""" result = self.client_dialogues.create_opponent_initiated( dialogue_opponent_addr=self.seller_addr, dialogue_reference=(str(0), ""), is_seller=False, ) assert isinstance(result, FipaDialogue) assert result.role == FipaDialogue.AgentRole.BUYER def test_dialogue_endstates(self): """Test the end states of a dialogue.""" assert self.client_dialogues.dialogue_stats is not None self.client_dialogues.dialogue_stats.add_dialogue_endstate( FipaDialogue.EndState.SUCCESSFUL, is_self_initiated=True ) self.client_dialogues.dialogue_stats.add_dialogue_endstate( FipaDialogue.EndState.DECLINED_CFP, is_self_initiated=False ) assert self.client_dialogues.dialogue_stats.self_initiated == { FipaDialogue.EndState.SUCCESSFUL: 1, FipaDialogue.EndState.DECLINED_PROPOSE: 0, FipaDialogue.EndState.DECLINED_ACCEPT: 0, FipaDialogue.EndState.DECLINED_CFP: 0, } assert self.client_dialogues.dialogue_stats.other_initiated == { FipaDialogue.EndState.SUCCESSFUL: 0, FipaDialogue.EndState.DECLINED_PROPOSE: 0, FipaDialogue.EndState.DECLINED_ACCEPT: 0, FipaDialogue.EndState.DECLINED_CFP: 1, } assert self.client_dialogues.dialogues_as_seller is not None def test_dialogues_self_initiated_no_seller(self): """Test an end to end scenario of client-seller dialogue.""" # Initialise a dialogue client_dialogue = self.client_dialogues.create_self_initiated( dialogue_opponent_addr=self.seller_addr, dialogue_starter_addr=self.client_addr, is_seller=False, ) # Register the dialogue to the dictionary of dialogues. 
self.client_dialogues.dialogues[client_dialogue.dialogue_label] = cast( FipaDialogue, client_dialogue ) # Send a message to the seller. cfp_msg = FipaMessage( message_id=1, dialogue_reference=client_dialogue.dialogue_label.dialogue_reference, target=0, performative=FipaMessage.Performative.CFP, query=Query([Constraint("something", ConstraintType(">", 1))]), ) cfp_msg.counterparty = self.client_addr # Checking that I cannot retrieve the dialogue. retrieved_dialogue = self.client_dialogues.is_belonging_to_registered_dialogue( cfp_msg, "client" ) assert not retrieved_dialogue, "Should not belong to registered dialogue" # Checking the value error when we are trying to retrieve an un-existing dialogue. with pytest.raises(ValueError, match="Should have found dialogue."): self.client_dialogues.get_dialogue(cfp_msg, self.client_addr) # Extends the outgoing list of messages. client_dialogue.outgoing_extend(cfp_msg) # Creates a new dialogue for the seller side based on the income message. seller_dialogue = self.seller_dialogues.create_opponent_initiated( dialogue_opponent_addr=cfp_msg.counterparty, dialogue_reference=cfp_msg.dialogue_reference, is_seller=True, ) # Register the dialogue to the dictionary of dialogues. self.seller_dialogues.dialogues[seller_dialogue.dialogue_label] = cast( FipaDialogue, seller_dialogue ) # Extend the incoming list of messages. seller_dialogue.incoming_extend(cfp_msg) # Check that both fields in the dialogue_reference are set. last_msg = seller_dialogue.last_incoming_message assert last_msg == cfp_msg, "The messages must be equal" dialogue_reference = cast( Tuple[str, str], last_msg.body.get("dialogue_reference") ) assert ( dialogue_reference[0] != "" and dialogue_reference[1] == "" ), "The dialogue_reference is not set correctly." # Checks if the message we received is permitted for a new dialogue or if it is a registered dialogue. 
assert self.seller_dialogues.is_permitted_for_new_dialogue( seller_dialogue.last_incoming_message ), "Should be permitted since the first incoming msg is CFP" # Generate a proposal message to send to the client. proposal = Description({"foo1": 1, "bar1": 2}) message_id = cfp_msg.message_id + 1 target = cfp_msg.message_id proposal_msg = FipaMessage( message_id=message_id, dialogue_reference=seller_dialogue.dialogue_label.dialogue_reference, target=target, performative=FipaMessage.Performative.PROPOSE, proposal=proposal, ) proposal_msg.counterparty = self.seller_addr # Extends the outgoing list of messages. seller_dialogue.outgoing_extend(proposal_msg) # Client received the message and we extend the incoming messages list. client_dialogue.incoming_extend(proposal_msg) # Check that both fields in the dialogue_reference are set. last_msg = client_dialogue.last_incoming_message assert last_msg == proposal_msg, "The two messages must be equal." dialogue_reference = cast( Tuple[str, str], last_msg.body.get("dialogue_reference") ) assert ( dialogue_reference[0] != "" and dialogue_reference[1] != "" ), "The dialogue_reference is not setup properly." assert not self.client_dialogues.is_permitted_for_new_dialogue( client_dialogue.last_incoming_message ), "Should not be permitted since we registered the cfp message." response = self.client_dialogues.is_belonging_to_registered_dialogue( proposal_msg, agent_addr=self.client_addr ) assert response, "We expect the response from the function to be true." # Retrieve the dialogue based on the received message. retrieved_dialogue = self.client_dialogues.get_dialogue( proposal_msg, self.client_addr ) assert retrieved_dialogue.dialogue_label is not None # Create an accept_w_inform message to send seller. 
message_id = proposal_msg.message_id + 1 target = proposal_msg.message_id accept_msg = FipaMessage( message_id=message_id, dialogue_reference=client_dialogue.dialogue_label.dialogue_reference, target=target, performative=FipaMessage.Performative.ACCEPT_W_INFORM, info={"address": "dummy_address"}, ) accept_msg.counterparty = self.client_addr # Adds the message to the client outgoing list. client_dialogue.outgoing_extend(accept_msg) # Adds the message to the seller incoming message list. seller_dialogue.incoming_extend(accept_msg) # Check if this message is registered to a dialogue. response = self.seller_dialogues.is_belonging_to_registered_dialogue( accept_msg, agent_addr=self.seller_addr ) assert response, "We expect the response from the function to be true." retrieved_dialogue = self.seller_dialogues.get_dialogue( accept_msg, self.seller_addr ) assert retrieved_dialogue.dialogue_label in self.seller_dialogues.dialogues def test_dialogues_self_initiated_is_seller(self): """Test an end to end scenario of seller-client dialogue.""" # Initialise a dialogue seller_dialogue = self.seller_dialogues.create_self_initiated( dialogue_opponent_addr=self.client_addr, dialogue_starter_addr=self.seller_addr, is_seller=True, ) # Register the dialogue to the dictionary of dialogues. self.seller_dialogues.dialogues[seller_dialogue.dialogue_label] = cast( FipaDialogue, seller_dialogue ) # Send a message to the client. cfp_msg = FipaMessage( message_id=1, dialogue_reference=seller_dialogue.dialogue_label.dialogue_reference, target=0, performative=FipaMessage.Performative.CFP, query=Query([Constraint("something", ConstraintType(">", 1))]), ) cfp_msg.counterparty = self.seller_addr seller_dialogue.outgoing_extend(cfp_msg) # Creates a new dialogue for the client side based on the income message. 
client_dialogue = self.client_dialogues.create_opponent_initiated( dialogue_opponent_addr=cfp_msg.counterparty, dialogue_reference=cfp_msg.dialogue_reference, is_seller=False, ) # Register the dialogue to the dictionary of dialogues. self.client_dialogues.dialogues[client_dialogue.dialogue_label] = cast( FipaDialogue, client_dialogue ) # Extend the incoming list of messages. client_dialogue.incoming_extend(cfp_msg) # Checks if the message we received is permitted for a new dialogue or if it is a registered dialogue. assert self.client_dialogues.is_permitted_for_new_dialogue( client_dialogue.last_incoming_message ), "Should be permitted since the first incoming msg is CFP" # Generate a proposal message to send to the seller. proposal = Description({"foo1": 1, "bar1": 2}) message_id = cfp_msg.message_id + 1 target = cfp_msg.message_id proposal_msg = FipaMessage( message_id=message_id, dialogue_reference=client_dialogue.dialogue_label.dialogue_reference, target=target, performative=FipaMessage.Performative.PROPOSE, proposal=proposal, ) proposal_msg.counterparty = self.client_addr # Extends the outgoing list of messages. client_dialogue.outgoing_extend(proposal_msg) # Seller received the message and we extend the incoming messages list. seller_dialogue.incoming_extend(proposal_msg) assert not self.seller_dialogues.is_permitted_for_new_dialogue( seller_dialogue.last_incoming_message ), "Should not be permitted since we registered the cfp message." response = self.seller_dialogues.is_belonging_to_registered_dialogue( proposal_msg, agent_addr=self.seller_addr ) assert response, "We expect the response from the function to be true." 
# Test the self_initiated_dialogue explicitly message_id = proposal_msg.message_id + 1 target = proposal_msg.message_id accept_msg = FipaMessage( message_id=message_id, dialogue_reference=seller_dialogue.dialogue_label.dialogue_reference, target=target, performative=FipaMessage.Performative.ACCEPT_W_INFORM, info={"address": "dummy_address"}, ) accept_msg.counterparty = self.client_addr # Adds the message to the client outgoing list. seller_dialogue.outgoing_extend(accept_msg) # Adds the message to the seller incoming message list. client_dialogue.incoming_extend(accept_msg) # Check if this message is registered to a dialogue. response = self.seller_dialogues.is_belonging_to_registered_dialogue( accept_msg, agent_addr=self.seller_addr ) assert not response, "We expect the response from the function to be true."
38.071313
110
0.678203
2,616
23,490
5.860856
0.100917
0.044352
0.022306
0.025502
0.823898
0.779546
0.73252
0.702779
0.678646
0.643882
0
0.004824
0.232226
23,490
616
111
38.133117
0.845301
0.175053
0
0.616253
0
0
0.081786
0.002185
0
0
0
0
0.108352
1
0.038375
false
0
0.020316
0
0.060948
0
0
0
0
null
0
0
0
1
1
1
1
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
5
3871a0332ee18e0942fb3afa689fd001108d2a74
229
py
Python
prettyqt/qthelp/helpsearchquerywidget.py
phil65/PrettyQt
26327670c46caa039c9bd15cb17a35ef5ad72e6c
[ "MIT" ]
7
2019-05-01T01:34:36.000Z
2022-03-08T02:24:14.000Z
prettyqt/qthelp/helpsearchquerywidget.py
phil65/PrettyQt
26327670c46caa039c9bd15cb17a35ef5ad72e6c
[ "MIT" ]
141
2019-04-16T11:22:01.000Z
2021-04-14T15:12:36.000Z
prettyqt/qthelp/helpsearchquerywidget.py
phil65/PrettyQt
26327670c46caa039c9bd15cb17a35ef5ad72e6c
[ "MIT" ]
5
2019-04-17T11:48:19.000Z
2021-11-21T10:30:19.000Z
from __future__ import annotations from prettyqt import widgets from prettyqt.qt import QtHelp QtHelp.QHelpSearchQueryWidget.__bases__ = (widgets.Widget,) class HelpSearchQueryWidget(QtHelp.QHelpSearchQueryWidget): pass
19.083333
59
0.829694
23
229
7.913043
0.608696
0.131868
0
0
0
0
0
0
0
0
0
0
0.117904
229
11
60
20.818182
0.90099
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0.166667
0.5
0
0.666667
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
1
1
0
1
0
0
5
38adc48e9a56ff3f5e5e7f97174bc12c9a24bac6
4,531
py
Python
tests/build_models/test_build_case.py
bjhall/loqusdb
55ee806662848eeffd266bf65d4b4eb24e534a89
[ "MIT" ]
4
2018-06-04T12:42:45.000Z
2021-03-29T20:36:12.000Z
tests/build_models/test_build_case.py
bjhall/loqusdb
55ee806662848eeffd266bf65d4b4eb24e534a89
[ "MIT" ]
50
2016-02-26T07:54:39.000Z
2021-10-12T07:52:01.000Z
tests/build_models/test_build_case.py
bjhall/loqusdb
55ee806662848eeffd266bf65d4b4eb24e534a89
[ "MIT" ]
8
2016-02-29T13:50:46.000Z
2020-04-22T10:15:23.000Z
from pprint import pprint as pp import pytest from loqusdb.build_models.case import (build_case, get_individual_positions) from loqusdb.exceptions import CaseError def test_get_individual_positions(): ## GIVEN a list with ids inds = ['1','2','3'] ## WHEN getting the individual positions ind_pos = get_individual_positions(inds) ## THEN assert they where given the correct position assert ind_pos['1'] == 0 assert ind_pos['2'] == 1 assert ind_pos['3'] == 2 def test_get_individual_positions_no_inds(): ## GIVEN a list with ids inds = None ## WHEN getting the individual positions ind_pos = get_individual_positions(inds) ## THEN assert an empty dict is returned assert ind_pos == {} def test_build_case_no_ped(): ## GIVEN some vcf individuals vcf_individuals = ['mother', 'proband'] case_id = 'test' ## WHEN building a case object case_obj = build_case( case = None, vcf_individuals=vcf_individuals, case_id=case_id, ) ## THEN assert that the case got the right ID assert case_obj['case_id'] == case_id for ind_obj in case_obj['individuals']: assert ind_obj['name'] in vcf_individuals assert ind_obj['ind_id'] in vcf_individuals def test_build_case_no_ped_no_case_id(): ## GIVEN some vcf individuals vcf_individuals = ['mother', 'proband'] ## WHEN building a case object ## THEN assert a CaseError is raised with pytest.raises(CaseError): case_obj = build_case( case = None, vcf_individuals=vcf_individuals, ) def test_build_case_ped(family_obj, vcf_path): ## GIVEN a ped parser family_obj vcf_inds = [ind_id for ind_id in family_obj.individuals] nr_variants = 10 ## WHEN building a case object case_obj = build_case( case = family_obj, vcf_individuals=vcf_inds, vcf_path = vcf_path, nr_variants = nr_variants, ) ## THEN assert that the case has the correct id assert case_obj['case_id'] == family_obj.family_id for ind_obj in case_obj['individuals']: assert ind_obj['ind_id'] in vcf_inds ## THEN assert that the vcf_path was added assert case_obj['vcf_path'] == vcf_path ## THEN assert that the nr 
variants is correct assert case_obj['nr_variants'] == nr_variants def test_build_case_ped_sv(family_obj, sv_vcf_path): ## GIVEN a ped parser family_obj vcf_inds = [ind_id for ind_id in family_obj.individuals] nr_sv_variants = 10 ## WHEN building a case object case_obj = build_case( case = family_obj, sv_individuals=vcf_inds, vcf_sv_path = sv_vcf_path, nr_sv_variants = nr_sv_variants, ) ## THEN assert that the case has the correct id assert case_obj['case_id'] == family_obj.family_id case_obj['individuals'] == [] for ind_obj in case_obj['sv_individuals']: assert ind_obj['ind_id'] in vcf_inds ## THEN assert that the vcf_path was added assert case_obj['vcf_path'] == None assert case_obj['vcf_sv_path'] == sv_vcf_path ## THEN assert that the nr variants is correct assert case_obj['nr_variants'] == None assert case_obj['nr_sv_variants'] == nr_sv_variants def test_build_case_ped_sv_and_snv(family_obj, sv_vcf_path, vcf_path): ## GIVEN a ped parser family_obj vcf_inds = [ind_id for ind_id in family_obj.individuals] sv_vcf_inds = [ind_id for ind_id in family_obj.individuals] nr_sv_variants = 10 nr_variants = 20 ## WHEN building a case object case_obj = build_case( case = family_obj, sv_individuals=vcf_inds, vcf_sv_path = sv_vcf_path, nr_sv_variants = nr_sv_variants, vcf_individuals=vcf_inds, vcf_path = vcf_path, nr_variants = nr_variants, ) ## THEN assert that the case has the correct id assert case_obj['case_id'] == family_obj.family_id for ind_obj in case_obj['individuals']: assert ind_obj['ind_id'] in vcf_inds for ind_obj in case_obj['sv_individuals']: assert ind_obj['ind_id'] in sv_vcf_inds ## THEN assert that the vcf_path was added assert case_obj['vcf_path'] == vcf_path assert case_obj['vcf_sv_path'] == sv_vcf_path ## THEN assert that the nr variants is correct assert case_obj['nr_variants'] == nr_variants assert case_obj['nr_sv_variants'] == nr_sv_variants
31.034247
76
0.670051
671
4,531
4.196721
0.113264
0.062145
0.064631
0.060369
0.850497
0.802912
0.756037
0.741122
0.693892
0.667614
0
0.004996
0.248952
4,531
145
77
31.248276
0.82251
0.211432
0
0.597701
0
0
0.078738
0
0
0
0
0
0.275862
1
0.08046
false
0
0.045977
0
0.126437
0.011494
0
0
0
null
0
0
0
1
1
1
1
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
5
2a432d0bad3f78f15421155e2c998efb082b863c
2,603
py
Python
stubs.min/System/Windows/Navigation.py
hdm-dt-fb/ironpython-stubs
4d2b405eda3ceed186e8adca55dd97c332c6f49d
[ "MIT" ]
1
2017-07-25T14:30:18.000Z
2017-07-25T14:30:18.000Z
stubs.min/System/Windows/Navigation.py
hdm-dt-fb/ironpython-stubs
4d2b405eda3ceed186e8adca55dd97c332c6f49d
[ "MIT" ]
null
null
null
stubs.min/System/Windows/Navigation.py
hdm-dt-fb/ironpython-stubs
4d2b405eda3ceed186e8adca55dd97c332c6f49d
[ "MIT" ]
null
null
null
# encoding: utf-8 # module System.Windows.Navigation calls itself Navigation # from PresentationFramework,Version=4.0.0.0,Culture=neutral,PublicKeyToken=31bf3856ad364e35,PresentationCore,Version=4.0.0.0,Culture=neutral,PublicKeyToken=31bf3856ad364e35 # by generator 1.145 # no doc # no imports # no functions # classes from Navigation_parts.BaseUriHelper import BaseUriHelper from Navigation_parts.CustomContentState import CustomContentState from Navigation_parts.FragmentNavigationEventArgs import FragmentNavigationEventArgs from Navigation_parts.FragmentNavigationEventHandler import FragmentNavigationEventHandler from Navigation_parts.IProvideCustomContentState import IProvideCustomContentState from Navigation_parts.JournalEntry import JournalEntry from Navigation_parts.JournalEntryListConverter import JournalEntryListConverter from Navigation_parts.JournalEntryPosition import JournalEntryPosition from Navigation_parts.JournalEntryUnifiedViewConverter import JournalEntryUnifiedViewConverter from Navigation_parts.JournalOwnership import JournalOwnership from Navigation_parts.LoadCompletedEventHandler import LoadCompletedEventHandler from Navigation_parts.NavigatedEventHandler import NavigatedEventHandler from Navigation_parts.NavigatingCancelEventArgs import NavigatingCancelEventArgs from Navigation_parts.NavigatingCancelEventHandler import NavigatingCancelEventHandler from Navigation_parts.NavigationEventArgs import NavigationEventArgs from Navigation_parts.NavigationFailedEventArgs import NavigationFailedEventArgs from Navigation_parts.NavigationFailedEventHandler import NavigationFailedEventHandler from Navigation_parts.NavigationMode import NavigationMode from Navigation_parts.NavigationProgressEventArgs import NavigationProgressEventArgs from Navigation_parts.NavigationProgressEventHandler import NavigationProgressEventHandler from Navigation_parts.NavigationService import NavigationService from Navigation_parts.NavigationStoppedEventHandler import 
NavigationStoppedEventHandler from Navigation_parts.NavigationUIVisibility import NavigationUIVisibility from Navigation_parts.NavigationWindow import NavigationWindow from Navigation_parts.PageFunctionBase import PageFunctionBase from Navigation_parts.PageFunction import PageFunction from Navigation_parts.RequestNavigateEventArgs import RequestNavigateEventArgs from Navigation_parts.RequestNavigateEventHandler import RequestNavigateEventHandler from Navigation_parts.ReturnEventArgs import ReturnEventArgs from Navigation_parts.ReturnEventHandler import ReturnEventHandler
65.075
174
0.904341
222
2,603
10.468468
0.27027
0.180723
0.245267
0.008606
0.047332
0.047332
0.047332
0.047332
0.047332
0
0
0.014487
0.07184
2,603
39
175
66.74359
0.947434
0.11602
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
0
0
1
null
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
2a5eb217c55f741b353b6f8dcf9505bd263e267f
135
py
Python
scripts/calc.py
mvanleeu/csvmonkey
7f817204801cb4d6f78388d89d7d3ff9a8b0be10
[ "BSD-3-Clause" ]
98
2017-05-10T15:46:06.000Z
2022-01-10T02:00:05.000Z
scripts/calc.py
mvanleeu/csvmonkey
7f817204801cb4d6f78388d89d7d3ff9a8b0be10
[ "BSD-3-Clause" ]
8
2018-03-18T22:49:56.000Z
2022-03-27T17:14:56.000Z
scripts/calc.py
mvanleeu/csvmonkey
7f817204801cb4d6f78388d89d7d3ff9a8b0be10
[ "BSD-3-Clause" ]
15
2017-08-04T08:17:00.000Z
2022-03-25T21:05:49.000Z
#!/usr/bin/env python import os.path import sys print (os.path.getsize('ram.csv') / 1048576.0) / (float(sys.argv[1]) / 1e6), 'GiB/s'
19.285714
84
0.651852
24
135
3.666667
0.833333
0.136364
0
0
0
0
0
0
0
0
0
0.09322
0.125926
135
6
85
22.5
0.652542
0.148148
0
0
0
0
0.105263
0
0
0
0
0
0
1
0
true
0
0.666667
0
0.666667
0.333333
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
2a98e59a09c5adc8bfc50de62bdda4d48e369d51
266
py
Python
{{cookiecutter.repo_name}}/mypack/test_module2.py
payalupadhyaya/myproject
9dacbed1bf5b1524d66491a1b65e8b3b2ecd56a4
[ "Apache-2.0" ]
null
null
null
{{cookiecutter.repo_name}}/mypack/test_module2.py
payalupadhyaya/myproject
9dacbed1bf5b1524d66491a1b65e8b3b2ecd56a4
[ "Apache-2.0" ]
null
null
null
{{cookiecutter.repo_name}}/mypack/test_module2.py
payalupadhyaya/myproject
9dacbed1bf5b1524d66491a1b65e8b3b2ecd56a4
[ "Apache-2.0" ]
null
null
null
from __future__ import unicode_literals, absolute_import from __future__ import print_function, division from mypack.packmodule import module3 def cust_print(): print("Hi Hello") def test_mypackage(): cust_print() module3() print("Test ended")
16.625
56
0.74812
33
266
5.606061
0.575758
0.108108
0.172973
0
0
0
0
0
0
0
0
0.009091
0.172932
266
15
57
17.733333
0.831818
0
0
0
0
0
0.068702
0
0
0
0
0
0
1
0.222222
true
0
0.333333
0
0.555556
0.555556
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
1
0
1
0
1
1
0
5
aa534f08de3366f1db918905836d7866ad00eeab
194
py
Python
mail_ticket/models.py
weijia/mail-ticket
cdbc2b863ce9eeed4527e42c347582cf9873f005
[ "BSD-3-Clause" ]
null
null
null
mail_ticket/models.py
weijia/mail-ticket
cdbc2b863ce9eeed4527e42c347582cf9873f005
[ "BSD-3-Clause" ]
null
null
null
mail_ticket/models.py
weijia/mail-ticket
cdbc2b863ce9eeed4527e42c347582cf9873f005
[ "BSD-3-Clause" ]
null
null
null
# -*- coding: utf-8 -*- # Create your models here. from djangoautoconf.model_duplicator import get_duplicated_model from post_office.models import Email c = get_duplicated_model(Email, "Email")
32.333333
64
0.78866
27
194
5.444444
0.666667
0.176871
0.244898
0
0
0
0
0
0
0
0
0.005814
0.113402
194
6
65
32.333333
0.848837
0.237113
0
0
0
0
0.034247
0
0
0
0
0
0
1
0
false
0
0.666667
0
0.666667
0
1
0
0
null
0
1
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
1
0
0
5
2ab4a55ef6439735e4067bf56ef68c7b8aa07c86
25
py
Python
test/test_minigrad.py
MANU-CHAUHAN/minigrad
a71cdf505974b9d2fb5e1730dcc483ec42e92c17
[ "MIT" ]
null
null
null
test/test_minigrad.py
MANU-CHAUHAN/minigrad
a71cdf505974b9d2fb5e1730dcc483ec42e92c17
[ "MIT" ]
null
null
null
test/test_minigrad.py
MANU-CHAUHAN/minigrad
a71cdf505974b9d2fb5e1730dcc483ec42e92c17
[ "MIT" ]
null
null
null
# unit tests for minigrad
25
25
0.8
4
25
5
1
0
0
0
0
0
0
0
0
0
0
0
0.16
25
1
25
25
0.952381
0.92
0
null
0
null
0
0
null
0
0
0
null
1
null
true
0
0
null
null
null
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
1
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
0
0
0
0
0
5
631ecb40500c7beca84aab139087fda326136100
17
py
Python
hello_world.py
stelioskarydakis/profiles-rest-api
564d972c7681e4ee48e2ca61d4a41afb466aaee3
[ "MIT" ]
null
null
null
hello_world.py
stelioskarydakis/profiles-rest-api
564d972c7681e4ee48e2ca61d4a41afb466aaee3
[ "MIT" ]
null
null
null
hello_world.py
stelioskarydakis/profiles-rest-api
564d972c7681e4ee48e2ca61d4a41afb466aaee3
[ "MIT" ]
null
null
null
print('hellooo')
8.5
16
0.705882
2
17
6
1
0
0
0
0
0
0
0
0
0
0
0
0.058824
17
1
17
17
0.75
0
0
0
0
0
0.411765
0
0
0
0
0
0
1
0
true
0
0
0
0
1
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
0
0
0
1
0
5
63251b28373ae2e15b52c7942c2bc80a824e5732
206
py
Python
award_app/admin.py
Maureen-1998DEV/Awards_app
9cfa481fd3303f64fca0c2132b49e040dfc5c67b
[ "MIT" ]
null
null
null
award_app/admin.py
Maureen-1998DEV/Awards_app
9cfa481fd3303f64fca0c2132b49e040dfc5c67b
[ "MIT" ]
null
null
null
award_app/admin.py
Maureen-1998DEV/Awards_app
9cfa481fd3303f64fca0c2132b49e040dfc5c67b
[ "MIT" ]
null
null
null
from django.contrib import admin from .models import Comments, Profile,Project,Comments # Register your models here. admin.site.register(Profile) admin.site.register(Project) admin.site.register(Comments)
25.75
54
0.820388
28
206
6.035714
0.464286
0.159763
0.301775
0
0
0
0
0
0
0
0
0
0.087379
206
7
55
29.428571
0.898936
0.126214
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
0.4
0
0.4
0
1
0
0
null
0
1
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
0
0
5
2d908f3134c9743a2f1246e03d5d57efa087e3f4
21
py
Python
__init__.py
KaiserMovet/where_is_my_money2
0d88571824e7be1500bf800f86af2ced04033891
[ "MIT" ]
1
2020-06-21T21:09:36.000Z
2020-06-21T21:09:36.000Z
__init__.py
KaiserMovet/where_is_my_money2
0d88571824e7be1500bf800f86af2ced04033891
[ "MIT" ]
null
null
null
__init__.py
KaiserMovet/where_is_my_money2
0d88571824e7be1500bf800f86af2ced04033891
[ "MIT" ]
null
null
null
from .wim import Wim
10.5
20
0.761905
4
21
4
0.75
0
0
0
0
0
0
0
0
0
0
0
0.190476
21
1
21
21
0.941176
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
0
0
5
2da4ffe2fedd99b5d456ff668f5bceba34896856
33
py
Python
testing/example_scripts/config/collect_pytest_prefix/conftest.py
markshao/pytest
611b579d21f7e62b4c8ed54ab70fbfee7c6f5f64
[ "MIT" ]
9,225
2015-06-15T21:56:14.000Z
2022-03-31T20:47:38.000Z
testing/example_scripts/config/collect_pytest_prefix/conftest.py
markshao/pytest
611b579d21f7e62b4c8ed54ab70fbfee7c6f5f64
[ "MIT" ]
7,794
2015-06-15T21:06:34.000Z
2022-03-31T10:56:54.000Z
testing/example_scripts/config/collect_pytest_prefix/conftest.py
markshao/pytest
611b579d21f7e62b4c8ed54ab70fbfee7c6f5f64
[ "MIT" ]
2,598
2015-06-15T21:42:39.000Z
2022-03-29T13:48:22.000Z
class pytest_something: pass
11
23
0.757576
4
33
6
1
0
0
0
0
0
0
0
0
0
0
0
0.212121
33
2
24
16.5
0.923077
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0.5
0
0
0.5
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
1
0
0
0
0
0
5
2dcb60963dde05fa33ea2b8732ce253cd77fe052
64
py
Python
pisat/tester/comm/__init__.py
jjj999/pisat
be96cfcd4a68ece14cb5f8745ab138e3931eb761
[ "MIT" ]
1
2020-09-23T08:23:47.000Z
2020-09-23T08:23:47.000Z
pisat/tester/comm/__init__.py
jjj999/pisat
be96cfcd4a68ece14cb5f8745ab138e3931eb761
[ "MIT" ]
4
2020-07-01T06:31:05.000Z
2021-05-04T05:53:01.000Z
pisat/tester/comm/__init__.py
jjj999/pisat
be96cfcd4a68ece14cb5f8745ab138e3931eb761
[ "MIT" ]
2
2020-07-01T06:08:00.000Z
2020-09-20T00:52:43.000Z
from pisat.tester.comm.test_transceiver import TestTransceiver
21.333333
62
0.875
8
64
6.875
1
0
0
0
0
0
0
0
0
0
0
0
0.078125
64
2
63
32
0.932203
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
93135db49a4e749d4da86ffca76244b0e318c228
112
py
Python
daftlistings/__init__.py
iandioch/daftlistings
729d0f460296f0a7d11d5776c97190bb9eee7e3d
[ "MIT" ]
null
null
null
daftlistings/__init__.py
iandioch/daftlistings
729d0f460296f0a7d11d5776c97190bb9eee7e3d
[ "MIT" ]
null
null
null
daftlistings/__init__.py
iandioch/daftlistings
729d0f460296f0a7d11d5776c97190bb9eee7e3d
[ "MIT" ]
null
null
null
from .daft import * from .enums import * from .exceptions import DaftException from .map_visualization import *
22.4
37
0.794643
14
112
6.285714
0.571429
0.227273
0
0
0
0
0
0
0
0
0
0
0.142857
112
4
38
28
0.916667
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
0
0
5
9363f0378588e14c1ccb14d4fd76b5ef0446e5bb
212
py
Python
pyrolite/plot/templates/__init__.py
JustinGOSSES/pyrolite
21eb5b28d9295625241b73b820fc8892b00fc6b0
[ "BSD-3-Clause" ]
1
2020-03-13T07:11:47.000Z
2020-03-13T07:11:47.000Z
pyrolite/plot/templates/__init__.py
JustinGOSSES/pyrolite
21eb5b28d9295625241b73b820fc8892b00fc6b0
[ "BSD-3-Clause" ]
null
null
null
pyrolite/plot/templates/__init__.py
JustinGOSSES/pyrolite
21eb5b28d9295625241b73b820fc8892b00fc6b0
[ "BSD-3-Clause" ]
null
null
null
""" A utility submodule for standardised plot templates to be added to matplotlib axes. """ from .pearce import pearceThNbYb, pearceTiNbYb from .TAS import TAS __all__ = ["pearceThNbYb", "pearceTiNbYb", "TAS"]
23.555556
83
0.754717
26
212
6
0.730769
0.307692
0
0
0
0
0
0
0
0
0
0
0.146226
212
8
84
26.5
0.861878
0.391509
0
0
0
0
0.223141
0
0
0
0
0
0
1
0
false
0
0.666667
0
0.666667
0
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
1
0
0
5
936ae7e86840fb374ade6cc47209016085f42a11
158
py
Python
136:A - Vanya and Fence/script.py
treeindev/CodeForces
b3bcc332e0330a6588f021ff766737a996577147
[ "MIT" ]
null
null
null
136:A - Vanya and Fence/script.py
treeindev/CodeForces
b3bcc332e0330a6588f021ff766737a996577147
[ "MIT" ]
null
null
null
136:A - Vanya and Fence/script.py
treeindev/CodeForces
b3bcc332e0330a6588f021ff766737a996577147
[ "MIT" ]
null
null
null
from functools import reduce c,p = map(int, raw_input().split()), map(int, raw_input().split()) print( reduce( lambda s,i : s+1 if i <= c[1] else s+2, p, 0) )
52.666667
66
0.639241
32
158
3.09375
0.625
0.121212
0.181818
0.282828
0.383838
0
0
0
0
0
0
0.029851
0.151899
158
3
67
52.666667
0.708955
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
0.333333
0
0.333333
0.333333
0
0
0
null
0
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
0
0
5
fa7d0da81234c6d8a96fa1520340e8c178db3c9c
85
py
Python
plugins/typingrole/__init__.py
Antonio32A/AntiInvisible
efba35a88115771fcef252bf0e07bdddaa2e3c76
[ "MIT" ]
null
null
null
plugins/typingrole/__init__.py
Antonio32A/AntiInvisible
efba35a88115771fcef252bf0e07bdddaa2e3c76
[ "MIT" ]
null
null
null
plugins/typingrole/__init__.py
Antonio32A/AntiInvisible
efba35a88115771fcef252bf0e07bdddaa2e3c76
[ "MIT" ]
2
2019-04-28T18:15:48.000Z
2022-03-14T16:27:23.000Z
from .typingrole import TypingRole def setup(bot): bot.add_cog(TypingRole(bot))
17
34
0.752941
12
85
5.25
0.666667
0
0
0
0
0
0
0
0
0
0
0
0.141176
85
4
35
21.25
0.863014
0
0
0
0
0
0
0
0
0
0
0
0
1
0.333333
false
0
0.333333
0
0.666667
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
1
0
1
0
0
5
faa5cb2f19ee229eff0c1a7b28f758a6fdb6e357
108
py
Python
tests/ast/input/test-if.py
Nakrez/RePy
057db55a99eac2c5cb3d622fa1f2e29f6083d8d6
[ "MIT" ]
1
2020-11-24T05:24:26.000Z
2020-11-24T05:24:26.000Z
tests/bind/good/test-if.py
Nakrez/RePy
057db55a99eac2c5cb3d622fa1f2e29f6083d8d6
[ "MIT" ]
null
null
null
tests/bind/good/test-if.py
Nakrez/RePy
057db55a99eac2c5cb3d622fa1f2e29f6083d8d6
[ "MIT" ]
null
null
null
if 1: 1 elif 2: 2 elif 3: 3 else: if 4: 4 elif 5: 5 else: 6
7.714286
11
0.324074
18
108
1.944444
0.5
0
0
0
0
0
0
0
0
0
0
0.261905
0.611111
108
13
12
8.307692
0.571429
0
0
0.153846
0
0
0
0
0
0
0
0
0
1
0
true
0
0
0
0
0
1
0
1
null
0
0
0
0
0
0
0
0
0
0
1
1
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
0
0
0
0
0
5
fabf81b444c4b2893efde9cede75e56dc229a17b
75
py
Python
AULA 08 - TRABALHANDO COM MODULOS/Aula08a.py
Jumbeba/CursoEmVideoPython
0d1b4e7c1deae5f962d8310ed56dcbe42ada1dca
[ "Unlicense" ]
null
null
null
AULA 08 - TRABALHANDO COM MODULOS/Aula08a.py
Jumbeba/CursoEmVideoPython
0d1b4e7c1deae5f962d8310ed56dcbe42ada1dca
[ "Unlicense" ]
null
null
null
AULA 08 - TRABALHANDO COM MODULOS/Aula08a.py
Jumbeba/CursoEmVideoPython
0d1b4e7c1deae5f962d8310ed56dcbe42ada1dca
[ "Unlicense" ]
null
null
null
import emoji print(emoji.emojize("Olá mundo :rainbow:", use_aliases=True))
25
61
0.773333
11
75
5.181818
0.909091
0
0
0
0
0
0
0
0
0
0
0
0.08
75
2
62
37.5
0.826087
0
0
0
0
0
0.253333
0
0
0
0
0
0
1
0
true
0
0.5
0
0.5
0.5
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
1
0
5
878d9300187924817c7c873bae16a856bf753eaa
137
py
Python
Week 3/Python Track/Mutuation.py
Dawit-Getachew/A2SV_Practice
2fe06d725e0acfe668c6dae98fe3ef6e6e26ef61
[ "MIT" ]
null
null
null
Week 3/Python Track/Mutuation.py
Dawit-Getachew/A2SV_Practice
2fe06d725e0acfe668c6dae98fe3ef6e6e26ef61
[ "MIT" ]
null
null
null
Week 3/Python Track/Mutuation.py
Dawit-Getachew/A2SV_Practice
2fe06d725e0acfe668c6dae98fe3ef6e6e26ef61
[ "MIT" ]
null
null
null
def mutate_string(string, position, character): newString = string[:position] + character + string [position+1:] return newString
45.666667
68
0.737226
15
137
6.666667
0.533333
0.42
0.46
0
0
0
0
0
0
0
0
0.008621
0.153285
137
3
69
45.666667
0.853448
0
0
0
0
0
0
0
0
0
0
0
0
1
0.333333
false
0
0
0
0.666667
0
1
0
0
null
1
1
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
0
0
0
0
5
87d2303534a812e589315f3fc041e1981c41e75a
179
py
Python
smartystreets_python_sdk/us_zipcode/__init__.py
jasonrfarkas/smartystreets-python-sdk
bcb94efc09c795222eb1bd85544073a6cc063a46
[ "Apache-2.0" ]
19
2017-01-20T16:34:19.000Z
2021-12-09T15:56:09.000Z
smartystreets_python_sdk/us_zipcode/__init__.py
jasonrfarkas/smartystreets-python-sdk
bcb94efc09c795222eb1bd85544073a6cc063a46
[ "Apache-2.0" ]
25
2016-12-11T01:20:19.000Z
2022-03-24T19:59:25.000Z
smartystreets_python_sdk/us_zipcode/__init__.py
jasonrfarkas/smartystreets-python-sdk
bcb94efc09c795222eb1bd85544073a6cc063a46
[ "Apache-2.0" ]
28
2016-12-31T17:06:07.000Z
2022-02-17T00:09:02.000Z
from .city import City from .zipcode import ZipCode from .result import Result from .lookup import Lookup from .client import Client from .alternate_county import AlternateCounty
25.571429
45
0.832402
25
179
5.92
0.4
0
0
0
0
0
0
0
0
0
0
0
0.134078
179
6
46
29.833333
0.954839
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
87e75ad2319ee0961f50919f16a10c803b84254f
124
py
Python
HMM/__init__.py
xiaozhouw/663
88ddd49441ba5903bc1e730af60bb8e2a653d6f8
[ "MIT" ]
null
null
null
HMM/__init__.py
xiaozhouw/663
88ddd49441ba5903bc1e730af60bb8e2a653d6f8
[ "MIT" ]
null
null
null
HMM/__init__.py
xiaozhouw/663
88ddd49441ba5903bc1e730af60bb8e2a653d6f8
[ "MIT" ]
null
null
null
__all__=["hmm","hmm_unoptimized"] from .hmm import forward,backward,Viterbi,Baum_Welch,sim_HMM from . import hmm_unoptimized
41.333333
60
0.822581
18
124
5.222222
0.611111
0.297872
0
0
0
0
0
0
0
0
0
0
0.064516
124
3
61
41.333333
0.810345
0
0
0
0
0
0.144
0
0
0
0
0
0
1
0
false
0
0.666667
0
0.666667
0
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
1
0
0
5
356ab9fe47a56d558735ef5921d6f7e65a43825f
42
py
Python
lithops/serverless/__init__.py
kpavel/lithops
395eef8b283512bd714d3633dcd94258e1df620c
[ "Apache-2.0" ]
158
2020-09-16T13:22:03.000Z
2022-03-28T20:01:31.000Z
lithops/serverless/__init__.py
kpavel/lithops
395eef8b283512bd714d3633dcd94258e1df620c
[ "Apache-2.0" ]
256
2018-05-20T13:01:51.000Z
2020-09-16T09:09:54.000Z
lithops/serverless/__init__.py
kpavel/lithops
395eef8b283512bd714d3633dcd94258e1df620c
[ "Apache-2.0" ]
48
2020-09-19T15:29:53.000Z
2022-03-23T17:08:24.000Z
from .serverless import ServerlessHandler
21
41
0.880952
4
42
9.25
1
0
0
0
0
0
0
0
0
0
0
0
0.095238
42
1
42
42
0.973684
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
0
0
5
357c63254831f51ab0ffb669e729eb48112b38aa
100
py
Python
shpaml/__init__.py
cool-RR/shpaml
9ec0183ecd8ad73e054bacadf4852b8ff71f0227
[ "BSD-3-Clause" ]
null
null
null
shpaml/__init__.py
cool-RR/shpaml
9ec0183ecd8ad73e054bacadf4852b8ff71f0227
[ "BSD-3-Clause" ]
null
null
null
shpaml/__init__.py
cool-RR/shpaml
9ec0183ecd8ad73e054bacadf4852b8ff71f0227
[ "BSD-3-Clause" ]
null
null
null
from loader import Loader from loaders import shpaml_loaders as _loaders locals().update(_loaders)
20
46
0.83
14
100
5.714286
0.571429
0
0
0
0
0
0
0
0
0
0
0
0.12
100
4
47
25
0.909091
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
0.666667
0
0.666667
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
ea4b91370d08f5b956c14ae4aa34687e6550c7a9
138
py
Python
suzieq/engines/pandas/topcpu.py
foobug/suzieq
c5927616a0e1a1fd9283f2a3eeb120d24ff0f2b5
[ "Apache-2.0" ]
487
2020-04-29T13:34:34.000Z
2022-03-31T06:13:41.000Z
suzieq/engines/pandas/topcpu.py
foobug/suzieq
c5927616a0e1a1fd9283f2a3eeb120d24ff0f2b5
[ "Apache-2.0" ]
410
2020-04-24T20:57:52.000Z
2022-03-31T18:07:48.000Z
suzieq/engines/pandas/topcpu.py
foobug/suzieq
c5927616a0e1a1fd9283f2a3eeb120d24ff0f2b5
[ "Apache-2.0" ]
75
2020-04-29T22:13:34.000Z
2022-03-31T17:00:17.000Z
from .engineobj import SqPandasEngine class TopcpuObj(SqPandasEngine): @staticmethod def table_name(): return 'topcpu'
15.333333
37
0.710145
13
138
7.461538
0.923077
0
0
0
0
0
0
0
0
0
0
0
0.217391
138
8
38
17.25
0.898148
0
0
0
0
0
0.043478
0
0
0
0
0
0
1
0.2
true
0
0.2
0.2
0.8
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
0
1
1
0
0
5
ea7b8dfc915e8b4b8a258e969f0fe7cdd1f51151
360
py
Python
metafarmerx/cli/console.py
0xsamsar/MetaFarmerX
62b5171c5f63355c69b7a3089c0ca7a56e8f7d9f
[ "MIT" ]
4
2022-02-13T01:30:34.000Z
2022-03-01T21:41:53.000Z
metafarmerx/cli/console.py
0xsamsar/deltra-neutral-yield-farming
62b5171c5f63355c69b7a3089c0ca7a56e8f7d9f
[ "MIT" ]
null
null
null
metafarmerx/cli/console.py
0xsamsar/deltra-neutral-yield-farming
62b5171c5f63355c69b7a3089c0ca7a56e8f7d9f
[ "MIT" ]
null
null
null
INIT_MSG = (''' __ ___ __ ______ _ __ / |/ /__ / /_____ _/ ____/___ __________ ___ ___ ____| |/ / / /|_/ / _ \/ __/ __ `/ /_ / __ `/ ___/ __ `__ \/ _ \/ ___/ / / / / / __/ /_/ /_/ / __/ / /_/ / / / / / / / / __/ / / | /_/ /_/\___/\__/\__,_/_/ \__,_/_/ /_/ /_/ /_/\___/_/ /_/|_| ''')
51.428571
82
0.311111
2
360
3.5
1
0
0
0
0
0
0
0
0
0
0
0
0.416667
360
7
83
51.428571
0.033333
0
0
0
0
0.5
0.944598
0
0
0
0
0
0
1
0
false
0
0
0
0
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
1
0
0
0
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
5
ea7c327257f57c5ffc44b755df23c00ba97f1fe7
2,098
py
Python
tbx/core/migrations/0121_remove_unused_standardpage_fields.py
arush15june/wagtail-torchbox
c4d06e096c72bd8007975dc016133024f9d27fab
[ "MIT" ]
null
null
null
tbx/core/migrations/0121_remove_unused_standardpage_fields.py
arush15june/wagtail-torchbox
c4d06e096c72bd8007975dc016133024f9d27fab
[ "MIT" ]
null
null
null
tbx/core/migrations/0121_remove_unused_standardpage_fields.py
arush15june/wagtail-torchbox
c4d06e096c72bd8007975dc016133024f9d27fab
[ "MIT" ]
null
null
null
# Generated by Django 2.1.5 on 2019-01-24 14:35 from django.db import migrations class Migration(migrations.Migration): dependencies = [ ('torchbox', '0120_remove_contactpage'), ] operations = [ migrations.RemoveField( model_name='standardpageclient', name='image', ), migrations.RemoveField( model_name='standardpageclient', name='link_document', ), migrations.RemoveField( model_name='standardpageclient', name='link_page', ), migrations.RemoveField( model_name='standardpageclient', name='page', ), migrations.RemoveField( model_name='standardpagecontentblock', name='page', ), migrations.RemoveField( model_name='standardpagerelatedlink', name='link_document', ), migrations.RemoveField( model_name='standardpagerelatedlink', name='link_page', ), migrations.RemoveField( model_name='standardpagerelatedlink', name='page', ), migrations.RemoveField( model_name='standardpage', name='credit', ), migrations.RemoveField( model_name='standardpage', name='email', ), migrations.RemoveField( model_name='standardpage', name='feed_image', ), migrations.RemoveField( model_name='standardpage', name='heading', ), migrations.RemoveField( model_name='standardpage', name='main_image', ), migrations.RemoveField( model_name='standardpage', name='quote', ), migrations.DeleteModel( name='StandardPageClient', ), migrations.DeleteModel( name='StandardPageContentBlock', ), migrations.DeleteModel( name='StandardPageRelatedLink', ), ]
26.556962
50
0.533365
143
2,098
7.671329
0.293706
0.268004
0.331814
0.382862
0.689152
0.684594
0.411121
0
0
0
0
0.014222
0.363203
2,098
78
51
26.897436
0.806886
0.021449
0
0.75
1
0
0.213067
0.079473
0
0
0
0
0
1
0
false
0
0.013889
0
0.055556
0
0
0
0
null
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
1
1
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
5
ea9a9b38894ef23dd0990670ea0a6247e15c9225
120
py
Python
testguess/templates/testguess/anonymous_user.py
kezabelle/django-testguess
0d33fd3291157f6454e74e47944767de5dc8a91c
[ "BSD-2-Clause" ]
null
null
null
testguess/templates/testguess/anonymous_user.py
kezabelle/django-testguess
0d33fd3291157f6454e74e47944767de5dc8a91c
[ "BSD-2-Clause" ]
null
null
null
testguess/templates/testguess/anonymous_user.py
kezabelle/django-testguess
0d33fd3291157f6454e74e47944767de5dc8a91c
[ "BSD-2-Clause" ]
null
null
null
from django.contrib.auth.models import AnonymousUser self.user = AnonymousUser() self.auth = {}
30
60
0.625
12
120
6.25
0.75
0.453333
0
0
0
0
0
0
0
0
0
0
0.283333
120
3
61
40
0.872093
0
0
0
0
0
0
0
0
0
0
0
0
0
null
null
0
0.333333
null
null
0
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
1
0
0
0
1
0
0
0
0
5
5776ce016b839cc57604005351cf424418340de7
74
py
Python
tests/test_training.py
lucmos/spectral-unions
39991fcbfef36b8e6a2b3578a3be36e9e7c3bbc5
[ "MIT" ]
67
2022-02-06T14:45:44.000Z
2022-03-28T22:09:49.000Z
tests/test_training.py
lucmos/spectral-unions
39991fcbfef36b8e6a2b3578a3be36e9e7c3bbc5
[ "MIT" ]
1
2022-03-17T03:09:20.000Z
2022-03-21T23:12:18.000Z
tests/test_training.py
lucmos/spectral-unions
39991fcbfef36b8e6a2b3578a3be36e9e7c3bbc5
[ "MIT" ]
2
2022-03-01T13:12:16.000Z
2022-03-13T14:18:11.000Z
def test_train_loop(run_trainings: str) -> None: assert run_trainings
24.666667
48
0.77027
11
74
4.818182
0.818182
0.45283
0
0
0
0
0
0
0
0
0
0
0.148649
74
2
49
37
0.84127
0
0
0
0
0
0
0
0
0
0
0
0.5
1
0.5
false
0
0
0
0.5
0
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
1
0
1
0
0
0
0
0
0
0
5
57f47c52c0fff3074ee2ee0a4eb71cfee4723012
9,786
py
Python
symmetricDistance/resources/manuscript/stl/stlLexer.py
CIDARLAB/stl_metrics
1ec645b3b688356a8286d037b8c7447db043a3b6
[ "BSD-3-Clause" ]
null
null
null
symmetricDistance/resources/manuscript/stl/stlLexer.py
CIDARLAB/stl_metrics
1ec645b3b688356a8286d037b8c7447db043a3b6
[ "BSD-3-Clause" ]
null
null
null
symmetricDistance/resources/manuscript/stl/stlLexer.py
CIDARLAB/stl_metrics
1ec645b3b688356a8286d037b8c7447db043a3b6
[ "BSD-3-Clause" ]
null
null
null
# Generated from /home/cristi/Dropbox/work/workspace_linux_precision5520/python-stl/stl/stl.g4 by ANTLR 4.7.1 # encoding: utf-8 from __future__ import print_function from antlr4 import * from io import StringIO import sys ''' Copyright (C) 2015-2018 Cristian Ioan Vasile <cvasile@mit.edu>, Prashant Vaidyanathan <prash@bu.edu>, Curtis Madsen <ckmadsen@bu.edu> Hybrid and Networked Systems (HyNeSs) Group, BU Robotics Lab, Boston University Cross Disciplinary Integration for Design Automation Research (CIDAR Lab), Boston University See license.txt file for license information. ''' def serializedATN(): with StringIO() as buf: buf.write(u"\3\u608b\ua72a\u8133\ub9ed\u417c\u3be7\u7786\u5964\2") buf.write(u"$\u00d3\b\1\4\2\t\2\4\3\t\3\4\4\t\4\4\5\t\5\4\6\t\6\4") buf.write(u"\7\t\7\4\b\t\b\4\t\t\t\4\n\t\n\4\13\t\13\4\f\t\f\4\r") buf.write(u"\t\r\4\16\t\16\4\17\t\17\4\20\t\20\4\21\t\21\4\22\t\22") buf.write(u"\4\23\t\23\4\24\t\24\4\25\t\25\4\26\t\26\4\27\t\27\4") buf.write(u"\30\t\30\4\31\t\31\4\32\t\32\4\33\t\33\4\34\t\34\4\35") buf.write(u"\t\35\4\36\t\36\4\37\t\37\4 \t \4!\t!\4\"\t\"\4#\t#\3") buf.write(u"\2\3\2\3\3\3\3\3\4\3\4\3\5\3\5\3\6\3\6\3\7\3\7\3\b\3") buf.write(u"\b\3\t\3\t\3\n\3\n\3\n\3\13\3\13\3\13\3\f\3\f\3\f\3\r") buf.write(u"\3\r\3\r\3\16\3\16\3\17\3\17\3\17\3\20\3\20\3\21\3\21") buf.write(u"\3\21\3\21\3\21\3\21\3\22\3\22\3\22\3\22\3\22\3\23\3") buf.write(u"\23\3\23\3\23\3\24\3\24\3\24\3\24\3\24\3\25\3\25\3\25") buf.write(u"\3\25\3\25\3\26\3\26\3\26\3\26\3\26\3\27\3\27\3\30\3") buf.write(u"\30\3\31\3\31\3\32\3\32\3\33\3\33\3\34\3\34\3\34\3\35") buf.write(u"\3\35\3\36\3\36\3\36\3\37\3\37\3 \3 \3 \3 \3 \3 \3 \3") buf.write(u" \3 \5 \u00a6\n \3!\5!\u00a9\n!\3!\7!\u00ac\n!\f!\16") buf.write(u"!\u00af\13!\3\"\5\"\u00b2\n\"\3\"\7\"\u00b5\n\"\f\"\16") buf.write(u"\"\u00b8\13\"\3\"\5\"\u00bb\n\"\3\"\6\"\u00be\n\"\r\"") buf.write(u"\16\"\u00bf\3\"\3\"\3\"\5\"\u00c5\n\"\3\"\7\"\u00c8\n") buf.write(u"\"\f\"\16\"\u00cb\13\"\3#\6#\u00ce\n#\r#\16#\u00cf\3") 
buf.write(u"#\3#\2\2$\3\3\5\4\7\5\t\6\13\7\r\b\17\t\21\n\23\13\25") buf.write(u"\f\27\r\31\16\33\17\35\20\37\21!\22#\23%\24\'\25)\26") buf.write(u"+\27-\30/\31\61\32\63\33\65\34\67\359\36;\37= ?!A\"C") buf.write(u"#E$\3\2\6\4\2C\\c|\6\2\62;C\\aac|\3\2\62;\5\2\13\f\17") buf.write(u"\17\"\"\2\u00dc\2\3\3\2\2\2\2\5\3\2\2\2\2\7\3\2\2\2\2") buf.write(u"\t\3\2\2\2\2\13\3\2\2\2\2\r\3\2\2\2\2\17\3\2\2\2\2\21") buf.write(u"\3\2\2\2\2\23\3\2\2\2\2\25\3\2\2\2\2\27\3\2\2\2\2\31") buf.write(u"\3\2\2\2\2\33\3\2\2\2\2\35\3\2\2\2\2\37\3\2\2\2\2!\3") buf.write(u"\2\2\2\2#\3\2\2\2\2%\3\2\2\2\2\'\3\2\2\2\2)\3\2\2\2\2") buf.write(u"+\3\2\2\2\2-\3\2\2\2\2/\3\2\2\2\2\61\3\2\2\2\2\63\3\2") buf.write(u"\2\2\2\65\3\2\2\2\2\67\3\2\2\2\29\3\2\2\2\2;\3\2\2\2") buf.write(u"\2=\3\2\2\2\2?\3\2\2\2\2A\3\2\2\2\2C\3\2\2\2\2E\3\2\2") buf.write(u"\2\3G\3\2\2\2\5I\3\2\2\2\7K\3\2\2\2\tM\3\2\2\2\13O\3") buf.write(u"\2\2\2\rQ\3\2\2\2\17S\3\2\2\2\21U\3\2\2\2\23W\3\2\2\2") buf.write(u"\25Z\3\2\2\2\27]\3\2\2\2\31`\3\2\2\2\33c\3\2\2\2\35e") buf.write(u"\3\2\2\2\37h\3\2\2\2!j\3\2\2\2#p\3\2\2\2%u\3\2\2\2\'") buf.write(u"y\3\2\2\2)~\3\2\2\2+\u0083\3\2\2\2-\u0088\3\2\2\2/\u008a") buf.write(u"\3\2\2\2\61\u008c\3\2\2\2\63\u008e\3\2\2\2\65\u0090\3") buf.write(u"\2\2\2\67\u0092\3\2\2\29\u0095\3\2\2\2;\u0097\3\2\2\2") buf.write(u"=\u009a\3\2\2\2?\u00a5\3\2\2\2A\u00a8\3\2\2\2C\u00b1") buf.write(u"\3\2\2\2E\u00cd\3\2\2\2GH\7*\2\2H\4\3\2\2\2IJ\7+\2\2") buf.write(u"J\6\3\2\2\2KL\7#\2\2L\b\3\2\2\2MN\7H\2\2N\n\3\2\2\2O") buf.write(u"P\7]\2\2P\f\3\2\2\2QR\7.\2\2R\16\3\2\2\2ST\7_\2\2T\20") buf.write(u"\3\2\2\2UV\7I\2\2V\22\3\2\2\2WX\7?\2\2XY\7@\2\2Y\24\3") buf.write(u"\2\2\2Z[\7(\2\2[\\\7(\2\2\\\26\3\2\2\2]^\7~\2\2^_\7~") buf.write(u"\2\2_\30\3\2\2\2`a\7@\2\2ab\7@\2\2b\32\3\2\2\2cd\7W\2") buf.write(u"\2d\34\3\2\2\2ef\7/\2\2fg\7*\2\2g\36\3\2\2\2hi\7`\2\2") buf.write(u"i \3\2\2\2jk\7u\2\2kl\7s\2\2lm\7t\2\2mn\7v\2\2no\7*\2") buf.write(u"\2o\"\3\2\2\2pq\7n\2\2qr\7q\2\2rs\7i\2\2st\7*\2\2t$\3") 
buf.write(u"\2\2\2uv\7n\2\2vw\7p\2\2wx\7*\2\2x&\3\2\2\2yz\7c\2\2") buf.write(u"z{\7d\2\2{|\7u\2\2|}\7*\2\2}(\3\2\2\2~\177\7f\2\2\177") buf.write(u"\u0080\7g\2\2\u0080\u0081\7t\2\2\u0081\u0082\7*\2\2\u0082") buf.write(u"*\3\2\2\2\u0083\u0084\7k\2\2\u0084\u0085\7p\2\2\u0085") buf.write(u"\u0086\7v\2\2\u0086\u0087\7*\2\2\u0087,\3\2\2\2\u0088") buf.write(u"\u0089\7,\2\2\u0089.\3\2\2\2\u008a\u008b\7\61\2\2\u008b") buf.write(u"\60\3\2\2\2\u008c\u008d\7-\2\2\u008d\62\3\2\2\2\u008e") buf.write(u"\u008f\7/\2\2\u008f\64\3\2\2\2\u0090\u0091\7>\2\2\u0091") buf.write(u"\66\3\2\2\2\u0092\u0093\7>\2\2\u0093\u0094\7?\2\2\u0094") buf.write(u"8\3\2\2\2\u0095\u0096\7?\2\2\u0096:\3\2\2\2\u0097\u0098") buf.write(u"\7@\2\2\u0098\u0099\7?\2\2\u0099<\3\2\2\2\u009a\u009b") buf.write(u"\7@\2\2\u009b>\3\2\2\2\u009c\u009d\7v\2\2\u009d\u009e") buf.write(u"\7t\2\2\u009e\u009f\7w\2\2\u009f\u00a6\7g\2\2\u00a0\u00a1") buf.write(u"\7h\2\2\u00a1\u00a2\7c\2\2\u00a2\u00a3\7n\2\2\u00a3\u00a4") buf.write(u"\7u\2\2\u00a4\u00a6\7g\2\2\u00a5\u009c\3\2\2\2\u00a5") buf.write(u"\u00a0\3\2\2\2\u00a6@\3\2\2\2\u00a7\u00a9\t\2\2\2\u00a8") buf.write(u"\u00a7\3\2\2\2\u00a9\u00ad\3\2\2\2\u00aa\u00ac\t\3\2") buf.write(u"\2\u00ab\u00aa\3\2\2\2\u00ac\u00af\3\2\2\2\u00ad\u00ab") buf.write(u"\3\2\2\2\u00ad\u00ae\3\2\2\2\u00aeB\3\2\2\2\u00af\u00ad") buf.write(u"\3\2\2\2\u00b0\u00b2\7/\2\2\u00b1\u00b0\3\2\2\2\u00b1") buf.write(u"\u00b2\3\2\2\2\u00b2\u00b6\3\2\2\2\u00b3\u00b5\t\4\2") buf.write(u"\2\u00b4\u00b3\3\2\2\2\u00b5\u00b8\3\2\2\2\u00b6\u00b4") buf.write(u"\3\2\2\2\u00b6\u00b7\3\2\2\2\u00b7\u00ba\3\2\2\2\u00b8") buf.write(u"\u00b6\3\2\2\2\u00b9\u00bb\7\60\2\2\u00ba\u00b9\3\2\2") buf.write(u"\2\u00ba\u00bb\3\2\2\2\u00bb\u00bd\3\2\2\2\u00bc\u00be") buf.write(u"\t\4\2\2\u00bd\u00bc\3\2\2\2\u00be\u00bf\3\2\2\2\u00bf") buf.write(u"\u00bd\3\2\2\2\u00bf\u00c0\3\2\2\2\u00c0\u00c4\3\2\2") buf.write(u"\2\u00c1\u00c5\7G\2\2\u00c2\u00c3\7G\2\2\u00c3\u00c5") buf.write(u"\7/\2\2\u00c4\u00c1\3\2\2\2\u00c4\u00c2\3\2\2\2\u00c4") 
buf.write(u"\u00c5\3\2\2\2\u00c5\u00c9\3\2\2\2\u00c6\u00c8\t\4\2") buf.write(u"\2\u00c7\u00c6\3\2\2\2\u00c8\u00cb\3\2\2\2\u00c9\u00c7") buf.write(u"\3\2\2\2\u00c9\u00ca\3\2\2\2\u00caD\3\2\2\2\u00cb\u00c9") buf.write(u"\3\2\2\2\u00cc\u00ce\t\5\2\2\u00cd\u00cc\3\2\2\2\u00ce") buf.write(u"\u00cf\3\2\2\2\u00cf\u00cd\3\2\2\2\u00cf\u00d0\3\2\2") buf.write(u"\2\u00d0\u00d1\3\2\2\2\u00d1\u00d2\b#\2\2\u00d2F\3\2") buf.write(u"\2\2\16\2\u00a5\u00a8\u00ab\u00ad\u00b1\u00b6\u00ba\u00bf") buf.write(u"\u00c4\u00c9\u00cf\3\3#\2") return buf.getvalue() class stlLexer(Lexer): atn = ATNDeserializer().deserialize(serializedATN()) decisionsToDFA = [ DFA(ds, i) for i, ds in enumerate(atn.decisionToState) ] T__0 = 1 T__1 = 2 T__2 = 3 T__3 = 4 T__4 = 5 T__5 = 6 T__6 = 7 T__7 = 8 T__8 = 9 T__9 = 10 T__10 = 11 T__11 = 12 T__12 = 13 T__13 = 14 T__14 = 15 T__15 = 16 T__16 = 17 T__17 = 18 T__18 = 19 T__19 = 20 T__20 = 21 T__21 = 22 T__22 = 23 T__23 = 24 T__24 = 25 T__25 = 26 T__26 = 27 T__27 = 28 T__28 = 29 T__29 = 30 BOOLEAN = 31 VARIABLE = 32 RATIONAL = 33 WS = 34 channelNames = [ u"DEFAULT_TOKEN_CHANNEL", u"HIDDEN" ] modeNames = [ u"DEFAULT_MODE" ] literalNames = [ u"<INVALID>", u"'('", u"')'", u"'!'", u"'F'", u"'['", u"','", u"']'", u"'G'", u"'=>'", u"'&&'", u"'||'", u"'>>'", u"'U'", u"'-('", u"'^'", u"'sqrt('", u"'log('", u"'ln('", u"'abs('", u"'der('", u"'int('", u"'*'", u"'/'", u"'+'", u"'-'", u"'<'", u"'<='", u"'='", u"'>='", u"'>'" ] symbolicNames = [ u"<INVALID>", u"BOOLEAN", u"VARIABLE", u"RATIONAL", u"WS" ] ruleNames = [ u"T__0", u"T__1", u"T__2", u"T__3", u"T__4", u"T__5", u"T__6", u"T__7", u"T__8", u"T__9", u"T__10", u"T__11", u"T__12", u"T__13", u"T__14", u"T__15", u"T__16", u"T__17", u"T__18", u"T__19", u"T__20", u"T__21", u"T__22", u"T__23", u"T__24", u"T__25", u"T__26", u"T__27", u"T__28", u"T__29", u"BOOLEAN", u"VARIABLE", u"RATIONAL", u"WS" ] grammarFileName = u"stl.g4" def __init__(self, input=None, output=sys.stdout): super(stlLexer, self).__init__(input, output=output) 
self.checkVersion("4.7.1") self._interp = LexerATNSimulator(self, self.atn, self.decisionsToDFA, PredictionContextCache()) self._actions = None self._predicates = None def action(self, localctx, ruleIndex, actionIndex): if self._actions is None: actions = dict() actions[33] = self.WS_action self._actions = actions action = self._actions.get(ruleIndex, None) if action is not None: action(localctx, actionIndex) else: raise Exception("No registered action for:" + str(ruleIndex)) def WS_action(self, localctx , actionIndex): if actionIndex == 0: self.skip();
49.175879
109
0.544758
2,289
9,786
2.263871
0.156837
0.128908
0.083366
0.086453
0.226553
0.146662
0.109996
0.048437
0.023929
0.009456
0
0.280776
0.178214
9,786
198
110
49.424242
0.363591
0.012569
0
0.012048
1
0.379518
0.516443
0.459758
0
0
0
0
0
1
0.024096
false
0
0.024096
0
0.313253
0.006024
0
0
0
null
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
1
0
0
0
1
1
0
1
null
0
0
0
0
0
0
0
0
0
0
0
0
0
5
17b55e39060f062674960073780c93f19a9ab3df
323
py
Python
Chapter08/doctest_module.py
TranQuangDuc/Clean-Code-in-Python
3c4b4a2fde2ccf28d2e0ec5002b2e1921704164e
[ "MIT" ]
402
2018-08-19T03:09:40.000Z
2022-03-30T08:10:26.000Z
Chapter08/doctest_module.py
TranQuangDuc/Clean-Code-in-Python
3c4b4a2fde2ccf28d2e0ec5002b2e1921704164e
[ "MIT" ]
137
2021-01-05T11:21:04.000Z
2022-03-31T11:10:11.000Z
Chapter08/doctest_module.py
TranQuangDuc/Clean-Code-in-Python
3c4b4a2fde2ccf28d2e0ec5002b2e1921704164e
[ "MIT" ]
140
2018-09-16T05:47:46.000Z
2022-03-31T03:20:30.000Z
def convert_num(num_str: str): """ >>> convert_num("12345") 12345 >>> convert_num("-12345") -12345 >>> convert_num("12345-") -12345 >>> convert_num("-12345-") 12345 """ num, sign = num_str[:-1], num_str[-1] if sign == "-": return -int(num) return int(num_str)
17
41
0.510836
39
323
4
0.25641
0.320513
0.384615
0.512821
0.512821
0.512821
0.512821
0.512821
0.512821
0.512821
0
0.182609
0.287926
323
18
42
17.944444
0.495652
0.408669
0
0
0
0
0.006803
0
0
0
0
0
0
1
0.2
false
0
0
0
0.6
0
0
0
0
null
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
1
0
0
5
aa40fe216ed72cce585d24f7b3ea4f3f30d35885
20
py
Python
checkov/version.py
scottbrown/checkov
6116e339455fc93c78c0ac7a96bc88d3f8924f7a
[ "Apache-2.0" ]
null
null
null
checkov/version.py
scottbrown/checkov
6116e339455fc93c78c0ac7a96bc88d3f8924f7a
[ "Apache-2.0" ]
null
null
null
checkov/version.py
scottbrown/checkov
6116e339455fc93c78c0ac7a96bc88d3f8924f7a
[ "Apache-2.0" ]
null
null
null
version = '1.0.702'
10
19
0.6
4
20
3
1
0
0
0
0
0
0
0
0
0
0
0.294118
0.15
20
1
20
20
0.411765
0
0
0
0
0
0.35
0
0
0
0
0
0
1
0
false
0
0
0
0
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
1
0
0
1
0
0
1
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
5
a4eb2cbb2cb7647131e71e14dadd0b96bebf6a35
76
py
Python
src/helper.py
Cheakyam/webhook
79f60721e08a8f6d1ca5eca2601b10ea92e0e324
[ "MIT" ]
null
null
null
src/helper.py
Cheakyam/webhook
79f60721e08a8f6d1ca5eca2601b10ea92e0e324
[ "MIT" ]
null
null
null
src/helper.py
Cheakyam/webhook
79f60721e08a8f6d1ca5eca2601b10ea92e0e324
[ "MIT" ]
null
null
null
import os
import subprocess


def execute(script, path):
    """Run the shell script at ``path + script`` with ``sh``.

    :param script: script file name (appended verbatim to ``path``).
    :param path: directory prefix; the caller is expected to include the
        trailing separator, since the two strings are concatenated as-is.
    :returns: None (the original ignored the exit status; preserved here).
    """
    # Security fix: the original ran os.system("sh " + path + script), which
    # hands the concatenated string to a shell — any metacharacters in the
    # arguments would be interpreted (command injection). Passing an argument
    # list with shell=False (the default) keeps the filename a single, inert
    # argument to sh.
    subprocess.run(["sh", path + script], check=False)
15.2
36
0.631579
11
76
4.363636
0.727273
0
0
0
0
0
0
0
0
0
0
0
0.223684
76
4
37
19
0.813559
0
0
0
0
0
0.039474
0
0
0
0
0
0
1
0.333333
false
0
0.333333
0
0.666667
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
1
0
1
0
0
5
a4f91d11f90724b54f3cc0b3c1e82ce83943f582
40
py
Python
examples/str.startswith/ex1.py
mcorne/python-by-example
15339c0909c84b51075587a6a66391100971c033
[ "MIT" ]
null
null
null
examples/str.startswith/ex1.py
mcorne/python-by-example
15339c0909c84b51075587a6a66391100971c033
[ "MIT" ]
null
null
null
examples/str.startswith/ex1.py
mcorne/python-by-example
15339c0909c84b51075587a6a66391100971c033
[ "MIT" ]
null
null
null
# Example of str.startswith: prints True because the domain begins with 'fr'.
domain = 'fr.domain.com'
print(domain.startswith('fr'))
20
39
0.7
6
40
4.666667
0.833333
0
0
0
0
0
0
0
0
0
0
0
0.025
40
1
40
40
0.717949
0
0
0
0
0
0.375
0
0
0
0
0
0
1
0
true
0
0
0
0
1
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
0
0
0
1
0
5
35356e3b09819e68ae0fb9c1a1716d805f0a1d4a
81
py
Python
boykrapschool/__init__.py
boykrap/boykrapschool
891a6c21f04d448bb9d79c23eacb8cff7228c28d
[ "MIT" ]
null
null
null
boykrapschool/__init__.py
boykrap/boykrapschool
891a6c21f04d448bb9d79c23eacb8cff7228c28d
[ "MIT" ]
null
null
null
boykrapschool/__init__.py
boykrap/boykrapschool
891a6c21f04d448bb9d79c23eacb8cff7228c28d
[ "MIT" ]
null
null
null
#__init__.py from boykrapschool.studentclass import Student,SpecialStudent
16.2
62
0.814815
8
81
7.75
1
0
0
0
0
0
0
0
0
0
0
0
0.135802
81
4
63
20.25
0.885714
0.135802
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
102d75e3ed5a6998b171d42340a80090f62f3efc
213
py
Python
baseclasses/utils/__init__.py
yqliaohk/baseclasses
dac8fbaddeb2f67b031ffc38353cbe8334b9bd03
[ "Apache-2.0" ]
6
2019-04-18T00:50:08.000Z
2022-01-11T20:33:44.000Z
baseclasses/utils/__init__.py
yqliaohk/baseclasses
dac8fbaddeb2f67b031ffc38353cbe8334b9bd03
[ "Apache-2.0" ]
50
2019-10-04T16:18:24.000Z
2022-03-15T22:50:35.000Z
baseclasses/utils/__init__.py
yqliaohk/baseclasses
dac8fbaddeb2f67b031ffc38353cbe8334b9bd03
[ "Apache-2.0" ]
24
2019-07-03T10:35:40.000Z
2021-07-22T14:52:20.000Z
from .containers import CaseInsensitiveSet, CaseInsensitiveDict
from .error import Error
from .py3Util import getPy3SafeString

# Explicit public API of this package: the names re-exported from the
# submodules above and picked up by `from baseclasses.utils import *`.
__all__ = ["CaseInsensitiveSet", "CaseInsensitiveDict", "Error", "getPy3SafeString"]
35.5
84
0.821596
18
213
9.5
0.5
0.432749
0
0
0
0
0
0
0
0
0
0.015544
0.093897
213
5
85
42.6
0.870466
0
0
0
0
0
0.2723
0
0
0
0
0
0
1
0
false
0
0.75
0
0.75
0
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
1
0
0
5
106d8098b3000c62a07b4b54a2d8c2b2097c7003
118
py
Python
rgprojections/__init__.py
sansbacon/rgprojections
824b1547d36589b08d7f57afd971e22a6b4e699a
[ "MIT" ]
null
null
null
rgprojections/__init__.py
sansbacon/rgprojections
824b1547d36589b08d7f57afd971e22a6b4e699a
[ "MIT" ]
null
null
null
rgprojections/__init__.py
sansbacon/rgprojections
824b1547d36589b08d7f57afd971e22a6b4e699a
[ "MIT" ]
null
null
null
import logging

# Attach a NullHandler to this package's logger before importing submodules,
# so library users who have not configured logging don't get the
# "No handlers could be found" warning from any logging done at import time.
logging.getLogger(__name__).addHandler(logging.NullHandler())

from .rg import RotogrindersProjection
19.666667
61
0.838983
12
118
7.916667
0.75
0
0
0
0
0
0
0
0
0
0
0
0.076271
118
5
62
23.6
0.87156
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
0.666667
0
0.666667
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
529b7ff72bf5faebd89212f66e2ec3338568c3be
68
py
Python
corebreakout/__init__.py
kbarnhart/corebreakout
fa8dc7575b330b7c1ce47a35b44deca7856bd05c
[ "MIT" ]
20
2019-12-09T23:56:32.000Z
2021-08-11T18:57:59.000Z
corebreakout/__init__.py
kbarnhart/corebreakout
fa8dc7575b330b7c1ce47a35b44deca7856bd05c
[ "MIT" ]
13
2019-11-05T00:13:39.000Z
2021-08-20T19:08:13.000Z
corebreakout/__init__.py
kbarnhart/corebreakout
fa8dc7575b330b7c1ce47a35b44deca7856bd05c
[ "MIT" ]
12
2019-12-12T17:35:44.000Z
2021-10-05T05:45:49.000Z
from .column import CoreColumn from .segmenter import CoreSegmenter
22.666667
36
0.852941
8
68
7.25
0.75
0
0
0
0
0
0
0
0
0
0
0
0.117647
68
2
37
34
0.966667
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
5e03529cd365e900d18d09f4f3217d859d2dfa39
8,944
py
Python
test/test_ssh_algorithm.py
luke-goddard/ssh-audit
e447c42a79df49841d5269eacde6dbeb811a6be4
[ "MIT" ]
1
2020-12-23T18:27:03.000Z
2020-12-23T18:27:03.000Z
test/test_ssh_algorithm.py
FRooter/ssh-audit
e447c42a79df49841d5269eacde6dbeb811a6be4
[ "MIT" ]
null
null
null
test/test_ssh_algorithm.py
FRooter/ssh-audit
e447c42a79df49841d5269eacde6dbeb811a6be4
[ "MIT" ]
null
null
null
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import pytest


# pylint: disable=attribute-defined-outside-init
class TestSSHAlgorithm(object):
    """Tests for ssh_audit's SSH.Algorithm version parsing and Timeframe."""

    @pytest.fixture(autouse=True)
    def init(self, ssh_audit):
        # `ssh_audit` fixture is provided by the project's conftest (not
        # visible here); it exposes the module under test.
        self.ssh = ssh_audit.SSH

    def _tf(self, v, s=None):
        # Shorthand: build a fresh Timeframe updated with version spec `v`
        # (and optional flag `s`, passed straight through to update()).
        return self.ssh.Algorithm.Timeframe().update(v, s)

    def test_get_ssh_version(self):
        # get_ssh_version maps a version token to (product, version, flag);
        # prefixes select the product: 'd' -> Dropbear, 'l1' -> libssh,
        # no prefix -> OpenSSH; a 'C' suffix sets the boolean to True.
        def ver(v):
            return self.ssh.Algorithm.get_ssh_version(v)
        assert ver('7.5') == ('OpenSSH', '7.5', False)
        assert ver('7.5C') == ('OpenSSH', '7.5', True)
        assert ver('d2016.74') == ('Dropbear SSH', '2016.74', False)
        assert ver('l10.7.4') == ('libssh', '0.7.4', False)
        assert ver('')[1] == ''

    def test_get_since_text(self):
        # get_since_text renders a human-readable "available since ..." line;
        # returns None for libssh entries and for empty input.
        def gst(v):
            return self.ssh.Algorithm.get_since_text(v)
        assert gst(['7.5']) == 'available since OpenSSH 7.5'
        assert gst(['7.5C']) == 'available since OpenSSH 7.5 (client only)'
        assert gst(['7.5,']) == 'available since OpenSSH 7.5'
        assert gst(['d2016.73']) == 'available since Dropbear SSH 2016.73'
        assert gst(['7.5,d2016.73']) == 'available since OpenSSH 7.5, Dropbear SSH 2016.73'
        assert gst(['l10.7.4']) is None
        assert gst([]) is None

    def test_timeframe_creation(self):
        # Exhaustive table of Timeframe.update() inputs vs the resulting
        # str() form. NOTE(review): each product maps to a 4-item list; from
        # the expectations below it appears to be two (from, till) pairs, one
        # kept when s=True and one when s=False — confirm against the
        # Timeframe implementation.
        # pylint: disable=line-too-long,too-many-statements
        def cmp_tf(v, s, r):
            assert str(self._tf(v, s)) == str(r)
        cmp_tf(['6.2'], None, {'OpenSSH': ['6.2', None, '6.2', None]})
        cmp_tf(['6.2'], True, {'OpenSSH': ['6.2', None, None, None]})
        cmp_tf(['6.2'], False, {'OpenSSH': [None, None, '6.2', None]})
        cmp_tf(['6.2C'], None, {'OpenSSH': [None, None, '6.2', None]})
        cmp_tf(['6.2C'], True, {})
        cmp_tf(['6.2C'], False, {'OpenSSH': [None, None, '6.2', None]})
        cmp_tf(['6.1,6.2C'], None, {'OpenSSH': ['6.1', None, '6.2', None]})
        cmp_tf(['6.1,6.2C'], True, {'OpenSSH': ['6.1', None, None, None]})
        cmp_tf(['6.1,6.2C'], False, {'OpenSSH': [None, None, '6.2', None]})
        cmp_tf(['6.2C,6.1'], None, {'OpenSSH': ['6.1', None, '6.2', None]})
        cmp_tf(['6.2C,6.1'], True, {'OpenSSH': ['6.1', None, None, None]})
        cmp_tf(['6.2C,6.1'], False, {'OpenSSH': [None, None, '6.2', None]})
        cmp_tf(['6.3,6.2C'], None, {'OpenSSH': ['6.3', None, '6.2', None]})
        cmp_tf(['6.3,6.2C'], True, {'OpenSSH': ['6.3', None, None, None]})
        cmp_tf(['6.3,6.2C'], False, {'OpenSSH': [None, None, '6.2', None]})
        cmp_tf(['6.2C,6.3'], None, {'OpenSSH': ['6.3', None, '6.2', None]})
        cmp_tf(['6.2C,6.3'], True, {'OpenSSH': ['6.3', None, None, None]})
        cmp_tf(['6.2C,6.3'], False, {'OpenSSH': [None, None, '6.2', None]})
        cmp_tf(['6.2', '6.6'], None, {'OpenSSH': ['6.2', '6.6', '6.2', '6.6']})
        cmp_tf(['6.2', '6.6'], True, {'OpenSSH': ['6.2', '6.6', None, None]})
        cmp_tf(['6.2', '6.6'], False, {'OpenSSH': [None, None, '6.2', '6.6']})
        cmp_tf(['6.2C', '6.6'], None, {'OpenSSH': [None, '6.6', '6.2', '6.6']})
        cmp_tf(['6.2C', '6.6'], True, {'OpenSSH': [None, '6.6', None, None]})
        cmp_tf(['6.2C', '6.6'], False, {'OpenSSH': [None, None, '6.2', '6.6']})
        cmp_tf(['6.1,6.2C', '6.6'], None, {'OpenSSH': ['6.1', '6.6', '6.2', '6.6']})
        cmp_tf(['6.1,6.2C', '6.6'], True, {'OpenSSH': ['6.1', '6.6', None, None]})
        cmp_tf(['6.1,6.2C', '6.6'], False, {'OpenSSH': [None, None, '6.2', '6.6']})
        cmp_tf(['6.2C,6.1', '6.6'], None, {'OpenSSH': ['6.1', '6.6', '6.2', '6.6']})
        cmp_tf(['6.2C,6.1', '6.6'], True, {'OpenSSH': ['6.1', '6.6', None, None]})
        cmp_tf(['6.2C,6.1', '6.6'], False, {'OpenSSH': [None, None, '6.2', '6.6']})
        cmp_tf(['6.3,6.2C', '6.6'], None, {'OpenSSH': ['6.3', '6.6', '6.2', '6.6']})
        cmp_tf(['6.3,6.2C', '6.6'], True, {'OpenSSH': ['6.3', '6.6', None, None]})
        cmp_tf(['6.3,6.2C', '6.6'], False, {'OpenSSH': [None, None, '6.2', '6.6']})
        cmp_tf(['6.2C,6.3', '6.6'], None, {'OpenSSH': ['6.3', '6.6', '6.2', '6.6']})
        cmp_tf(['6.2C,6.3', '6.6'], True, {'OpenSSH': ['6.3', '6.6', None, None]})
        cmp_tf(['6.2C,6.3', '6.6'], False, {'OpenSSH': [None, None, '6.2', '6.6']})
        cmp_tf(['6.2', '6.6', None], None, {'OpenSSH': ['6.2', '6.6', '6.2', None]})
        cmp_tf(['6.2', '6.6', None], True, {'OpenSSH': ['6.2', '6.6', None, None]})
        cmp_tf(['6.2', '6.6', None], False, {'OpenSSH': [None, None, '6.2', None]})
        cmp_tf(['6.2C', '6.6', None], None, {'OpenSSH': [None, '6.6', '6.2', None]})
        cmp_tf(['6.2C', '6.6', None], True, {'OpenSSH': [None, '6.6', None, None]})
        cmp_tf(['6.2C', '6.6', None], False, {'OpenSSH': [None, None, '6.2', None]})
        cmp_tf(['6.1,6.2C', '6.6', None], None, {'OpenSSH': ['6.1', '6.6', '6.2', None]})
        cmp_tf(['6.1,6.2C', '6.6', None], True, {'OpenSSH': ['6.1', '6.6', None, None]})
        cmp_tf(['6.1,6.2C', '6.6', None], False, {'OpenSSH': [None, None, '6.2', None]})
        cmp_tf(['6.2C,6.1', '6.6', None], None, {'OpenSSH': ['6.1', '6.6', '6.2', None]})
        cmp_tf(['6.2C,6.1', '6.6', None], True, {'OpenSSH': ['6.1', '6.6', None, None]})
        cmp_tf(['6.2C,6.1', '6.6', None], False, {'OpenSSH': [None, None, '6.2', None]})
        cmp_tf(['6.2,6.3C', '6.6', None], None, {'OpenSSH': ['6.2', '6.6', '6.3', None]})
        cmp_tf(['6.2,6.3C', '6.6', None], True, {'OpenSSH': ['6.2', '6.6', None, None]})
        cmp_tf(['6.2,6.3C', '6.6', None], False, {'OpenSSH': [None, None, '6.3', None]})
        cmp_tf(['6.3C,6.2', '6.6', None], None, {'OpenSSH': ['6.2', '6.6', '6.3', None]})
        cmp_tf(['6.3C,6.2', '6.6', None], True, {'OpenSSH': ['6.2', '6.6', None, None]})
        cmp_tf(['6.3C,6.2', '6.6', None], False, {'OpenSSH': [None, None, '6.3', None]})
        cmp_tf(['6.2', '6.6', '7.1'], None, {'OpenSSH': ['6.2', '6.6', '6.2', '7.1']})
        cmp_tf(['6.2', '6.6', '7.1'], True, {'OpenSSH': ['6.2', '6.6', None, None]})
        cmp_tf(['6.2', '6.6', '7.1'], False, {'OpenSSH': [None, None, '6.2', '7.1']})
        cmp_tf(['6.1,6.2C', '6.6', '7.1'], None, {'OpenSSH': ['6.1', '6.6', '6.2', '7.1']})
        cmp_tf(['6.1,6.2C', '6.6', '7.1'], True, {'OpenSSH': ['6.1', '6.6', None, None]})
        cmp_tf(['6.1,6.2C', '6.6', '7.1'], False, {'OpenSSH': [None, None, '6.2', '7.1']})
        cmp_tf(['6.2C,6.1', '6.6', '7.1'], None, {'OpenSSH': ['6.1', '6.6', '6.2', '7.1']})
        cmp_tf(['6.2C,6.1', '6.6', '7.1'], True, {'OpenSSH': ['6.1', '6.6', None, None]})
        cmp_tf(['6.2C,6.1', '6.6', '7.1'], False, {'OpenSSH': [None, None, '6.2', '7.1']})
        cmp_tf(['6.2,6.3C', '6.6', '7.1'], None, {'OpenSSH': ['6.2', '6.6', '6.3', '7.1']})
        cmp_tf(['6.2,6.3C', '6.6', '7.1'], True, {'OpenSSH': ['6.2', '6.6', None, None]})
        cmp_tf(['6.2,6.3C', '6.6', '7.1'], False, {'OpenSSH': [None, None, '6.3', '7.1']})
        cmp_tf(['6.3C,6.2', '6.6', '7.1'], None, {'OpenSSH': ['6.2', '6.6', '6.3', '7.1']})
        cmp_tf(['6.3C,6.2', '6.6', '7.1'], True, {'OpenSSH': ['6.2', '6.6', None, None]})
        cmp_tf(['6.3C,6.2', '6.6', '7.1'], False, {'OpenSSH': [None, None, '6.3', '7.1']})
        # Mixed OpenSSH + Dropbear specs: the three orderings below must
        # produce equivalent timeframes regardless of token order.
        tf1 = self._tf(['6.1,d2016.72,6.2C', '6.6,d2016.73', '7.1,d2016.74'])
        tf2 = self._tf(['d2016.72,6.2C,6.1', 'd2016.73,6.6', 'd2016.74,7.1'])
        tf3 = self._tf(['d2016.72,6.2C,6.1', '6.6,d2016.73', '7.1,d2016.74'])
        # check without caring for output order
        ov = "'OpenSSH': ['6.1', '6.6', '6.2', '7.1']"
        dv = "'Dropbear SSH': ['2016.72', '2016.73', '2016.72', '2016.74']"
        assert len(str(tf1)) == len(str(tf2)) == len(str(tf3))
        assert ov in str(tf1) and ov in str(tf2) and ov in str(tf3)
        assert dv in str(tf1) and dv in str(tf2) and dv in str(tf3)
        assert ov in repr(tf1) and ov in repr(tf2) and ov in repr(tf3)
        assert dv in repr(tf1) and dv in repr(tf2) and dv in repr(tf3)

    def test_timeframe_object(self):
        # Timeframe's container protocol: `in` reports whether a product was
        # seen; indexing an unknown product yields a 4-tuple of Nones rather
        # than raising.
        tf = self._tf(['6.1,6.2C', '6.6', '7.1'])
        assert 'OpenSSH' in tf
        assert 'Dropbear SSH' not in tf
        assert 'libssh' not in tf
        assert 'unknown' not in tf
        assert tf['OpenSSH'] == ('6.1', '6.6', '6.2', '7.1')
        assert tf['Dropbear SSH'] == (None, None, None, None)
        assert tf['libssh'] == (None, None, None, None)
        assert tf['unknown'] == (None, None, None, None)
        # get_from/get_till select one (from, till) pair via the boolean;
        # NOTE(review): True appears to select the first pair — confirm.
        assert tf.get_from('OpenSSH', True) == '6.1'
        assert tf.get_till('OpenSSH', True) == '6.6'
        assert tf.get_from('OpenSSH', False) == '6.2'
        assert tf.get_till('OpenSSH', False) == '7.1'
        tf = self._tf(['6.1,d2016.72,6.2C', '6.6,d2016.73', '7.1,d2016.74'])
        assert 'OpenSSH' in tf
        assert 'Dropbear SSH' in tf
        assert 'libssh' not in tf
        assert 'unknown' not in tf
        assert tf['OpenSSH'] == ('6.1', '6.6', '6.2', '7.1')
        assert tf['Dropbear SSH'] == ('2016.72', '2016.73', '2016.72', '2016.74')
        assert tf['libssh'] == (None, None, None, None)
        assert tf['unknown'] == (None, None, None, None)
        assert tf.get_from('OpenSSH', True) == '6.1'
        assert tf.get_till('OpenSSH', True) == '6.6'
        assert tf.get_from('OpenSSH', False) == '6.2'
        assert tf.get_till('OpenSSH', False) == '7.1'
        assert tf.get_from('Dropbear SSH', True) == '2016.72'
        assert tf.get_till('Dropbear SSH', True) == '2016.73'
        assert tf.get_from('Dropbear SSH', False) == '2016.72'
        assert tf.get_till('Dropbear SSH', False) == '2016.74'
        # str() and repr() must both contain the per-product summaries.
        ov = "'OpenSSH': ['6.1', '6.6', '6.2', '7.1']"
        dv = "'Dropbear SSH': ['2016.72', '2016.73', '2016.72', '2016.74']"
        assert ov in str(tf)
        assert dv in str(tf)
        assert ov in repr(tf)
        assert dv in repr(tf)
54.206061
85
0.529852
1,714
8,944
2.703617
0.054842
0.055675
0.08934
0.099266
0.787656
0.769098
0.727881
0.706733
0.673069
0.66789
0
0.1306
0.146467
8,944
164
86
54.536585
0.476421
0.01979
0
0.166667
0
0.027778
0.291942
0
0
0
0
0
0.347222
1
0.0625
false
0
0.006944
0.020833
0.097222
0
0
0
0
null
0
0
0
0
1
1
1
0
1
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
5
5e26db6300b8aade85b8cd834b7695146226676e
40
py
Python
services/src/handler/__init__.py
pjw960408/binance-trader-c1
dae91cc721591257334ab1ddcf3a4f6d86644435
[ "MIT" ]
null
null
null
services/src/handler/__init__.py
pjw960408/binance-trader-c1
dae91cc721591257334ab1ddcf3a4f6d86644435
[ "MIT" ]
null
null
null
services/src/handler/__init__.py
pjw960408/binance-trader-c1
dae91cc721591257334ab1ddcf3a4f6d86644435
[ "MIT" ]
1
2021-05-06T14:14:56.000Z
2021-05-06T14:14:56.000Z
from .slack_handler import SlackHandler
20
39
0.875
5
40
6.8
1
0
0
0
0
0
0
0
0
0
0
0
0.1
40
1
40
40
0.944444
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
0
0
5
eadaa4e29470b0d0b86af44e8e4d2dd0d2318d6c
490
py
Python
constructutils/noemit.py
shiftinv/construct-utils
f1ca72f0afd478efca106bbc076c07e3f87399bb
[ "Apache-2.0" ]
null
null
null
constructutils/noemit.py
shiftinv/construct-utils
f1ca72f0afd478efca106bbc076c07e3f87399bb
[ "Apache-2.0" ]
null
null
null
constructutils/noemit.py
shiftinv/construct-utils
f1ca72f0afd478efca106bbc076c07e3f87399bb
[ "Apache-2.0" ]
null
null
null
class NoEmitMixin:
    '''
    Mixin that disables every `Construct._emit*` code-generation hook.

    Each hook unconditionally raises :class:`NotImplementedError`, so any
    construct carrying this mixin refuses compiled emission.
    '''

    # Every stub accepts and ignores arbitrary arguments so it matches any
    # _emit* call signature before raising.
    def _emitparse(*_args, **_kwargs):
        raise NotImplementedError

    def _emitbuild(*_args, **_kwargs):
        raise NotImplementedError

    def _emitseq(*_args, **_kwargs):
        raise NotImplementedError

    def _emitprimitivetype(*_args, **_kwargs):
        raise NotImplementedError

    def _emitfulltype(*_args, **_kwargs):
        raise NotImplementedError
24.5
85
0.665306
42
490
7.619048
0.47619
0.15625
0.234375
0.53125
0.4625
0
0
0
0
0
0
0
0.232653
490
19
86
25.789474
0.851064
0.165306
0
0.454545
0
0
0
0
0
0
0
0
0
1
0.454545
true
0
0
0
0.545455
0
0
0
0
null
0
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
1
0
0
0
1
0
0
5