hexsha
string | size
int64 | ext
string | lang
string | max_stars_repo_path
string | max_stars_repo_name
string | max_stars_repo_head_hexsha
string | max_stars_repo_licenses
list | max_stars_count
int64 | max_stars_repo_stars_event_min_datetime
string | max_stars_repo_stars_event_max_datetime
string | max_issues_repo_path
string | max_issues_repo_name
string | max_issues_repo_head_hexsha
string | max_issues_repo_licenses
list | max_issues_count
int64 | max_issues_repo_issues_event_min_datetime
string | max_issues_repo_issues_event_max_datetime
string | max_forks_repo_path
string | max_forks_repo_name
string | max_forks_repo_head_hexsha
string | max_forks_repo_licenses
list | max_forks_count
int64 | max_forks_repo_forks_event_min_datetime
string | max_forks_repo_forks_event_max_datetime
string | content
string | avg_line_length
float64 | max_line_length
int64 | alphanum_fraction
float64 | qsc_code_num_words_quality_signal
int64 | qsc_code_num_chars_quality_signal
float64 | qsc_code_mean_word_length_quality_signal
float64 | qsc_code_frac_words_unique_quality_signal
float64 | qsc_code_frac_chars_top_2grams_quality_signal
float64 | qsc_code_frac_chars_top_3grams_quality_signal
float64 | qsc_code_frac_chars_top_4grams_quality_signal
float64 | qsc_code_frac_chars_dupe_5grams_quality_signal
float64 | qsc_code_frac_chars_dupe_6grams_quality_signal
float64 | qsc_code_frac_chars_dupe_7grams_quality_signal
float64 | qsc_code_frac_chars_dupe_8grams_quality_signal
float64 | qsc_code_frac_chars_dupe_9grams_quality_signal
float64 | qsc_code_frac_chars_dupe_10grams_quality_signal
float64 | qsc_code_frac_chars_replacement_symbols_quality_signal
float64 | qsc_code_frac_chars_digital_quality_signal
float64 | qsc_code_frac_chars_whitespace_quality_signal
float64 | qsc_code_size_file_byte_quality_signal
float64 | qsc_code_num_lines_quality_signal
float64 | qsc_code_num_chars_line_max_quality_signal
float64 | qsc_code_num_chars_line_mean_quality_signal
float64 | qsc_code_frac_chars_alphabet_quality_signal
float64 | qsc_code_frac_chars_comments_quality_signal
float64 | qsc_code_cate_xml_start_quality_signal
float64 | qsc_code_frac_lines_dupe_lines_quality_signal
float64 | qsc_code_cate_autogen_quality_signal
float64 | qsc_code_frac_lines_long_string_quality_signal
float64 | qsc_code_frac_chars_string_length_quality_signal
float64 | qsc_code_frac_chars_long_word_length_quality_signal
float64 | qsc_code_frac_lines_string_concat_quality_signal
float64 | qsc_code_cate_encoded_data_quality_signal
float64 | qsc_code_frac_chars_hex_words_quality_signal
float64 | qsc_code_frac_lines_prompt_comments_quality_signal
float64 | qsc_code_frac_lines_assert_quality_signal
float64 | qsc_codepython_cate_ast_quality_signal
float64 | qsc_codepython_frac_lines_func_ratio_quality_signal
float64 | qsc_codepython_cate_var_zero_quality_signal
bool | qsc_codepython_frac_lines_pass_quality_signal
float64 | qsc_codepython_frac_lines_import_quality_signal
float64 | qsc_codepython_frac_lines_simplefunc_quality_signal
float64 | qsc_codepython_score_lines_no_logic_quality_signal
float64 | qsc_codepython_frac_lines_print_quality_signal
float64 | qsc_code_num_words
int64 | qsc_code_num_chars
int64 | qsc_code_mean_word_length
int64 | qsc_code_frac_words_unique
null | qsc_code_frac_chars_top_2grams
int64 | qsc_code_frac_chars_top_3grams
int64 | qsc_code_frac_chars_top_4grams
int64 | qsc_code_frac_chars_dupe_5grams
int64 | qsc_code_frac_chars_dupe_6grams
int64 | qsc_code_frac_chars_dupe_7grams
int64 | qsc_code_frac_chars_dupe_8grams
int64 | qsc_code_frac_chars_dupe_9grams
int64 | qsc_code_frac_chars_dupe_10grams
int64 | qsc_code_frac_chars_replacement_symbols
int64 | qsc_code_frac_chars_digital
int64 | qsc_code_frac_chars_whitespace
int64 | qsc_code_size_file_byte
int64 | qsc_code_num_lines
int64 | qsc_code_num_chars_line_max
int64 | qsc_code_num_chars_line_mean
int64 | qsc_code_frac_chars_alphabet
int64 | qsc_code_frac_chars_comments
int64 | qsc_code_cate_xml_start
int64 | qsc_code_frac_lines_dupe_lines
int64 | qsc_code_cate_autogen
int64 | qsc_code_frac_lines_long_string
int64 | qsc_code_frac_chars_string_length
int64 | qsc_code_frac_chars_long_word_length
int64 | qsc_code_frac_lines_string_concat
null | qsc_code_cate_encoded_data
int64 | qsc_code_frac_chars_hex_words
int64 | qsc_code_frac_lines_prompt_comments
int64 | qsc_code_frac_lines_assert
int64 | qsc_codepython_cate_ast
int64 | qsc_codepython_frac_lines_func_ratio
int64 | qsc_codepython_cate_var_zero
int64 | qsc_codepython_frac_lines_pass
int64 | qsc_codepython_frac_lines_import
int64 | qsc_codepython_frac_lines_simplefunc
int64 | qsc_codepython_score_lines_no_logic
int64 | qsc_codepython_frac_lines_print
int64 | effective
string | hits
int64 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
a6f3baa679f38ad28c89b3ed455830bee4d015b8
| 500
|
py
|
Python
|
home/models.py
|
dsingh12345/grocerybag
|
a9ea758d828078aae306b95d4486859beed27644
|
[
"MIT"
] | null | null | null |
home/models.py
|
dsingh12345/grocerybag
|
a9ea758d828078aae306b95d4486859beed27644
|
[
"MIT"
] | null | null | null |
home/models.py
|
dsingh12345/grocerybag
|
a9ea758d828078aae306b95d4486859beed27644
|
[
"MIT"
] | null | null | null |
from django.db import models
# Create your models here.
class grocerylist(models.Model):
name = models.CharField(max_length = 75)
itemquantity = models.CharField(max_length = 75)
status = models.IntegerField()
date = models.DateField( max_length=50)
def __str__(self):
return self.name
class login(models.Model):
name = models.CharField(max_length = 75)
password = models.CharField(max_length = 75)
def __str__(self):
return self.name
| 26.315789
| 52
| 0.678
| 62
| 500
| 5.258065
| 0.451613
| 0.138037
| 0.220859
| 0.294479
| 0.558282
| 0.398773
| 0.251534
| 0.251534
| 0
| 0
| 0
| 0.02584
| 0.226
| 500
| 18
| 53
| 27.777778
| 0.816537
| 0.048
| 0
| 0.461538
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.153846
| false
| 0.076923
| 0.076923
| 0.153846
| 1
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 1
| 0
|
0
| 5
|
5b55ebab1af2411b91a57c355e4e65199ce70e2b
| 242
|
py
|
Python
|
GT_users/user_app/models.py
|
10K-Linesofcode/Glowing-Tribble
|
be0e17ce5391b589792e4ae6b02156d7ee4ce145
|
[
"MIT"
] | null | null | null |
GT_users/user_app/models.py
|
10K-Linesofcode/Glowing-Tribble
|
be0e17ce5391b589792e4ae6b02156d7ee4ce145
|
[
"MIT"
] | null | null | null |
GT_users/user_app/models.py
|
10K-Linesofcode/Glowing-Tribble
|
be0e17ce5391b589792e4ae6b02156d7ee4ce145
|
[
"MIT"
] | null | null | null |
from django.db import models
# Create your models here.
class User(models.Model):
first_name = models.CharField(max_length=128)
last_name = models.CharField(max_length=128)
emails = models.CharField(max_length=264,unique=True)
| 24.2
| 57
| 0.756198
| 35
| 242
| 5.085714
| 0.628571
| 0.252809
| 0.303371
| 0.404494
| 0.348315
| 0.348315
| 0
| 0
| 0
| 0
| 0
| 0.043478
| 0.144628
| 242
| 9
| 58
| 26.888889
| 0.816425
| 0.099174
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.2
| 0
| 1
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 5
|
5b6bc92a42076f14903741399286866e3703706a
| 181
|
py
|
Python
|
print_triangle.py
|
mariongb81/TeamProject
|
c29c765ff8313563d17aea845caefc18631cafac
|
[
"MIT"
] | null | null | null |
print_triangle.py
|
mariongb81/TeamProject
|
c29c765ff8313563d17aea845caefc18631cafac
|
[
"MIT"
] | 4
|
2021-11-16T02:36:24.000Z
|
2021-11-26T03:33:57.000Z
|
print_triangle.py
|
mariongb81/TeamProject
|
c29c765ff8313563d17aea845caefc18631cafac
|
[
"MIT"
] | 4
|
2021-11-16T01:02:42.000Z
|
2021-11-27T03:07:36.000Z
|
number = int(input("ingrese el numero de filas de su triangulo "))
def print_triangle(number):
for i in range(1, number + 1):
print(str(i) * i)
print_triangle(number)
| 22.625
| 66
| 0.668508
| 29
| 181
| 4.103448
| 0.655172
| 0.218487
| 0.319328
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.013986
| 0.209945
| 181
| 7
| 67
| 25.857143
| 0.818182
| 0
| 0
| 0
| 0
| 0
| 0.237569
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.2
| false
| 0
| 0
| 0
| 0.2
| 0.6
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
|
0
| 5
|
5bb29d9fbf95203d50fecf9e875de62e253c4778
| 48
|
py
|
Python
|
Code/rozali.py
|
Mehrabkb/CintaKamu
|
62c8eacc3bfb2be205e854b92b3df65fc77692c0
|
[
"MIT"
] | 7
|
2018-10-03T14:16:44.000Z
|
2022-02-24T10:58:46.000Z
|
Code/rozali.py
|
Mehrabkb/CintaKamu
|
62c8eacc3bfb2be205e854b92b3df65fc77692c0
|
[
"MIT"
] | 11
|
2018-10-03T11:43:28.000Z
|
2020-10-07T09:32:27.000Z
|
Code/rozali.py
|
Mehrabkb/CintaKamu
|
62c8eacc3bfb2be205e854b92b3df65fc77692c0
|
[
"MIT"
] | 117
|
2018-10-03T11:46:22.000Z
|
2022-03-11T03:21:34.000Z
|
#Rozali Izaq
#Indonesia
print("Aku Cinta Kamu")
| 12
| 23
| 0.75
| 7
| 48
| 5.142857
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.125
| 48
| 3
| 24
| 16
| 0.857143
| 0.416667
| 0
| 0
| 0
| 0
| 0.538462
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
|
0
| 5
|
5bf9fbaf08df8b466f8f17349698dc5d41488d83
| 151
|
py
|
Python
|
aspace_tools/tests/test_queries.py
|
yalemssa/aspace-tools
|
3ea1a0be08d85eddeaee93c5564bb9e865f6f8c8
|
[
"MIT"
] | 4
|
2019-08-15T18:47:48.000Z
|
2021-12-12T17:47:57.000Z
|
aspace_tools/tests/test_queries.py
|
yalemssa/aspace-tools
|
3ea1a0be08d85eddeaee93c5564bb9e865f6f8c8
|
[
"MIT"
] | 1
|
2021-05-04T19:49:16.000Z
|
2021-05-04T19:49:16.000Z
|
aspace_tools/tests/test_queries.py
|
yalemssa/aspace-tools
|
3ea1a0be08d85eddeaee93c5564bb9e865f6f8c8
|
[
"MIT"
] | null | null | null |
#TESTS: Check if all files open, if they correspond to AS DB schema, and if the number and titles of functions correspond with what's in the directory
| 75.5
| 150
| 0.788079
| 28
| 151
| 4.25
| 0.821429
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.178808
| 151
| 1
| 151
| 151
| 0.959677
| 0.986755
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
756006360a8bcb601355f5f7ddb844eac2a80a31
| 295
|
py
|
Python
|
src/datasets/base_dataset_factory.py
|
elangovana/bert-reverse
|
95147e7c1959024a759c4e45e1ce1c5200fc69be
|
[
"MIT"
] | null | null | null |
src/datasets/base_dataset_factory.py
|
elangovana/bert-reverse
|
95147e7c1959024a759c4e45e1ce1c5200fc69be
|
[
"MIT"
] | null | null | null |
src/datasets/base_dataset_factory.py
|
elangovana/bert-reverse
|
95147e7c1959024a759c4e45e1ce1c5200fc69be
|
[
"MIT"
] | null | null | null |
class BaseDatasetFactory:
def get_dataset(self, data, postprocessors=None, **kwargs):
raise NotImplementedError
def get_label_mapper(self, data=None, postprocessors=None, **kwargs):
raise NotImplementedError
def get_scorers(self):
raise NotImplementedError
| 29.5
| 73
| 0.725424
| 30
| 295
| 7
| 0.5
| 0.085714
| 0.228571
| 0.27619
| 0.514286
| 0.514286
| 0.514286
| 0
| 0
| 0
| 0
| 0
| 0.19661
| 295
| 9
| 74
| 32.777778
| 0.886076
| 0
| 0
| 0.428571
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.428571
| false
| 0
| 0
| 0
| 0.571429
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 5
|
f328eca4f5b0713c1ba95f1755acea55bfb84e09
| 15,616
|
py
|
Python
|
etk/unit_tests/data_extractors_tests/test_extraction_url_country.py
|
linqyd/etk
|
dcf0cae4076619f5261573d47b4f5f26baaf15b7
|
[
"MIT"
] | null | null | null |
etk/unit_tests/data_extractors_tests/test_extraction_url_country.py
|
linqyd/etk
|
dcf0cae4076619f5261573d47b4f5f26baaf15b7
|
[
"MIT"
] | null | null | null |
etk/unit_tests/data_extractors_tests/test_extraction_url_country.py
|
linqyd/etk
|
dcf0cae4076619f5261573d47b4f5f26baaf15b7
|
[
"MIT"
] | null | null | null |
import unittest
import sys, os
import json
sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
sys.path.append(os.path.join(os.path.dirname(__file__), '../..'))
from data_extractors import url_country_extractor
class TestUrlCountryExtractorMethods(unittest.TestCase):
def setUp(self):
file_path = os.path.join(os.path.dirname(__file__), "../ground_truth/url_countries.jl")
self.f = open(file_path, 'r')
self.country_code_dict = {"gw": "Guinea-Bissau", "gu": "Guam", "gt": "Guatemala",
"gs": "South Georgia and the South Sandwich Islands", "gr": "Greece",
"gq": "Equatorial Guinea", "gp": "Guadeloupe", "gy": "Guyana", "gg": "Guernsey",
"gf": "French Guiana", "ge": "Georgia", "gd": "Grenada", "gb": "United Kingdom",
"ga": "Gabon", "gn": "Guinea", "gm": "Gambia", "gl": "Greenland", "gi": "Gibraltar",
"gh": "Ghana", "tz": "Tanzania", "tv": "Tuvalu", "tw": "Taiwan",
"tt": "Trinidad and Tobago", "tr": "Turkey", "tn": "Tunisia", "to": "Tonga",
"tl": "East Timor", "tm": "Turkmenistan", "tj": "Tajikistan", "tk": "Tokelau",
"th": "Thailand", "tf": "French Southern Territories", "tg": "Togo", "td": "Chad",
"tc": "Turks and Caicos Islands", "GW": "Guinea-Bissau", "GU": "Guam",
"GT": "Guatemala", "GS": "South Georgia and the South Sandwich Islands",
"GR": "Greece", "GQ": "Equatorial Guinea", "GP": "Guadeloupe", "mg": "Madagascar",
"GY": "Guyana", "GG": "Guernsey", "GF": "French Guiana", "GE": "Georgia",
"GD": "Grenada", "GB": "United Kingdom", "GA": "Gabon", "GN": "Guinea",
"GM": "Gambia", "GL": "Greenland", "GI": "Gibraltar", "GH": "Ghana", "vu": "Vanuatu",
"md": "Moldova", "mm": "Myanmar", "ml": "Mali", "zm": "Zambia", "za": "South Africa",
"mh": "Marshall Islands", "zw": "Zimbabwe", "mu": "Mauritius", "mt": "Malta",
"mf": "Saint Martin", "mw": "Malawi", "mv": "Maldives", "mq": "Martinique",
"mp": "Northern Mariana Islands", "ZM": "Zambia", "ms": "Montserrat",
"mr": "Mauritania", "ZA": "South Africa", "ZW": "Zimbabwe", "ME": "Montenegro",
"MD": "Moldova", "MG": "Madagascar", "MF": "Saint Martin", "MA": "Morocco",
"MC": "Monaco", "MM": "Myanmar", "ML": "Mali", "MO": "Macao", "MN": "Mongolia",
"MH": "Marshall Islands", "MK": "Macedonia", "MU": "Mauritius", "MT": "Malta",
"MW": "Malawi", "MV": "Maldives", "MQ": "Martinique",
"MP": "Northern Mariana Islands", "MS": "Montserrat", "MR": "Mauritania",
"mz": "Mozambique", "MY": "Malaysia", "MX": "Mexico", "MZ": "Mozambique",
"FR": "France", "FI": "Finland", "FJ": "Fiji", "FK": "Falkland Islands",
"FM": "Micronesia", "FO": "Faroe Islands", "me": "Montenegro", "SZ": "Swaziland",
"SY": "Syria", "SX": "Sint Maarten", "ma": "Morocco", "mc": "Monaco",
"SS": "South Sudan", "SR": "Suriname", "mo": "Macao", "mn": "Mongolia",
"SV": "El Salvador", "mk": "Macedonia", "ST": "Sao Tome and Principe",
"SK": "Slovakia", "SJ": "Svalbard and Jan Mayen", "SI": "Slovenia",
"SH": "Saint Helena", "SO": "Somalia", "SN": "Senegal", "SM": "San Marino",
"SL": "Sierra Leone", "SC": "Seychelles", "SB": "Solomon Islands",
"SA": "Saudi Arabia", "SG": "Singapore", "mx": "Mexico", "SE": "Sweden",
"SD": "Sudan", "YE": "Yemen", "YT": "Mayotte", "LB": "Lebanon", "LC": "Saint Lucia",
"LA": "Laos", "LK": "Sri Lanka", "LI": "Liechtenstein", "LV": "Latvia",
"LT": "Lithuania", "LU": "Luxembourg", "LR": "Liberia", "LS": "Lesotho",
"LY": "Libya", "fr": "France", "fi": "Finland", "fj": "Fiji",
"fk": "Falkland Islands", "fm": "Micronesia", "fo": "Faroe Islands",
"sz": "Swaziland", "sy": "Syria", "sx": "Sint Maarten", "ss": "South Sudan",
"sr": "Suriname", "sv": "El Salvador", "st": "Sao Tome and Principe",
"sk": "Slovakia", "sj": "Svalbard and Jan Mayen", "si": "Slovenia",
"sh": "Saint Helena", "so": "Somalia", "sn": "Senegal", "sm": "San Marino",
"sl": "Sierra Leone", "sc": "Seychelles", "sb": "Solomon Islands",
"sa": "Saudi Arabia", "sg": "Singapore", "se": "Sweden", "sd": "Sudan",
"RU": "Russia", "RW": "Rwanda", "lb": "Lebanon", "lc": "Saint Lucia", "RS": "Serbia",
"lk": "Sri Lanka", "li": "Liechtenstein", "lv": "Latvia", "RE": "Reunion",
"lt": "Lithuania", "lu": "Luxembourg", "lr": "Liberia", "ls": "Lesotho",
"RO": "Romania", "ly": "Libya", "ye": "Yemen", "yt": "Mayotte",
"eh": "Western Sahara", "ee": "Estonia", "eg": "Egypt", "ec": "Ecuador",
"et": "Ethiopia", "es": "Spain", "er": "Eritrea", "ru": "Russia", "rw": "Rwanda",
"rs": "Serbia", "re": "Reunion", "ro": "Romania", "EH": "Western Sahara",
"EE": "Estonia", "EG": "Egypt", "EC": "Ecuador", "ET": "Ethiopia", "ES": "Spain",
"ER": "Eritrea", "VU": "Vanuatu", "xk": "Kosovo", "XK": "Kosovo", "KG": "Kyrgyzstan",
"KE": "Kenya", "KI": "Kiribati", "KH": "Cambodia", "KN": "Saint Kitts and Nevis",
"KM": "Comoros", "KR": "South Korea", "KP": "North Korea", "KW": "Kuwait",
"KZ": "Kazakhstan", "KY": "Cayman Islands", "DO": "Dominican Republic",
"DM": "Dominica", "DJ": "Djibouti", "DK": "Denmark", "DE": "Germany", "DZ": "Algeria",
"my": "Malaysia", "kg": "Kyrgyzstan", "ke": "Kenya", "ki": "Kiribati",
"kh": "Cambodia", "kn": "Saint Kitts and Nevis", "km": "Comoros", "QA": "Qatar",
"kr": "South Korea", "kp": "North Korea", "kw": "Kuwait", "kz": "Kazakhstan",
"ky": "Cayman Islands", "WF": "Wallis and Futuna", "JP": "Japan", "JM": "Jamaica",
"JO": "Jordan", "WS": "Samoa", "JE": "Jersey", "do": "Dominican Republic",
"dm": "Dominica", "dj": "Djibouti", "dk": "Denmark", "de": "Germany", "dz": "Algeria",
"qa": "Qatar", "PR": "Puerto Rico", "PS": "Palestinian Territory", "PW": "Palau",
"PT": "Portugal", "PY": "Paraguay", "PA": "Panama", "PF": "French Polynesia",
"PG": "Papua New Guinea", "PE": "Peru", "PK": "Pakistan", "PH": "Philippines",
"PN": "Pitcairn", "PL": "Poland", "PM": "Saint Pierre and Miquelon",
"wf": "Wallis and Futuna", "jp": "Japan", "jm": "Jamaica", "jo": "Jordan",
"ws": "Samoa", "je": "Jersey", "la": "Laos", "ck": "Cook Islands",
"ci": "Ivory Coast", "ch": "Switzerland", "co": "Colombia", "cn": "China",
"cm": "Cameroon", "cl": "Chile", "cc": "Cocos Islands", "ca": "Canada",
"cg": "Republic of the Congo", "cf": "Central African Republic",
"cd": "Democratic Republic of the Congo", "cz": "Czechia", "cy": "Cyprus",
"cx": "Christmas Island", "cs": "Serbia and Montenegro", "cr": "Costa Rica",
"cw": "Curacao", "cv": "Cape Verde", "cu": "Cuba", "pr": "Puerto Rico",
"ps": "Palestinian Territory", "pw": "Palau", "pt": "Portugal", "py": "Paraguay",
"pa": "Panama", "pf": "French Polynesia", "pg": "Papua New Guinea", "pe": "Peru",
"pk": "Pakistan", "ph": "Philippines", "pn": "Pitcairn", "pl": "Poland",
"pm": "Saint Pierre and Miquelon", "CK": "Cook Islands", "CI": "Ivory Coast",
"CH": "Switzerland", "CO": "Colombia", "CN": "China", "CM": "Cameroon", "CL": "Chile",
"CC": "Cocos Islands", "CA": "Canada", "CG": "Republic of the Congo",
"CF": "Central African Republic", "CD": "Democratic Republic of the Congo",
"CZ": "Czechia", "CY": "Cyprus", "CX": "Christmas Island",
"CS": "Serbia and Montenegro", "CR": "Costa Rica", "CW": "Curacao",
"CV": "Cape Verde", "CU": "Cuba", "va": "Vatican",
"vc": "Saint Vincent and the Grenadines", "ve": "Venezuela",
"vg": "British Virgin Islands", "iq": "Iraq", "vi": "U.S. Virgin Islands",
"is": "Iceland", "ir": "Iran", "it": "Italy", "vn": "Vietnam", "im": "Isle of Man",
"il": "Israel", "io": "British Indian Ocean Territory", "in": "India",
"ie": "Ireland", "id": "Indonesia", "VA": "Vatican",
"VC": "Saint Vincent and the Grenadines", "VE": "Venezuela",
"VG": "British Virgin Islands", "IQ": "Iraq", "VI": "U.S. Virgin Islands",
"IS": "Iceland", "IR": "Iran", "IT": "Italy", "VN": "Vietnam", "IM": "Isle of Man",
"IL": "Israel", "IO": "British Indian Ocean Territory", "IN": "India",
"nl": "Netherlands", "IE": "Ireland", "ID": "Indonesia", "BD": "Bangladesh",
"BE": "Belgium", "BF": "Burkina Faso", "BG": "Bulgaria",
"BA": "Bosnia and Herzegovina", "BB": "Barbados", "BL": "Saint Barthelemy",
"BM": "Bermuda", "BN": "Brunei", "BO": "Bolivia", "BH": "Bahrain", "BI": "Burundi",
"BJ": "Benin", "BT": "Bhutan", "BV": "Bouvet Island", "BW": "Botswana",
"BQ": "Bonaire, Saint Eustatius and Saba ", "BR": "Brazil", "BS": "Bahamas",
"BY": "Belarus", "BZ": "Belize", "nz": "New Zealand", "np": "Nepal", "nr": "Nauru",
"OM": "Oman", "nu": "Niue", "HR": "Croatia", "HT": "Haiti", "HU": "Hungary",
"HK": "Hong Kong", "HN": "Honduras", "HM": "Heard Island and McDonald Islands",
"bd": "Bangladesh", "be": "Belgium", "bf": "Burkina Faso", "bg": "Bulgaria",
"ba": "Bosnia and Herzegovina", "bb": "Barbados", "bl": "Saint Barthelemy",
"bm": "Bermuda", "bn": "Brunei", "bo": "Bolivia", "bh": "Bahrain", "bi": "Burundi",
"bj": "Benin", "bt": "Bhutan", "bv": "Bouvet Island", "bw": "Botswana",
"bq": "Bonaire, Saint Eustatius and Saba ", "br": "Brazil", "bs": "Bahamas",
"by": "Belarus", "bz": "Belize", "om": "Oman", "UY": "Uruguay", "UZ": "Uzbekistan",
"US": "United States", "UM": "United States Minor Outlying Islands", "UG": "Uganda",
"UA": "Ukraine", "NI": "Nicaragua", "NL": "Netherlands", "NO": "Norway",
"NC": "New Caledonia", "NE": "Niger", "NF": "Norfolk Island", "NG": "Nigeria",
"NZ": "New Zealand", "NP": "Nepal", "NR": "Nauru", "NU": "Niue", "hr": "Croatia",
"ht": "Haiti", "hu": "Hungary", "hk": "Hong Kong", "hn": "Honduras",
"hm": "Heard Island and McDonald Islands", "uy": "Uruguay", "uz": "Uzbekistan",
"us": "United States", "um": "United States Minor Outlying Islands", "ug": "Uganda",
"ua": "Ukraine", "ae": "United Arab Emirates", "ad": "Andorra",
"ag": "Antigua and Barbuda", "af": "Afghanistan", "ai": "Anguilla", "am": "Armenia",
"al": "Albania", "ao": "Angola", "an": "Netherlands Antilles", "aq": "Antarctica",
"as": "American Samoa", "ar": "Argentina", "au": "Australia", "at": "Austria",
"aw": "Aruba", "ax": "Aland Islands", "az": "Azerbaijan", "ni": "Nicaragua",
"TZ": "Tanzania", "no": "Norway", "TV": "Tuvalu", "TW": "Taiwan",
"TT": "Trinidad and Tobago", "nc": "New Caledonia", "nan": "Namibia", "ne": "Niger",
"nf": "Norfolk Island", "ng": "Nigeria", "TN": "Tunisia", "TO": "Tonga",
"TL": "East Timor", "TM": "Turkmenistan", "TJ": "Tajikistan", "TK": "Tokelau",
"TH": "Thailand", "TF": "French Southern Territories", "TG": "Togo", "TD": "Chad",
"TC": "Turks and Caicos Islands", "AE": "United Arab Emirates", "AD": "Andorra",
"AG": "Antigua and Barbuda", "AF": "Afghanistan", "AI": "Anguilla", "AM": "Armenia",
"AL": "Albania", "AO": "Angola", "AN": "Netherlands Antilles", "AQ": "Antarctica",
"AS": "American Samoa", "AR": "Argentina", "AU": "Australia", "AT": "Austria",
"AW": "Aruba", "AX": "Aland Islands", "AZ": "Azerbaijan", "TR": "Turkey",
"fx": "France", "na": "Namibia", "nt": "Neutral Zone", "su": "Former USSR",
"tp": "East Timor", "uk": "United Kingdom", "yu": "Yugoslavia", "zr": "Zaire",
"usa": "United States"}
def tearDown(self):
pass
def test_url_country_extractor(self):
for line in self.f:
x = json.loads(line)
extraction = url_country_extractor.extract(x['tokens_url'], self.country_code_dict)
self.assertEqual(x['expected'], extraction[0]['value'])
if __name__ == '__main__':
unittest.main()
| 96.395062
| 120
| 0.410797
| 1,375
| 15,616
| 4.638545
| 0.447273
| 0.005644
| 0.008153
| 0.011289
| 0.780025
| 0.766541
| 0.766541
| 0.724365
| 0.700533
| 0.700533
| 0
| 0.000104
| 0.384542
| 15,616
| 161
| 121
| 96.993789
| 0.663511
| 0
| 0
| 0
| 0
| 0
| 0.395492
| 0.002049
| 0
| 0
| 0
| 0
| 0.006452
| 1
| 0.019355
| false
| 0.006452
| 0.025806
| 0
| 0.051613
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
f39ce68d280d617231cf3c7f99f888b12fbe50a7
| 48
|
py
|
Python
|
tests/__init__.py
|
espenfjo/pyyaledoorman
|
20507adc6047b300edc9ec83fc604fd7a47a2cda
|
[
"MIT"
] | null | null | null |
tests/__init__.py
|
espenfjo/pyyaledoorman
|
20507adc6047b300edc9ec83fc604fd7a47a2cda
|
[
"MIT"
] | 84
|
2021-06-05T07:47:33.000Z
|
2022-03-31T03:16:34.000Z
|
tests/__init__.py
|
espenfjo/pyyaledoorman
|
20507adc6047b300edc9ec83fc604fd7a47a2cda
|
[
"MIT"
] | null | null | null |
"""Test suite for the pyyaledoorman package."""
| 24
| 47
| 0.729167
| 6
| 48
| 5.833333
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.125
| 48
| 1
| 48
| 48
| 0.833333
| 0.854167
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
f3ab52cd4cba6c1dbe6bc9682c31050b1a9cc812
| 70
|
py
|
Python
|
reporter/sources/chat/__init__.py
|
Wikia/jira-reporter
|
af8a2df6dfb679872b82cba67560961d0ad5b2fb
|
[
"MIT"
] | 3
|
2015-08-19T13:27:24.000Z
|
2022-01-14T15:46:19.000Z
|
reporter/sources/chat/__init__.py
|
Wikia/jira-reporter
|
af8a2df6dfb679872b82cba67560961d0ad5b2fb
|
[
"MIT"
] | 74
|
2015-01-22T16:30:20.000Z
|
2022-03-25T17:03:00.000Z
|
reporter/sources/chat/__init__.py
|
Wikia/jira-reporter
|
af8a2df6dfb679872b82cba67560961d0ad5b2fb
|
[
"MIT"
] | 3
|
2016-04-10T18:26:00.000Z
|
2020-06-17T06:35:15.000Z
|
# expose all Mercury-related sources
from .chat import ChatLogsSource
| 23.333333
| 36
| 0.828571
| 9
| 70
| 6.444444
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.128571
| 70
| 2
| 37
| 35
| 0.95082
| 0.485714
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
45f7de777058e5bcf64c2db19a80548bd66c17cc
| 54
|
py
|
Python
|
SeismicReduction/__init__.py
|
msc-acse/acse-9-independent-research-project-coush001
|
0cef182c835ce896d55c1c0721cd6d20f383619b
|
[
"MIT"
] | 2
|
2019-08-29T20:33:38.000Z
|
2019-08-31T18:03:07.000Z
|
SeismicReduction/__init__.py
|
msc-acse/acse-9-independent-research-project-coush001
|
0cef182c835ce896d55c1c0721cd6d20f383619b
|
[
"MIT"
] | 10
|
2019-07-04T09:36:12.000Z
|
2019-08-06T15:13:21.000Z
|
SeismicReduction/__init__.py
|
msc-acse/acse-9-independent-research-project-coush001
|
0cef182c835ce896d55c1c0721cd6d20f383619b
|
[
"MIT"
] | null | null | null |
# __init__.py
from .core import *
from .utils import *
| 18
| 20
| 0.722222
| 8
| 54
| 4.375
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.166667
| 54
| 3
| 20
| 18
| 0.777778
| 0.203704
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
3426cbc8ad766f3b1dc8fe2239fb99a643dc06fb
| 616
|
py
|
Python
|
flask_pblog/security.py
|
Nicals/pblog
|
b0422233216cc11b60be801c84043061334f047c
|
[
"MIT"
] | null | null | null |
flask_pblog/security.py
|
Nicals/pblog
|
b0422233216cc11b60be801c84043061334f047c
|
[
"MIT"
] | 1
|
2019-03-14T15:18:41.000Z
|
2019-03-14T15:18:41.000Z
|
flask_pblog/security.py
|
Nicals/pblog
|
b0422233216cc11b60be801c84043061334f047c
|
[
"MIT"
] | null | null | null |
from itsdangerous import TimestampSigner
from werkzeug.security import generate_password_hash
from werkzeug.security import check_password_hash
def hash_password(password):
return generate_password_hash(password, 'pbkdf2:sha256:2000', salt_length=12)
def check_password(password, password_hash):
return check_password_hash(password_hash, password)
def generate_token(username, secret_key):
signer = TimestampSigner(secret_key)
return signer.sign(username)
def validate_token(token, secret_key, max_age=None):
signer = TimestampSigner(secret_key)
signer.unsign(token, max_age=max_age)
| 28
| 81
| 0.808442
| 80
| 616
| 5.95
| 0.3625
| 0.151261
| 0.12605
| 0.109244
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.018416
| 0.118506
| 616
| 21
| 82
| 29.333333
| 0.858195
| 0
| 0
| 0.153846
| 1
| 0
| 0.029221
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.307692
| false
| 0.461538
| 0.230769
| 0.153846
| 0.769231
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
|
0
| 5
|
342cb59f6a41afa9860d221518f3459564bcf568
| 131
|
py
|
Python
|
core/commands/__init__.py
|
suuuuumod/awmepost
|
b5e8699c552bb6c0e469fdf867a5bc48b637ce09
|
[
"MIT"
] | 1
|
2021-03-25T09:06:15.000Z
|
2021-03-25T09:06:15.000Z
|
core/commands/__init__.py
|
suuuuumod/awmepost
|
b5e8699c552bb6c0e469fdf867a5bc48b637ce09
|
[
"MIT"
] | null | null | null |
core/commands/__init__.py
|
suuuuumod/awmepost
|
b5e8699c552bb6c0e469fdf867a5bc48b637ce09
|
[
"MIT"
] | null | null | null |
from .base import Commander, ArgParser, HelpAction
from .settings import SettingsCommander
from .activity import ActivityCommander
| 32.75
| 50
| 0.854962
| 14
| 131
| 8
| 0.714286
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.10687
| 131
| 3
| 51
| 43.666667
| 0.957265
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
cab93ab07c8fa93d62c172df44faef6291ed1210
| 152
|
py
|
Python
|
python/nilib/printkeys.py
|
skunkwerks/netinf
|
7f164db64d87d9450ff9233497d10f1c900b527e
|
[
"Apache-2.0"
] | null | null | null |
python/nilib/printkeys.py
|
skunkwerks/netinf
|
7f164db64d87d9450ff9233497d10f1c900b527e
|
[
"Apache-2.0"
] | null | null | null |
python/nilib/printkeys.py
|
skunkwerks/netinf
|
7f164db64d87d9450ff9233497d10f1c900b527e
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/python
import redis
import pprint
c = redis.StrictRedis()
print "List of keys in database:"
redis_keys = c.keys()
pprint.pprint(redis_keys)
| 16.888889
| 33
| 0.75
| 24
| 152
| 4.666667
| 0.583333
| 0.160714
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.125
| 152
| 8
| 34
| 19
| 0.842105
| 0.105263
| 0
| 0
| 0
| 0
| 0.185185
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0.333333
| null | null | 0.5
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 1
|
0
| 5
|
cac5aeb3ca9a5153527f3f732bcbfccc92b11fce
| 113
|
py
|
Python
|
third_party/WebKit/Tools/Scripts/webkitpy/thirdparty/wpt/wpt/tools/wptserve/wptserve/__init__.py
|
wenfeifei/miniblink49
|
2ed562ff70130485148d94b0e5f4c343da0c2ba4
|
[
"Apache-2.0"
] | 5,964
|
2016-09-27T03:46:29.000Z
|
2022-03-31T16:25:27.000Z
|
third_party/WebKit/Tools/Scripts/webkitpy/thirdparty/wpt/wpt/tools/wptserve/wptserve/__init__.py
|
w4454962/miniblink49
|
b294b6eacb3333659bf7b94d670d96edeeba14c0
|
[
"Apache-2.0"
] | 459
|
2016-09-29T00:51:38.000Z
|
2022-03-07T14:37:46.000Z
|
third_party/WebKit/Tools/Scripts/webkitpy/thirdparty/wpt/wpt/tools/wptserve/wptserve/__init__.py
|
w4454962/miniblink49
|
b294b6eacb3333659bf7b94d670d96edeeba14c0
|
[
"Apache-2.0"
] | 1,006
|
2016-09-27T05:17:27.000Z
|
2022-03-30T02:46:51.000Z
|
from server import WebTestHttpd, WebTestServer, Router
from request import Request
from response import Response
| 28.25
| 54
| 0.858407
| 14
| 113
| 6.928571
| 0.571429
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.123894
| 113
| 3
| 55
| 37.666667
| 0.979798
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
cadd9f579e70e6ecc01d678d0588aa37f992585a
| 75
|
py
|
Python
|
src/slinject/_inject_linux.py
|
vbe0201/slinject
|
b370b7a8ccb8cbbef18be5368866754d1f21e684
|
[
"MIT"
] | 1
|
2020-04-30T19:13:28.000Z
|
2020-04-30T19:13:28.000Z
|
src/slinject/_inject_linux.py
|
vbe0201/slinject
|
b370b7a8ccb8cbbef18be5368866754d1f21e684
|
[
"MIT"
] | null | null | null |
src/slinject/_inject_linux.py
|
vbe0201/slinject
|
b370b7a8ccb8cbbef18be5368866754d1f21e684
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# TODO: Implement SO injection for Linux systems.
| 18.75
| 49
| 0.653333
| 10
| 75
| 4.9
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.016393
| 0.186667
| 75
| 3
| 50
| 25
| 0.786885
| 0.92
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0.333333
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
1b1ac77bf6e3a5c0b2e621b3f8cafe1fa978808d
| 137
|
py
|
Python
|
karkinos/__init__.py
|
0xb0bb/karkinos
|
2346a1627eede1b960307e2e209697c081007214
|
[
"MIT"
] | 195
|
2020-02-01T22:00:27.000Z
|
2022-02-23T02:49:02.000Z
|
karkinos/__init__.py
|
JulianVolodia/karkinos
|
ebed03c6b02b6786b646e225126a4fbfcaafe273
|
[
"MIT"
] | 3
|
2020-07-25T09:19:08.000Z
|
2021-11-14T22:25:30.000Z
|
karkinos/__init__.py
|
JulianVolodia/karkinos
|
ebed03c6b02b6786b646e225126a4fbfcaafe273
|
[
"MIT"
] | 19
|
2020-02-02T10:13:06.000Z
|
2022-03-23T14:59:47.000Z
|
##
## b0bb - 31/01/2020 - Karkinos
##
## https://twitter.com/0xb0bb
## https://github.com/0xb0bb/karkinos
##
import karkinos.version
| 15.222222
| 38
| 0.656934
| 17
| 137
| 5.294118
| 0.705882
| 0.2
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.110169
| 0.138686
| 137
| 8
| 39
| 17.125
| 0.652542
| 0.671533
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
1b202c0404bd4b56789b57049752a4fa39d115ab
| 74
|
py
|
Python
|
03.Bool.py
|
CristianoRC/Estudos-Python
|
8daa1eda9b803e0937ab0c0dd2e19102d538be37
|
[
"MIT"
] | null | null | null |
03.Bool.py
|
CristianoRC/Estudos-Python
|
8daa1eda9b803e0937ab0c0dd2e19102d538be37
|
[
"MIT"
] | null | null | null |
03.Bool.py
|
CristianoRC/Estudos-Python
|
8daa1eda9b803e0937ab0c0dd2e19102d538be37
|
[
"MIT"
] | null | null | null |
x = 10 > 15
y = 15 > 10
print(x and y)
print(x or y)
print(not (x and y))
| 12.333333
| 20
| 0.567568
| 19
| 74
| 2.210526
| 0.421053
| 0.285714
| 0.238095
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.148148
| 0.27027
| 74
| 5
| 21
| 14.8
| 0.62963
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0.6
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
|
0
| 5
|
1b221c76848b1bee75e17f4cd7430d27a1880492
| 156
|
py
|
Python
|
chatbot/logging/consolelogger.py
|
squahtx/hal9000
|
80e13911d0cf240c786f016993cd18bb063e687f
|
[
"MIT"
] | null | null | null |
chatbot/logging/consolelogger.py
|
squahtx/hal9000
|
80e13911d0cf240c786f016993cd18bb063e687f
|
[
"MIT"
] | null | null | null |
chatbot/logging/consolelogger.py
|
squahtx/hal9000
|
80e13911d0cf240c786f016993cd18bb063e687f
|
[
"MIT"
] | null | null | null |
from datetime import datetime
import time
from .logger import Logger
class ConsoleLogger(Logger):
# ILogger
def logRaw(self, message):
print(message)
| 15.6
| 29
| 0.775641
| 20
| 156
| 6.05
| 0.65
| 0.231405
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.153846
| 156
| 9
| 30
| 17.333333
| 0.916667
| 0.044872
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.166667
| false
| 0
| 0.5
| 0
| 0.833333
| 0.166667
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
1b41068085a554932b1eaafcc6afbcf638a7f551
| 113
|
py
|
Python
|
0x03-python-data_structures/8-multiple_returns.py
|
Rmolimock/holbertonschool-higher_level_programming
|
cf0421cbb6463b3960dc581badf7d4bbe1622b7d
|
[
"MIT"
] | 1
|
2019-05-21T09:34:41.000Z
|
2019-05-21T09:34:41.000Z
|
0x03-python-data_structures/8-multiple_returns.py
|
Rmolimock/holbertonschool-higher_level_programming
|
cf0421cbb6463b3960dc581badf7d4bbe1622b7d
|
[
"MIT"
] | null | null | null |
0x03-python-data_structures/8-multiple_returns.py
|
Rmolimock/holbertonschool-higher_level_programming
|
cf0421cbb6463b3960dc581badf7d4bbe1622b7d
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python3
def multiple_returns(sentence):
return (len(sentence), sentence[0] if sentence else None)
| 28.25
| 61
| 0.743363
| 16
| 113
| 5.1875
| 0.8125
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.020202
| 0.123894
| 113
| 3
| 62
| 37.666667
| 0.818182
| 0.150442
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| false
| 0
| 0
| 0.5
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 5
|
1b434a1855866aa0e55f59f7c78da8dd5c943d65
| 105
|
py
|
Python
|
python/speech_recognition.py
|
odrzywolski-lukas/odrzywolskiSprintScripts
|
2f6248db0d43542ac79627c94508d8e539524db7
|
[
"MIT"
] | null | null | null |
python/speech_recognition.py
|
odrzywolski-lukas/odrzywolskiSprintScripts
|
2f6248db0d43542ac79627c94508d8e539524db7
|
[
"MIT"
] | null | null | null |
python/speech_recognition.py
|
odrzywolski-lukas/odrzywolskiSprintScripts
|
2f6248db0d43542ac79627c94508d8e539524db7
|
[
"MIT"
] | null | null | null |
#C:\Users\Baxter\Documents\repos\models\research\syntaxnet\tensorflow\tensorflow\examples\speech_commands
| 105
| 105
| 0.87619
| 13
| 105
| 7
| 0.923077
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 105
| 1
| 105
| 105
| 0.866667
| 0.990476
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
1b455c1b329f03d1a923970414e584a4b66daafc
| 115
|
py
|
Python
|
src/entities/_Table.py
|
Truta446/cardapio-digital-python-printer
|
5e69e445e5fb1b5a73837f27ef9e7f88c2c4efa9
|
[
"MIT"
] | null | null | null |
src/entities/_Table.py
|
Truta446/cardapio-digital-python-printer
|
5e69e445e5fb1b5a73837f27ef9e7f88c2c4efa9
|
[
"MIT"
] | null | null | null |
src/entities/_Table.py
|
Truta446/cardapio-digital-python-printer
|
5e69e445e5fb1b5a73837f27ef9e7f88c2c4efa9
|
[
"MIT"
] | null | null | null |
class Table(object):
def __init__(self, table: dict):
self.table_number = table.get('table_number')
| 28.75
| 54
| 0.66087
| 15
| 115
| 4.666667
| 0.6
| 0.257143
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.208696
| 115
| 3
| 55
| 38.333333
| 0.769231
| 0
| 0
| 0
| 0
| 0
| 0.107143
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 5
|
1b6d022cbf69dd668506058ac4548190169ae802
| 22,208
|
py
|
Python
|
tests/unit_tests/test_story.py
|
sathiscode/trumania
|
bcf21c4f9e1ff0fe03fd9cbe2dc367f0df033fbc
|
[
"Apache-2.0"
] | 97
|
2018-01-15T19:29:31.000Z
|
2022-03-11T00:27:34.000Z
|
tests/unit_tests/test_story.py
|
sathiscode/trumania
|
bcf21c4f9e1ff0fe03fd9cbe2dc367f0df033fbc
|
[
"Apache-2.0"
] | 10
|
2018-01-15T22:44:55.000Z
|
2022-02-18T09:44:10.000Z
|
tests/unit_tests/test_story.py
|
sathiscode/trumania
|
bcf21c4f9e1ff0fe03fd9cbe2dc367f0df033fbc
|
[
"Apache-2.0"
] | 33
|
2018-01-15T19:34:23.000Z
|
2022-03-05T22:39:33.000Z
|
import pandas as pd
import numpy as np
from trumania.core.operations import Operation
from trumania.core.random_generators import SequencialGenerator, ConstantGenerator, ConstantDependentGenerator
from trumania.core.population import Population
from trumania.core.story import Story
from tests.mocks.random_generators import MockTimerGenerator, ConstantsMockGenerator
from tests.mocks.operations import MockDropOp, FakeRecording
def test_empty_story_should_do_nothing_and_not_crash():
customers = Population(circus=None, size=1000,
ids_gen=SequencialGenerator(prefix="a"))
empty_story = Story(
name="purchase",
initiating_population=customers,
member_id_field="AGENT")
logs = empty_story.execute()
# no logs should be produced
assert logs == {}
def test_all_populations_should_be_inactive_when_timers_are_positive():
population = Population(circus=None, size=10,
ids_gen=SequencialGenerator(prefix="ac_", max_length=1))
# 5 populations should trigger in 2 ticks, and 5 more
init_timers = pd.Series([2] * 5 + [1] * 5, index=population.ids)
timers_gen = MockTimerGenerator(init_timers)
story = Story(
name="tested",
initiating_population=population,
member_id_field="ac_id",
# forcing the timer of all populations to be initialized to 0
timer_gen=timers_gen,
auto_reset_timer=True
)
assert ([], population.ids.tolist()) == story.active_inactive_ids()
def test_active_inactive_ids_should_mark_timer_0_as_active():
population = Population(circus=None, size=10,
ids_gen=SequencialGenerator(prefix="ac_", max_length=1))
# 5 populations should trigger in 2 ticks, and 5 more
init_timers = pd.Series([0] * 5 + [1] * 5, index=population.ids)
timers_gen = MockTimerGenerator(init_timers)
story = Story(
name="tested",
initiating_population=population,
member_id_field="ac_id",
# forcing the timer of all populations to be initialized to 0
timer_gen=timers_gen,
auto_reset_timer=True
)
assert (population.ids[:5].tolist(), population.ids[5:].tolist()) == story.active_inactive_ids()
def test_active_inactive_ids_should_mark_all_populations_active_when_all_timers_0():
population = Population(circus=None, size=10,
ids_gen=SequencialGenerator(prefix="ac_", max_length=1))
# 5 populations should trigger in 2 ticks, and 5 more
init_timers = pd.Series([0] * 10, index=population.ids)
timers_gen = MockTimerGenerator(init_timers)
story = Story(
name="tested",
initiating_population=population,
member_id_field="ac_id",
# forcing the timer of all populations to be initialized to 0
timer_gen=timers_gen,
auto_reset_timer=True
)
assert (population.ids.tolist(), []) == story.active_inactive_ids()
def test_get_activity_should_be_default_by_default():
population = Population(circus=None, size=10,
ids_gen=SequencialGenerator(prefix="ac_", max_length=1))
story = Story(name="tested", initiating_population=population,
member_id_field="")
# by default, each population should be in the default state with activity 1
assert [1.] * 10 == story.get_param("activity", population.ids).tolist()
assert story.get_possible_states() == ["default"]
def test_populations_with_zero_activity_should_never_have_positive_timer():
population = Population(circus=None, size=10,
ids_gen=SequencialGenerator(prefix="ac_", max_length=1))
story = Story(
name="tested",
initiating_population=population,
# fake generator that assign zero activity to 3 populations
activity_gen=ConstantsMockGenerator([1, 1, 1, 1, 0, 1, 1, 0, 0, 1]),
timer_gen=ConstantDependentGenerator(value=10),
member_id_field="")
story.reset_timers()
# all non zero populations should have been through the profiler => timer to 10
# all others should be locked to -1, to reflect that activity 0 never
# triggers anything
expected_timers = [10, 10, 10, 10, -1, 10, 10, -1, -1, 10]
assert expected_timers == story.timer["remaining"].tolist()
def test_get_activity_should_be_aligned_for_each_state():
excited_call_activity = ConstantGenerator(value=10)
back_to_normal_prob = ConstantGenerator(value=.3)
population = Population(circus=None, size=10,
ids_gen=SequencialGenerator(prefix="ac_", max_length=1))
story = Story(name="tested", initiating_population=population,
member_id_field="",
states={
"excited": {
"activity": excited_call_activity,
"back_to_default_probability":
back_to_normal_prob}
})
# by default, each population should be in the default state with activity 1
assert [1] * 10 == story.get_param("activity", population.ids).tolist()
assert [1] * 10 == story.get_param("back_to_default_probability",
population.ids).tolist()
assert sorted(story.get_possible_states()) == ["default", "excited"]
story.transit_to_state(["ac_2", "ac_5", "ac_9"],
["excited", "excited", "excited"])
# activity and probability of getting back to normal should now be updated
expected_activity = [1, 1, 10, 1, 1, 10, 1, 1, 1, 10]
assert expected_activity == story.get_param("activity",
population.ids).tolist()
# also, doing a get_param for some specific population ids should return the
# correct values (was buggy if we requested sth else than the whole list)
assert expected_activity[2:7] == story.get_param("activity",
population.ids[2:7]).tolist()
assert [1, 10] == story.get_param("activity", population.ids[-2:]).tolist()
expected_probs = [1, 1, .3, 1, 1, .3, 1, 1, 1, .3]
assert expected_probs == story.get_param("back_to_default_probability",
population.ids, ).tolist()
def test_scenario_transiting_to_state_with_0_back_to_default_prob_should_remain_there():
"""
we create an story with a transit_to_state operation and 0 probability
of going back to normal => after the execution, all triggered populations should
still be in that starte
"""
population = Population(circus=None, size=10,
ids_gen=SequencialGenerator(prefix="ac_", max_length=1))
# here we are saying that some story on populations 5 to 9 is triggering a
# state change on populations 0 to 4
active_ids_gens = ConstantsMockGenerator(
values=[np.nan] * 5 + population.ids[:5].tolist())
excited_state_gens = ConstantsMockGenerator(
values=[np.nan] * 5 + ["excited"] * 5)
excited_call_activity = ConstantGenerator(value=10)
# forcing to stay excited
back_to_normal_prob = ConstantGenerator(value=0)
story = Story(
name="tested",
initiating_population=population,
member_id_field="ac_id",
states={
"excited": {
"activity": excited_call_activity,
"back_to_default_probability": back_to_normal_prob}
},
# forcing the timer of all populations to be initialized to 0
timer_gen=ConstantDependentGenerator(value=0)
)
story.set_operations(
# first 5 population are "active"
active_ids_gens.ops.generate(named_as="active_ids"),
excited_state_gens.ops.generate(named_as="new_state"),
# forcing a transition to "excited" state of the 5 populations
story.ops.transit_to_state(member_id_field="active_ids",
state_field="new_state")
)
# before any execution, the state should be default for all
assert ["default"] * 10 == story.timer["state"].tolist()
logs = story.execute()
# no logs are expected as output
assert logs == {}
# the first 5 populations should still be in "excited", since
# "back_to_normal_probability" is 0, the other 5 should not have
# moved
expected_state = ["excited"] * 5 + ["default"] * 5
assert expected_state == story.timer["state"].tolist()
def test_scenario_transiting_to_state_with_1_back_to_default_prob_should_go_back_to_normal():
"""
similar test to above, though this time we are using
back_to_normal_prob = 1 => all populations should be back to "normal" state
at the end of the execution
"""
population = Population(circus=None, size=10,
ids_gen=SequencialGenerator(prefix="ac_", max_length=1))
# this one is slightly tricky: populations
active_ids_gens = ConstantsMockGenerator(
values=[np.nan] * 5 + population.ids[:5].tolist())
excited_state_gens = ConstantsMockGenerator(
values=[np.nan] * 5 + ["excited"] * 5)
excited_call_activity = ConstantGenerator(value=10)
# this time we're forcing to stay in the transited state
back_to_normal_prob = ConstantGenerator(value=1)
story = Story(
name="tested",
initiating_population=population,
member_id_field="ac_id",
states={
"excited": {
"activity": excited_call_activity,
"back_to_default_probability": back_to_normal_prob}
},
# forcing the timer of all populations to be initialized to 0
timer_gen=ConstantDependentGenerator(value=0)
)
story.set_operations(
# first 5 population are "active"
active_ids_gens.ops.generate(named_as="active_ids"),
excited_state_gens.ops.generate(named_as="new_state"),
# forcing a transition to "excited" state of the 5 populations
story.ops.transit_to_state(member_id_field="active_ids",
state_field="new_state")
)
# before any execution, the state should be default for all
assert ["default"] * 10 == story.timer["state"].tolist()
logs = story.execute()
# no logs are expected as output
assert logs == {}
# this time, all populations should have transited back to "normal" at the end
print(story.timer["state"].tolist())
assert ["default"] * 10 == story.timer["state"].tolist()
def test_story_autoreset_true_not_dropping_rows_should_reset_all_timers():
# in case an story is configured to perform an auto-reset, after one
# execution,
# - all executed rows should have a timer back to some positive value
# - all non executed rows should have gone down one tick
population = Population(circus=None, size=10,
ids_gen=SequencialGenerator(prefix="ac_", max_length=1))
# 5 populations should trigger in 2 ticks, and 5 more
init_timers = pd.Series([2] * 5 + [1] * 5, index=population.ids)
timers_gen = MockTimerGenerator(init_timers)
story = Story(
name="tested",
initiating_population=population,
member_id_field="ac_id",
# forcing the timer of all populations to be initialized to 0
timer_gen=timers_gen,
auto_reset_timer=True
)
# empty operation list as initialization
story.set_operations(Operation())
# initial timers should be those provided by the generator
assert story.timer["remaining"].equals(init_timers)
# after one execution, no population id has been selected and all counters
# are decreased by 1
story.execute()
assert story.timer["remaining"].equals(init_timers - 1)
# this time, the last 5 should have executed => go back up to 1. The
# other 5 should now be at 0, ready to execute at next step
story.execute()
expected_timers = pd.Series([0] * 5 + [1] * 5, index=population.ids)
assert story.timer["remaining"].equals(expected_timers)
def test_story_autoreset_true_and_dropping_rows_should_reset_all_timers():
# in case an story is configured to perform an auto-reset, but also
# drops some rows, after one execution,
# - all executed rows (dropped or not) should have a timer back to some
# positive value
# - all non executed rows should have gone down one tick
population = Population(circus=None, size=10,
ids_gen=SequencialGenerator(prefix="ac_", max_length=1))
# 5 populations should trigger in 2 ticks, and 5 more
init_timers = pd.Series([2] * 5 + [1] * 5, index=population.ids)
timers_gen = MockTimerGenerator(init_timers)
story = Story(
name="tested",
initiating_population=population,
member_id_field="ac_id",
# forcing the timer of all populations to be initialized to 0
timer_gen=timers_gen,
auto_reset_timer=True
)
# simulating an operation that drop the last 2 rows
story.set_operations(MockDropOp(0, 2))
# initial timers should be those provided by the generator
assert story.timer["remaining"].equals(init_timers)
# after one execution, no population id has been selected and all counters
# are decreased by 1
story.execute()
assert story.timer["remaining"].equals(init_timers - 1)
# this time, the last 5 should have executed => and the last 3 of them
# should have been dropped. Nonetheless, all 5 of them should be back to 1
story.execute()
expected_timers = pd.Series([0] * 5 + [1] * 5, index=population.ids)
assert story.timer["remaining"].equals(expected_timers)
def test_story_autoreset_false_not_dropping_rows_should_reset_all_timers():
# in case an story is configured not to perform an auto-reset, after one
# execution:
# - all executed rows should now be at -1
# - all non executed rows should have gone down one tick
population = Population(circus=None, size=10,
ids_gen=SequencialGenerator(prefix="ac_", max_length=1))
# 5 populations should trigger in 2 ticks, and 5 more
init_timers = pd.Series([2] * 5 + [1] * 5, index=population.ids)
timers_gen = MockTimerGenerator(init_timers)
story = Story(
name="tested",
initiating_population=population,
member_id_field="ac_id",
# forcing the timer of all populations to be initialized to 0
timer_gen=timers_gen,
auto_reset_timer=False
)
# empty operation list as initialization
story.set_operations(Operation())
# we have no auto-reset => all timers should intially be at -1
all_minus_1 = pd.Series([-1] * 10, index=population.ids)
assert story.timer["remaining"].equals(all_minus_1)
# executing once => should do nothing, and leave all timers at -1
story.execute()
assert story.timer["remaining"].equals(all_minus_1)
# triggering explicitally the story => timers should have the hard-coded
# values from the mock generator
story.reset_timers()
assert story.timer["remaining"].equals(init_timers)
# after one execution, no population id has been selected and all counters
# are decreased by 1
story.execute()
assert story.timer["remaining"].equals(init_timers - 1)
# this time, the last 5 should have executed, but we should not have
# any timer reste => they should go to -1.
# The other 5 should now be at 0, ready to execute at next step
story.execute()
expected_timers = pd.Series([0] * 5 + [-1] * 5, index=population.ids)
assert story.timer["remaining"].equals(expected_timers)
# executing once more: the previously at -1 should still be there, and the
# just executed at this stage should be there too
story.execute()
expected_timers = pd.Series([-1] * 10, index=population.ids)
assert story.timer["remaining"].equals(expected_timers)
def test_story_autoreset_false_and_dropping_rows_should_reset_all_timers():
# in case an story is configured not to perform an auto-reset, after one
# execution:
# - all executed rows should now be at -1 (dropped or not)
# - all non executed rows should have gone down one tick
population = Population(circus=None, size=10,
ids_gen=SequencialGenerator(prefix="ac_", max_length=1))
# 5 populations should trigger in 2 ticks, and 5 more
init_timers = pd.Series([2] * 5 + [1] * 5, index=population.ids)
timers_gen = MockTimerGenerator(init_timers)
story = Story(
name="tested",
initiating_population=population,
member_id_field="ac_id",
# forcing the timer of all populations to be initialized to 0
timer_gen=timers_gen,
auto_reset_timer=False
)
# empty operation list as initialization
# simulating an operation that drop the last 2 rows
story.set_operations(MockDropOp(0, 2))
# we have no auto-reset => all timers should intially be at -1
all_minus_1 = pd.Series([-1] * 10, index=population.ids)
assert story.timer["remaining"].equals(all_minus_1)
# executing once => should do nothing, and leave all timers at -1
story.execute()
assert story.timer["remaining"].equals(all_minus_1)
# triggering explicitaly the story => timers should have the hard-coded
# values from the mock generator
story.reset_timers()
assert story.timer["remaining"].equals(init_timers)
# after one execution, no population id has been selected and all counters
# are decreased by 1
story.execute()
assert story.timer["remaining"].equals(init_timers - 1)
# this time, the last 5 should have executed, but we should not have
# any timer reste => they should go to -1.
# The other 5 should now be at 0, ready to execute at next step
story.execute()
expected_timers = pd.Series([0] * 5 + [-1] * 5, index=population.ids)
assert story.timer["remaining"].equals(expected_timers)
# executing once more: the previously at -1 should still be there, and the
# just executed at this stage should be there too
story.execute()
expected_timers = pd.Series([-1] * 10, index=population.ids)
assert story.timer["remaining"].equals(expected_timers)
def test_bugfix_collisions_force_act_next():
# Previously, resetting the timer of reset populations was cancelling the reset.
#
# We typically want to reset the timer when we have change the activity
# state => we want to generate new timer values that reflect the new state.
#
# But force_act_next should still have priority on that: if somewhere else
# we force some populations to act at the next clock step (e.g. to re-try
# buying an ER or so), the fact that their activity level changed should
# not cancel the retry.
population = Population(circus=None, size=10,
ids_gen=SequencialGenerator(prefix="ac_", max_length=1))
# 5 populations should trigger in 2 ticks, and 5 more
init_timers = pd.Series([2] * 5 + [1] * 5, index=population.ids)
timers_gen = MockTimerGenerator(init_timers)
story = Story(
name="tested",
initiating_population=population,
member_id_field="ac_id",
# forcing the timer of all populations to be initialized to 0
timer_gen=timers_gen
)
timer_values = story.timer["remaining"].copy()
forced = pd.Index(["ac_1", "ac_3", "ac_7", "ac_8", "ac_9"])
not_forced = pd.Index(["ac_0", "ac_2", "ac_4", "ac_4", "ac_6"])
# force_act_next should only impact those ids
story.force_act_next(forced)
assert story.timer.loc[forced]["remaining"].tolist() == [0, 0, 0, 0, 0]
assert story.timer.loc[not_forced]["remaining"].equals(
timer_values[not_forced]
)
# resetting the timers should not change the timers of the populations that
# are being forced
story.reset_timers()
assert story.timer.loc[forced]["remaining"].tolist() == [0, 0, 0, 0, 0]
# Ticking the timers should not change the timers of the populations that
# are being forced.
# This is important for population forcing themselves to act at the next
# clock
# step (typical scenario for retry) => the fact of thick the clock at the
# end of the execution should not impact them.
story.timer_tick(population.ids)
assert story.timer.loc[forced]["remaining"].tolist() == [0, 0, 0, 0, 0]
assert story.timer.loc[not_forced]["remaining"].equals(
timer_values[not_forced] - 1
)
def test_bugfix_force_populations_should_only_act_once():
population = Population(circus=None, size=10,
ids_gen=SequencialGenerator(prefix="ac_", max_length=1))
# 5 populations should trigger in 2 ticks, and 5 more
init_timers = pd.Series([2] * 5 + [5] * 5, index=population.ids)
timers_gen = MockTimerGenerator(init_timers)
story = Story(
name="tested",
initiating_population=population,
member_id_field="ac_id",
# forcing the timer of all populations to be initialized to 0
timer_gen=timers_gen)
recording_op = FakeRecording()
story.set_operations(recording_op)
forced = pd.Index(["ac_1", "ac_3", "ac_7", "ac_8", "ac_9"])
# force_act_next should only impact those ids
story.force_act_next(forced)
assert story.timer["remaining"].tolist() == [2, 0, 2, 0, 2, 5, 5, 0, 0, 0]
story.execute()
assert recording_op.last_seen_population_ids == ["ac_1", "ac_3", "ac_7", "ac_8", "ac_9"]
print(story.timer["remaining"].tolist())
assert story.timer["remaining"].tolist() == [1, 2, 1, 2, 1, 4, 4, 5, 5, 5]
recording_op.reset()
story.execute()
assert recording_op.last_seen_population_ids == []
assert story.timer["remaining"].tolist() == [0, 1, 0, 1, 0, 3, 3, 4, 4, 4]
story.execute()
assert recording_op.last_seen_population_ids == ["ac_0", "ac_2", "ac_4"]
assert story.timer["remaining"].tolist() == [2, 0, 2, 0, 2, 2, 2, 3, 3, 3]
| 38.027397
| 110
| 0.667237
| 2,969
| 22,208
| 4.812058
| 0.098013
| 0.025198
| 0.030237
| 0.038497
| 0.786169
| 0.766501
| 0.741513
| 0.729824
| 0.724365
| 0.711696
| 0
| 0.023293
| 0.236401
| 22,208
| 583
| 111
| 38.092624
| 0.8192
| 0.298406
| 0
| 0.679612
| 0
| 0
| 0.063639
| 0.008758
| 0
| 0
| 0
| 0
| 0.161812
| 1
| 0.048544
| false
| 0
| 0.02589
| 0
| 0.074434
| 0.006472
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
1b762e80c1be8b3a14ca6575eddb7dbcb2b6b449
| 42
|
py
|
Python
|
app/api/controller/__init__.py
|
ChegeBryan/black-bandana
|
6ef8f62c4e9d4415c6f6f1cc7cd8240ae21e9ce3
|
[
"MIT"
] | 2
|
2019-01-05T07:01:13.000Z
|
2019-03-17T08:11:19.000Z
|
app/api/controller/__init__.py
|
ChegeBryan/black-bandana
|
6ef8f62c4e9d4415c6f6f1cc7cd8240ae21e9ce3
|
[
"MIT"
] | 3
|
2019-01-23T21:09:04.000Z
|
2020-11-20T07:40:16.000Z
|
app/api/controller/__init__.py
|
ChegeBryan/black-bandana
|
6ef8f62c4e9d4415c6f6f1cc7cd8240ae21e9ce3
|
[
"MIT"
] | null | null | null |
# package holds the api endpoints modules
| 21
| 41
| 0.809524
| 6
| 42
| 5.666667
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.166667
| 42
| 1
| 42
| 42
| 0.971429
| 0.928571
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
942a00727a853c63d15c8690c815d5c98f60a0f6
| 215
|
py
|
Python
|
voting/admin.py
|
vendari12/ELECX
|
316f9f942b76c4279dbb138bee06a7a5732bb0cc
|
[
"Apache-2.0"
] | 1
|
2022-03-26T18:55:24.000Z
|
2022-03-26T18:55:24.000Z
|
voting/admin.py
|
vendari12/ELECX
|
316f9f942b76c4279dbb138bee06a7a5732bb0cc
|
[
"Apache-2.0"
] | null | null | null |
voting/admin.py
|
vendari12/ELECX
|
316f9f942b76c4279dbb138bee06a7a5732bb0cc
|
[
"Apache-2.0"
] | null | null | null |
from django.contrib import admin
from .models import Candidate, VotingSession, VoteUser
# Register your models here.
admin.site.register(Candidate)
admin.site.register(VotingSession)
admin.site.register(VoteUser)
| 23.888889
| 54
| 0.823256
| 27
| 215
| 6.555556
| 0.481481
| 0.152542
| 0.288136
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.093023
| 215
| 8
| 55
| 26.875
| 0.907692
| 0.12093
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.4
| 0
| 0.4
| 0
| 1
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
9430d47d5186e402c57d6d7eff549863ca7c1d0a
| 625
|
py
|
Python
|
Plugins/UnrealEnginePython/Binaries/Win64/Lib/site-packages/tensorflow/_api/v1/python_io/__init__.py
|
JustinACoder/H22-GR3-UnrealAI
|
361eb9ef1147f8a2991e5f98c4118cd823184adf
|
[
"MIT"
] | 6
|
2022-02-04T18:12:24.000Z
|
2022-03-21T23:57:12.000Z
|
Lib/site-packages/tensorflow/_api/v1/python_io/__init__.py
|
shfkdroal/Robot-Learning-in-Mixed-Adversarial-and-Collaborative-Settings
|
1fa4cd6a566c8745f455fc3d2273208f21f88ced
|
[
"bzip2-1.0.6"
] | null | null | null |
Lib/site-packages/tensorflow/_api/v1/python_io/__init__.py
|
shfkdroal/Robot-Learning-in-Mixed-Adversarial-and-Collaborative-Settings
|
1fa4cd6a566c8745f455fc3d2273208f21f88ced
|
[
"bzip2-1.0.6"
] | 1
|
2022-02-08T03:53:23.000Z
|
2022-02-08T03:53:23.000Z
|
# This file is MACHINE GENERATED! Do not edit.
# Generated by: tensorflow/python/tools/api/generator/create_python_api.py script.
"""Python functions for directly manipulating TFRecord-formatted files.
See the [Python IO](https://tensorflow.org/api_guides/python/python_io) guide.
"""
from __future__ import print_function
from tensorflow.python.lib.io.python_io import TFRecordCompressionType
from tensorflow.python.lib.io.python_io import TFRecordOptions
from tensorflow.python.lib.io.python_io import TFRecordWriter
from tensorflow.python.lib.io.python_io import tf_record_iterator
del print_function
| 36.764706
| 83
| 0.8112
| 87
| 625
| 5.643678
| 0.505747
| 0.09776
| 0.162933
| 0.187373
| 0.317719
| 0.317719
| 0.317719
| 0.317719
| 0
| 0
| 0
| 0
| 0.112
| 625
| 16
| 84
| 39.0625
| 0.884685
| 0.44
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.833333
| 0
| 0.833333
| 0.333333
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
944c14419fa992106c7d04a97e49df966c997025
| 547
|
py
|
Python
|
src/server_design/algorithms/compressor/designSolutions/sol_553.py
|
robertpardillo/Funnel
|
f45e419f55e085bbb95e17c47b4c94a7c625ba9b
|
[
"MIT"
] | 1
|
2021-05-18T16:10:49.000Z
|
2021-05-18T16:10:49.000Z
|
src/server_design/algorithms/compressor/designSolutions/sol_553.py
|
robertpardillo/Funnel
|
f45e419f55e085bbb95e17c47b4c94a7c625ba9b
|
[
"MIT"
] | null | null | null |
src/server_design/algorithms/compressor/designSolutions/sol_553.py
|
robertpardillo/Funnel
|
f45e419f55e085bbb95e17c47b4c94a7c625ba9b
|
[
"MIT"
] | null | null | null |
from miscellaneous.functions import print as prt, form
def sol553(design_parameters):
    """Design solution 553: prioritise size, then cost/stall margin.

    Per the original spec: large Psi_c and phi_c drive size, constant
    swirl serves cost and stall margin, and off-design behaviour is not
    a focus. Currently only announces the design priorities; the
    phi/psi/R distributions are not yet computed.

    :param design_parameters: list of [size, stall margin, cost, off-design]
    :return: None (intended: distribution of phi, psi, R)
    """
    announcement = ('Design governing by this mantras (in oder of importance): '
                    '\n size, cost, stall margin, off.design')
    prt(announcement, 'blue')
| 32.176471
| 117
| 0.619744
| 66
| 547
| 5.060606
| 0.651515
| 0.143713
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.007813
| 0.297989
| 547
| 17
| 118
| 32.176471
| 0.861979
| 0.446069
| 0
| 0
| 0
| 0.25
| 0.461187
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| false
| 0.25
| 0.5
| 0
| 0.75
| 0.25
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 1
| 0
| 1
| 0
|
0
| 5
|
9485b60f8cfa96bb0d79244d1d0aac16b2852f44
| 138
|
py
|
Python
|
src_py/hat/orchestrator/__main__.py
|
hat-open/hat-orchestrator
|
db729151c5a61f5c4195fb2a7fba0b0131f84e96
|
[
"Apache-2.0"
] | 1
|
2022-02-01T13:42:57.000Z
|
2022-02-01T13:42:57.000Z
|
src_py/hat/orchestrator/__main__.py
|
hat-open/hat-orchestrator
|
db729151c5a61f5c4195fb2a7fba0b0131f84e96
|
[
"Apache-2.0"
] | null | null | null |
src_py/hat/orchestrator/__main__.py
|
hat-open/hat-orchestrator
|
db729151c5a61f5c4195fb2a7fba0b0131f84e96
|
[
"Apache-2.0"
] | null | null | null |
import sys
from hat.orchestrator.main import main
if __name__ == '__main__':
    # Present the tool name instead of the script path to anything that
    # reads sys.argv[0] (presumably usage/help output — confirm in main()).
    sys.argv[0] = 'hat-orchestrator'
    # Propagate main()'s return value as the process exit status.
    sys.exit(main())
| 15.333333
| 38
| 0.681159
| 19
| 138
| 4.526316
| 0.578947
| 0.348837
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.00885
| 0.181159
| 138
| 8
| 39
| 17.25
| 0.752212
| 0
| 0
| 0
| 0
| 0
| 0.173913
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.4
| 0
| 0.4
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
84966f224e36eae6a804e23ecceaf2afb7faaf09
| 39
|
py
|
Python
|
test/__init__.py
|
mits58/Python-Graph-Library
|
aa85788ad63e356944d77a4c251ad707562dd9c0
|
[
"MIT"
] | null | null | null |
test/__init__.py
|
mits58/Python-Graph-Library
|
aa85788ad63e356944d77a4c251ad707562dd9c0
|
[
"MIT"
] | null | null | null |
test/__init__.py
|
mits58/Python-Graph-Library
|
aa85788ad63e356944d77a4c251ad707562dd9c0
|
[
"MIT"
] | null | null | null |
import sys
# Make modules under the 'pygraph' directory importable by the test suite
# (relative path — assumes tests are run from the repository root; verify).
sys.path.append('pygraph')
| 9.75
| 26
| 0.74359
| 6
| 39
| 4.833333
| 0.833333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.102564
| 39
| 3
| 27
| 13
| 0.828571
| 0
| 0
| 0
| 0
| 0
| 0.179487
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
84981a5324835da326ba3ad9b78c8a4ef0d73441
| 1,048
|
py
|
Python
|
droxi/drox/omcdbase/set1/models.py
|
andydude/droxtools
|
d608ceb715908fb00398c0d28eee74286fef3750
|
[
"MIT"
] | null | null | null |
droxi/drox/omcdbase/set1/models.py
|
andydude/droxtools
|
d608ceb715908fb00398c0d28eee74286fef3750
|
[
"MIT"
] | null | null | null |
droxi/drox/omcdbase/set1/models.py
|
andydude/droxtools
|
d608ceb715908fb00398c0d28eee74286fef3750
|
[
"MIT"
] | null | null | null |
'''
Created on Mar 31, 2014
@author: ajr
'''
from ..models import OMSym
# Each class below registers one symbol of the "set1" content dictionary
# (basic set operations — presumably the OpenMath set1 CD; confirm against
# the OMSym implementation) via the OMSym.called class decorator.
# The class bodies are intentionally empty: registration is the whole point.
@OMSym.called("set1", "cartesian_product")
class CartesianProduct(OMSym):
    pass
@OMSym.called("set1", "in")
class In(OMSym):
    pass
@OMSym.called("set1", "intersect")
class Intersect(OMSym):
    pass
@OMSym.called("set1", "notin")
class NotIn(OMSym):
    pass
@OMSym.called("set1", "notprsubset")
class NotPrSubSet(OMSym):
    pass
@OMSym.called("set1", "notsubset")
class NotSubSet(OMSym):
    pass
@OMSym.called("set1", "prsubset")
class PrSubSet(OMSym):
    pass
@OMSym.called("set1", "set")
class Set(OMSym):
    pass
@OMSym.called("set1", "setdiff")
class SetDiff(OMSym):
    pass
@OMSym.called("set1", "size")
class Size(OMSym):
    pass
@OMSym.called("set1", "subset")
class SubSet(OMSym):
    pass
@OMSym.called("set1", "union")
class Union(OMSym):
    pass
@OMSym.called("set1", "emptyset")
class EmptySet(OMSym):
    pass
@OMSym.called("set1", "map")
class Map(OMSym):
    pass
@OMSym.called("set1", "suchthat")
class SuchThat(OMSym):
    pass
| 15.411765
| 42
| 0.660305
| 132
| 1,048
| 5.234848
| 0.242424
| 0.238784
| 0.325615
| 0.40521
| 0.486252
| 0
| 0
| 0
| 0
| 0
| 0
| 0.023783
| 0.157443
| 1,048
| 67
| 43
| 15.641791
| 0.758777
| 0.035305
| 0
| 0.326087
| 0
| 0
| 0.164506
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.326087
| 0.021739
| 0
| 0.347826
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
|
0
| 5
|
84bad64e5d4cd7387b898f9318163be90ae0f6fa
| 434
|
py
|
Python
|
CursoemVideo/ex009.py
|
arthxvr/coding--python
|
1e91707be6cb8fef816dad0c1a65f2cc3327357e
|
[
"MIT"
] | null | null | null |
CursoemVideo/ex009.py
|
arthxvr/coding--python
|
1e91707be6cb8fef816dad0c1a65f2cc3327357e
|
[
"MIT"
] | null | null | null |
CursoemVideo/ex009.py
|
arthxvr/coding--python
|
1e91707be6cb8fef816dad0c1a65f2cc3327357e
|
[
"MIT"
] | null | null | null |
# Read an integer and print its multiplication table from 1 to 10.
numero = int(input('Número: '))
# A loop replaces the ten near-identical print statements; the output is
# byte-for-byte identical to the original hard-coded version.
for i in range(1, 11):
    print(f'{numero} x {i} = {numero * i}')
| 36.166667
| 41
| 0.534562
| 74
| 434
| 3.135135
| 0.22973
| 0.258621
| 0.517241
| 0.560345
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.061111
| 0.170507
| 434
| 11
| 42
| 39.454545
| 0.583333
| 0
| 0
| 0
| 0
| 0
| 0.691244
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0.909091
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
|
0
| 5
|
84e2dbcc9c935452f045b79fcb975450d4a7696c
| 167
|
py
|
Python
|
demo/app/models/prediction.py
|
T-Sumida/TfLiteModelMaker-TfjsTaskAPI-Example
|
d1ca090910efcedddd99d61443e21b31ee4334c2
|
[
"MIT"
] | 2
|
2021-09-18T10:57:47.000Z
|
2021-09-27T08:35:44.000Z
|
demo/app/models/prediction.py
|
T-Sumida/TfLiteModelMaker-TfjsTaskAPI-Example
|
d1ca090910efcedddd99d61443e21b31ee4334c2
|
[
"MIT"
] | 1
|
2021-09-18T10:18:19.000Z
|
2021-09-24T04:06:09.000Z
|
demo/app/models/prediction.py
|
T-Sumida/TfLiteModelMaker-TfjsTaskAPI-Example
|
d1ca090910efcedddd99d61443e21b31ee4334c2
|
[
"MIT"
] | null | null | null |
# -*- coding:utf-8 -*-
from typing import List
from pydantic import BaseModel
class PredictionResult(BaseModel):
    """Prediction payload with per-detection parallel lists.

    NOTE(review): element types are unconstrained here; presumably bboxes
    hold coordinates, scores hold confidences and classes hold labels —
    confirm with the caller before tightening the annotations.
    """
    bboxes: List   # one entry per detection
    scores: List   # one entry per detection
    classes: List  # one entry per detection
| 16.7
| 34
| 0.706587
| 20
| 167
| 5.9
| 0.7
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.007519
| 0.203593
| 167
| 9
| 35
| 18.555556
| 0.879699
| 0.11976
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.333333
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
84f9ce3211cd674acdd81c9dc8da13209314b1c3
| 953
|
py
|
Python
|
models.py
|
csvillalta/rl-state-influence
|
319942826efcbca4d67389b7b034f7a957bccfee
|
[
"MIT"
] | null | null | null |
models.py
|
csvillalta/rl-state-influence
|
319942826efcbca4d67389b7b034f7a957bccfee
|
[
"MIT"
] | 1
|
2019-04-06T02:43:34.000Z
|
2019-04-06T02:43:34.000Z
|
models.py
|
csvillalta/rl-state-influence
|
319942826efcbca4d67389b7b034f7a957bccfee
|
[
"MIT"
] | null | null | null |
import gin
from keras.layers import Dense
from keras.models import Sequential
from keras.optimizers import Adam
@gin.configurable
def build_basic_network(observation_size, action_size, learning_rate,
                        hidden_units=24):
    """Builds a basic neural network architecture.

    Two ReLU hidden layers feed a linear output layer; the model is
    compiled with MSE loss and an Adam optimizer.

    Args:
        observation_size: dimensionality of the flat observation input.
        action_size: number of output units (one per action).
        learning_rate: Adam learning rate.
        hidden_units: width of both hidden layers. Defaults to 24,
            preserving the original hard-coded architecture.

    Returns:
        A compiled keras Sequential model.
    """
    model = Sequential()
    model.add(Dense(hidden_units, input_dim=observation_size, activation='relu'))
    model.add(Dense(hidden_units, activation='relu'))
    model.add(Dense(action_size, activation='linear'))
    model.compile(loss='mse', optimizer=Adam(lr=learning_rate))
    return model
@gin.configurable
def simple_network(observation_size, action_size, learning_rate,
                   hidden_units=10):
    """Builds a smaller neural network architecture.

    Same shape as build_basic_network but with narrower hidden layers
    (the original docstring was a copy-paste of build_basic_network's).

    Args:
        observation_size: dimensionality of the flat observation input.
        action_size: number of output units (one per action).
        learning_rate: Adam learning rate.
        hidden_units: width of both hidden layers. Defaults to 10,
            preserving the original hard-coded architecture.

    Returns:
        A compiled keras Sequential model.
    """
    model = Sequential()
    model.add(Dense(hidden_units, input_dim=observation_size, activation='relu'))
    model.add(Dense(hidden_units, activation='relu'))
    model.add(Dense(action_size, activation='linear'))
    model.compile(loss='mse', optimizer=Adam(lr=learning_rate))
    return model
| 38.12
| 71
| 0.745016
| 124
| 953
| 5.58871
| 0.322581
| 0.069264
| 0.112554
| 0.126984
| 0.776335
| 0.776335
| 0.776335
| 0.776335
| 0.776335
| 0.632035
| 0
| 0.009674
| 0.132214
| 953
| 25
| 72
| 38.12
| 0.828295
| 0.091291
| 0
| 0.5
| 0
| 0
| 0.03972
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.1
| false
| 0
| 0.2
| 0
| 0.4
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
ca159138f117f7c1b62dde877096897d7cb4f75f
| 102
|
py
|
Python
|
src/benchmarking/main.py
|
Godwinh19/pyforecast
|
1c29e543749a1d72882496dc4a1ecf8da4196d60
|
[
"MIT"
] | null | null | null |
src/benchmarking/main.py
|
Godwinh19/pyforecast
|
1c29e543749a1d72882496dc4a1ecf8da4196d60
|
[
"MIT"
] | null | null | null |
src/benchmarking/main.py
|
Godwinh19/pyforecast
|
1c29e543749a1d72882496dc4a1ecf8da4196d60
|
[
"MIT"
] | null | null | null |
from typing import List
def benchmark(methods: List):
    """Benchmark the given methods (not yet implemented).

    :param methods: list of method objects to benchmark.
    :raises TypeError: if *methods* is not a list.
    :return: None
    """
    # Explicit validation instead of `assert`, which is silently stripped
    # when Python runs with the -O flag.
    if not isinstance(methods, list):
        raise TypeError(
            'methods must be a list, got %s' % type(methods).__name__)
| 14.571429
| 36
| 0.72549
| 13
| 102
| 5.692308
| 0.769231
| 0.297297
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.205882
| 102
| 6
| 37
| 17
| 0.91358
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.25
| 1
| 0.25
| false
| 0.25
| 0.25
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 5
|
ca3d0b18c1dd22c11071eb9d35190606adfffe0d
| 89
|
py
|
Python
|
python/spam/a.py
|
guoxiaoyong/simple-useful
|
63f483250cc5e96ef112aac7499ab9e3a35572a8
|
[
"CC0-1.0"
] | null | null | null |
python/spam/a.py
|
guoxiaoyong/simple-useful
|
63f483250cc5e96ef112aac7499ab9e3a35572a8
|
[
"CC0-1.0"
] | null | null | null |
python/spam/a.py
|
guoxiaoyong/simple-useful
|
63f483250cc5e96ef112aac7499ab9e3a35572a8
|
[
"CC0-1.0"
] | null | null | null |
import pyximport
# Register the pyximport hook so Cython modules compile on import;
# inplace=True keeps build artifacts next to the sources.
pyximport.install(inplace=True)
import sumn  # presumably sumn.pyx, compiled on the fly by the hook above
# Demo: call the compiled extension's add() with a large input.
print(sumn.add(100000000))
| 14.833333
| 31
| 0.820225
| 12
| 89
| 6.083333
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.109756
| 0.078652
| 89
| 5
| 32
| 17.8
| 0.780488
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.75
| 0
| 0.75
| 0.25
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
ca468a071267acef9c52c50beac5369761036ef5
| 302
|
py
|
Python
|
tree/Ratndeep/is tree bst.py
|
Nagendracse1/Competitive-Programming
|
325e151b9259dbc31d331c8932def42e3ab09913
|
[
"MIT"
] | 3
|
2020-12-20T10:23:11.000Z
|
2021-06-16T10:34:18.000Z
|
tree/Ratndeep/is tree bst.py
|
Spring-dot/Competitive-Programming
|
98add277a8b029710c749d1082de25c524e12408
|
[
"MIT"
] | null | null | null |
tree/Ratndeep/is tree bst.py
|
Spring-dot/Competitive-Programming
|
98add277a8b029710c749d1082de25c524e12408
|
[
"MIT"
] | null | null | null |
def bstutil(root, min_v, max_v):
    """Return True iff the subtree at *root* is a BST within [min_v, max_v).

    Each node's data must satisfy min_v <= data < max_v; the bounds tighten
    as the recursion descends (left keeps the node's data as the new max,
    right as the new min). An empty subtree is trivially valid.
    """
    if root is None:
        return True
    return (min_v <= root.data < max_v
            and bstutil(root.left, min_v, root.data)
            and bstutil(root.right, root.data, max_v))
def isBST(root):
    """Return True iff *root* is a valid binary search tree."""
    inf = float("inf")
    return bstutil(root, -inf, inf)
| 33.555556
| 123
| 0.682119
| 53
| 302
| 3.773585
| 0.358491
| 0.22
| 0.11
| 0.12
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.18543
| 302
| 8
| 124
| 37.75
| 0.813008
| 0
| 0
| 0.25
| 0
| 0
| 0.019868
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| false
| 0
| 0
| 0.125
| 0.75
| 0
| 0
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 5
|
ca52b41ef88e1f25652328d01a7b289539c97607
| 83
|
py
|
Python
|
algorithm_logger/__init__.py
|
mbodenhamer/algorithm-logger
|
44e2c4fd322f54329cc135be709f068fe96c4bc9
|
[
"MIT"
] | null | null | null |
algorithm_logger/__init__.py
|
mbodenhamer/algorithm-logger
|
44e2c4fd322f54329cc135be709f068fe96c4bc9
|
[
"MIT"
] | 8
|
2019-10-03T20:42:21.000Z
|
2021-05-08T17:00:01.000Z
|
algorithm_logger/__init__.py
|
mbodenhamer/algorithm-logger
|
44e2c4fd322f54329cc135be709f068fe96c4bc9
|
[
"MIT"
] | null | null | null |
from .base import *
from .spec import *
from .event import *
from .logger import *
| 16.6
| 21
| 0.710843
| 12
| 83
| 4.916667
| 0.5
| 0.508475
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.192771
| 83
| 4
| 22
| 20.75
| 0.880597
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
ca73a59272d2e30651990bdbe4650bed6c4a932b
| 334
|
py
|
Python
|
autofactory/django/builders/booleans.py
|
nickgashkov/autofactoryboy
|
b897346c34333512d8b5503679336d316113ec48
|
[
"MIT"
] | 5
|
2019-01-09T19:43:40.000Z
|
2019-09-09T04:54:32.000Z
|
autofactory/django/builders/booleans.py
|
nickgashkov/autofactoryboy
|
b897346c34333512d8b5503679336d316113ec48
|
[
"MIT"
] | null | null | null |
autofactory/django/builders/booleans.py
|
nickgashkov/autofactoryboy
|
b897346c34333512d8b5503679336d316113ec48
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
#
# Copyright (c) 2018-2019 Nick Gashkov
#
# Distributed under MIT License. See LICENSE file for details.
from __future__ import unicode_literals
import factory
def build_booleanfield(field_cls):
    """Return a ``factory.Faker("pybool")`` declaration for *field_cls*."""
    provider_name = "pybool"
    return factory.Faker(provider_name)
def build_nullbooleanfield(field_cls):
    """Return a ``factory.Faker("pybool")`` declaration for *field_cls*."""
    provider_name = "pybool"
    return factory.Faker(provider_name)
| 19.647059
| 62
| 0.748503
| 43
| 334
| 5.604651
| 0.744186
| 0.06639
| 0.116183
| 0.174274
| 0.26556
| 0.26556
| 0
| 0
| 0
| 0
| 0
| 0.03169
| 0.149701
| 334
| 16
| 63
| 20.875
| 0.816901
| 0.356287
| 0
| 0.333333
| 0
| 0
| 0.057416
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0.333333
| 0.333333
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 1
| 1
| 0
|
0
| 5
|
ca801718b628ef67e725516cf0eac17563c723f8
| 136
|
py
|
Python
|
abc_delegation/__init__.py
|
jayvdb/abc-delegation
|
0f25d26c4db4c90dc0593ede43fc917210264373
|
[
"MIT"
] | 2
|
2020-07-15T10:03:42.000Z
|
2020-09-02T11:43:02.000Z
|
abc_delegation/__init__.py
|
jayvdb/abc-delegation
|
0f25d26c4db4c90dc0593ede43fc917210264373
|
[
"MIT"
] | 15
|
2020-06-17T14:04:18.000Z
|
2020-08-20T15:11:35.000Z
|
abc_delegation/__init__.py
|
jayvdb/abc-delegation
|
0f25d26c4db4c90dc0593ede43fc917210264373
|
[
"MIT"
] | 1
|
2020-09-03T08:01:48.000Z
|
2020-09-03T08:01:48.000Z
|
from .delegate import delegation_metaclass, DelegatingMeta, UnsafeDelegatingMeta
from .multi_delegate import multi_delegation_metaclass
| 45.333333
| 80
| 0.897059
| 14
| 136
| 8.428571
| 0.571429
| 0.237288
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.073529
| 136
| 2
| 81
| 68
| 0.936508
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
ca95e927a707430f7c578a5a79ab9f7cf777c86d
| 49
|
py
|
Python
|
python_modules/dagster/dagster/serdes/errors.py
|
withshubh/dagster
|
ff4a0db53e126f44097a337eecef54988cc718ef
|
[
"Apache-2.0"
] | null | null | null |
python_modules/dagster/dagster/serdes/errors.py
|
withshubh/dagster
|
ff4a0db53e126f44097a337eecef54988cc718ef
|
[
"Apache-2.0"
] | 1
|
2021-06-21T18:30:02.000Z
|
2021-06-25T21:18:39.000Z
|
python_modules/dagster/dagster/serdes/errors.py
|
withshubh/dagster
|
ff4a0db53e126f44097a337eecef54988cc718ef
|
[
"Apache-2.0"
] | null | null | null |
class SerdesClassUsageError(Exception):
    """Raised when a class does not meet serdes usage requirements."""
| 16.333333
| 39
| 0.795918
| 4
| 49
| 9.75
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.142857
| 49
| 2
| 40
| 24.5
| 0.928571
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.5
| 0
| 0
| 0.5
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
|
0
| 5
|
04ab8c889b4fb08a172816a06d3ef52f4194d999
| 40
|
py
|
Python
|
spikeforest_widgets/widgets/DirectoryView/__init__.py
|
michaeljohnclancy/spikeforest2
|
93bdde2c570aef9426b3d7bceb69f3605c9f005a
|
[
"Apache-2.0"
] | 26
|
2020-02-03T02:12:20.000Z
|
2022-03-25T09:14:32.000Z
|
spikeforest_widgets/widgets/DirectoryView/__init__.py
|
michaeljohnclancy/spikeforest2
|
93bdde2c570aef9426b3d7bceb69f3605c9f005a
|
[
"Apache-2.0"
] | 27
|
2020-01-10T12:35:55.000Z
|
2021-08-01T23:13:52.000Z
|
spikeforest_widgets/widgets/DirectoryView/__init__.py
|
michaeljohnclancy/spikeforest2
|
93bdde2c570aef9426b3d7bceb69f3605c9f005a
|
[
"Apache-2.0"
] | 11
|
2019-02-15T15:21:47.000Z
|
2021-09-23T01:07:24.000Z
|
from .DirectoryView import DirectoryView
| 40
| 40
| 0.9
| 4
| 40
| 9
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.075
| 40
| 1
| 40
| 40
| 0.972973
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
04ca0055ecf5e4aa29789d8a2776fa11f2efe68b
| 141
|
py
|
Python
|
code/models/__init__.py
|
wazhoy/DocRED
|
c1b0dc2257f6d041c8cdd62f57fee3ba0a0089aa
|
[
"MIT"
] | 487
|
2019-06-04T15:18:42.000Z
|
2022-03-30T06:27:44.000Z
|
code/models/__init__.py
|
wazhoy/DocRED
|
c1b0dc2257f6d041c8cdd62f57fee3ba0a0089aa
|
[
"MIT"
] | 67
|
2019-06-17T11:44:50.000Z
|
2022-02-22T02:57:35.000Z
|
code/models/__init__.py
|
wazhoy/DocRED
|
c1b0dc2257f6d041c8cdd62f57fee3ba0a0089aa
|
[
"MIT"
] | 97
|
2019-06-13T14:58:35.000Z
|
2022-03-15T15:10:40.000Z
|
from .CNN3 import CNN3
from .LSTM import LSTM
from .BiLSTM import BiLSTM
from .ContextAware import ContextAware
from .LSTM_SP import LSTM_SP
| 23.5
| 38
| 0.822695
| 22
| 141
| 5.181818
| 0.318182
| 0.140351
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.016529
| 0.141844
| 141
| 5
| 39
| 28.2
| 0.92562
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
04cae1b39ba6c1915ccf4baa95bfaad0e6b0dbc9
| 17,170
|
py
|
Python
|
tests/api/test_meals.py
|
Rdbaker/Mealbound
|
37cec6b45a632ac26a5341a0c9556279b6229ea8
|
[
"BSD-3-Clause"
] | 1
|
2018-11-03T17:48:50.000Z
|
2018-11-03T17:48:50.000Z
|
tests/api/test_meals.py
|
Rdbaker/Mealbound
|
37cec6b45a632ac26a5341a0c9556279b6229ea8
|
[
"BSD-3-Clause"
] | 3
|
2021-03-09T09:47:04.000Z
|
2022-02-12T13:04:41.000Z
|
tests/api/test_meals.py
|
Rdbaker/Mealbound
|
37cec6b45a632ac26a5341a0c9556279b6229ea8
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- encoding: utf-8 -*-
"""Test the views at /api/v1/meals."""
import uuid
from datetime import datetime as dt
from datetime import timedelta as td
from unittest.mock import patch
import pytest
from ceraon.models.meals import Meal, UserMeal
from tests.utils import BaseViewTest
@pytest.mark.usefixtures('db')
class TestFindMeal(BaseViewTest):
    """Test GET /api/v1/meals/UUID (fetching one meal)."""
    # URL template; .format() with the meal's UUID.
    base_url = '/api/v1/meals/{}'
    def test_nonexistent_get(self, testapp):
        """Test that a nonexistent meal returns a 404 with an error body."""
        res = testapp.get(self.base_url.format(uuid.uuid4()), status=404)
        assert res.status_code == 404
        assert 'error_code' in res.json
        assert 'error_message' in res.json
    def test_successful_get(self, testapp, meal):
        """Test that a normal GET returns the meal's serialized fields."""
        res = testapp.get(self.base_url.format(meal.id))
        assert res.status_code == 200
        data = res.json['data']
        assert 'id' in data
        assert 'name' in data
        assert 'description' in data
        assert 'price' in data
        assert 'scheduled_for' in data
        assert 'host' in data
@pytest.mark.usefixtures('db')
class TestCreateMeal(BaseViewTest):
    """Test POST /api/v1/meals (creating a meal)."""
    base_url = '/api/v1/meals'
    def setup_method(self, method):
        """Set up the test class. Pytest will call this for us.

        Builds a fresh valid payload before every test; individual tests
        mutate it (delete keys, overwrite values) to provoke errors.
        """
        self.valid_data = {
            # Timezone-aware timestamp one day in the future.
            'scheduled_for': (dt.now().astimezone() + td(days=1)).isoformat(),
            'name': 'some new meal',
            'description': 'this is my description',
            'price': 7.00,
        }
    def test_unauthenticated_create(self, testapp):
        """Test that we get a 401 if the user is not authenticated."""
        res = testapp.post_json(self.base_url, self.valid_data, status=401)
        assert res.status_code == 401
    def test_user_not_created_location(self, testapp, user):
        """Test that 428 is returned if the user has no location."""
        self.login(user, testapp)
        res = testapp.post_json(self.base_url, self.valid_data, status=428)
        assert res.status_code == 428
    def test_meal_needs_name(self, testapp, host, hosted_location):
        """Test that a meal needs a name (422 names the offending field)."""
        del self.valid_data['name']
        self.login(host, testapp)
        res = testapp.post_json(self.base_url, self.valid_data, status=422)
        assert 'name' in res.json['error_message']
    def test_meal_with_tags(self, testapp, host, hosted_location, tag_one):
        """Test creating a meal with a tag associated."""
        post_data = {'tags': [{'id': tag_one.id}]}
        post_data.update(self.valid_data)
        self.login(host, testapp)
        res = testapp.post_json(self.base_url, post_data)
        assert 'tags' in res.json['data']
        assert res.json['data']['tags'][0]['id'] == tag_one.id
    def test_meal_needs_price(self, testapp, host, hosted_location):
        """Test that a meal needs a price."""
        del self.valid_data['price']
        self.login(host, testapp)
        res = testapp.post_json(self.base_url, self.valid_data, status=422)
        assert 'price' in res.json['error_message']
    def test_meal_price_positive(self, testapp, host, hosted_location):
        """Test that a meal needs a positive price."""
        self.valid_data['price'] = -1.50
        self.login(host, testapp)
        res = testapp.post_json(self.base_url, self.valid_data, status=422)
        assert 'price' in res.json['error_message']
    def test_meal_needs_scheduled_for(self, testapp, host, hosted_location):
        """Test that a meal needs a scheduled_for."""
        del self.valid_data['scheduled_for']
        self.login(host, testapp)
        res = testapp.post_json(self.base_url, self.valid_data, status=422)
        assert 'scheduled_for' in res.json['error_message']
    def test_meal_scheduled_for_past(self, testapp, host, hosted_location):
        """Test that a meal needs a scheduled_for in the future."""
        self.valid_data['scheduled_for'] = (dt.now().astimezone() -
                                            td(days=1)).isoformat()
        self.login(host, testapp)
        res = testapp.post_json(self.base_url, self.valid_data, status=422)
        assert 'scheduled_for' in res.json['error_message']
    def test_user_meal_create_successful(self, testapp, host, hosted_location):
        """Test that a user can create a meal (201 echoes the payload)."""
        self.login(host, testapp)
        res = testapp.post_json(self.base_url, self.valid_data)
        assert res.status_code == 201
        assert 'message' in res.json
        data = res.json['data']
        assert data['price'] == self.valid_data['price']
        assert data['name'] == self.valid_data['name']
        assert data['description'] == self.valid_data['description']
        # TODO: figure out how to compare that these two values are the same
        # TODO: time across different timezones and uncomment this assert
        # assert data['scheduled_for'] == \
        #     self.valid_data['scheduled_for']
@pytest.mark.usefixtures('db')
class TestUpdateMeal(BaseViewTest):
    """Test PATCH /api/v1/meals/UUID (partial update)."""
    base_url = '/api/v1/meals/{}'
    def setup_method(self, method):
        """Set up the test class. Pytest will call this for us.

        Builds a fresh, fully-valid patch payload before every test.
        """
        self.valid_data = {
            # Timezone-aware timestamp three days in the future.
            'scheduled_for': (dt.now().astimezone() + td(days=3)).isoformat(),
            'name': 'some new meal name',
            'description': 'this is my new description',
            'price': 7.80,
        }
    def test_unauthenticated(self, testapp, meal):
        """Test that unauthenticated gets a 401."""
        res = testapp.patch_json(self.base_url.format(meal.id),
                                 self.valid_data, status=401)
        assert res.status_code == 401
    def test_no_meal_found(self, testapp, guest, guest_location):
        """Test that a nonexistent meal gets a 404."""
        self.login(guest, testapp)
        res = testapp.patch_json(self.base_url.format(uuid.uuid4()),
                                 self.valid_data, status=404)
        assert res.status_code == 404
    def test_unauthorized(self, testapp, meal, guest, guest_location):
        """Test that unauthorized (non-host) gets a 403."""
        self.login(guest, testapp)
        res = testapp.patch_json(self.base_url.format(meal.id),
                                 self.valid_data, status=403)
        assert res.status_code == 403
    def test_update_works(self, testapp, host, hosted_location, meal):
        """Test that updating a meal works (DB row reflects the patch)."""
        self.login(host, testapp)
        res = testapp.patch_json(self.base_url.format(meal.id),
                                 self.valid_data)
        assert res.status_code == 200
        assert meal.price == self.valid_data['price']
    def test_meal_with_tags(self, testapp, host, meal, hosted_location,
                            tag_one):
        """Test updating a meal with a tag associated."""
        patch_data = {'tags': [{'id': tag_one.id}]}
        patch_data.update(self.valid_data)
        self.login(host, testapp)
        res = testapp.patch_json(self.base_url.format(meal.id), patch_data)
        assert 'tags' in res.json['data']
        assert res.json['data']['tags'][0]['id'] == tag_one.id
    def test_partial_update_works(self, testapp, host, hosted_location, meal):
        """Test that only partially updating a meal works (PATCH semantics)."""
        self.login(host, testapp)
        res = testapp.patch_json(self.base_url.format(meal.id),
                                 {'price': 4.00})
        assert res.status_code == 200
        assert meal.price == 4.00
@pytest.mark.usefixtures('db')
class TestReplaceMeal(BaseViewTest):
    """Test PUT /api/v1/meals/UUID (full replacement)."""
    base_url = '/api/v1/meals/{}'
    def setup_method(self, method):
        """Set up the test class. Pytest will call this for us.

        Builds a fresh, fully-valid replacement payload before every test.
        """
        self.valid_data = {
            # Use an aware datetime (.astimezone()), consistent with the
            # sibling TestCreateMeal/TestUpdateMeal classes; the original
            # naive dt.now() could be interpreted in the wrong timezone by
            # the scheduled_for validation.
            'scheduled_for': (dt.now().astimezone() + td(days=3)).isoformat(),
            'name': 'some new meal name',
            'description': 'this is my new description',
            'price': 7.80,
        }
    def test_unauthenticated(self, testapp, meal):
        """Test that unauthenticated gets a 401."""
        res = testapp.put_json(self.base_url.format(meal.id),
                               self.valid_data, status=401)
        assert res.status_code == 401
    def test_no_meal_found(self, testapp, guest, guest_location):
        """Test that a nonexistent meal gets a 404."""
        self.login(guest, testapp)
        res = testapp.put_json(self.base_url.format(uuid.uuid4()),
                               self.valid_data, status=404)
        assert res.status_code == 404
    def test_unauthorized(self, testapp, meal, guest, guest_location):
        """Test that unauthorized (non-host) gets a 403."""
        self.login(guest, testapp)
        res = testapp.put_json(self.base_url.format(meal.id),
                               self.valid_data, status=403)
        assert res.status_code == 403
    def test_replace_works(self, testapp, host, hosted_location, meal):
        """Test that replacing a meal works (DB row reflects the new data)."""
        self.login(host, testapp)
        res = testapp.put_json(self.base_url.format(meal.id),
                               self.valid_data)
        assert res.status_code == 200
        assert meal.price == self.valid_data['price']
    def test_meal_with_tags(self, testapp, host, meal, hosted_location,
                            tag_one):
        """Test replacing a meal with a tag associated."""
        put_data = {'tags': [{'id': tag_one.id}]}
        put_data.update(self.valid_data)
        self.login(host, testapp)
        res = testapp.put_json(self.base_url.format(meal.id), put_data)
        assert 'tags' in res.json['data']
        assert res.json['data']['tags'][0]['id'] == tag_one.id
    def test_partial_replace_fails(self, testapp, host, hosted_location, meal):
        """Test that only partially replacing a meal fails (PUT semantics)."""
        self.login(host, testapp)
        res = testapp.put_json(self.base_url.format(meal.id),
                               {'price': 4.00}, status=422)
        assert res.status_code == 422
        assert 'name' in res.json['error_message']
@pytest.mark.usefixtures('db')
class TestDestroyMeal(BaseViewTest):
    """Test DELETE /api/v1/meals/UUID (destroying a meal)."""
    base_url = '/api/v1/meals/{}'
    def test_unauthenticated(self, testapp, meal):
        """Test that unauthenticated gets a 401."""
        res = testapp.delete(self.base_url.format(meal.id), status=401)
        assert res.status_code == 401
    def test_meal_not_found(self, testapp, user):
        """Test that a meal not found gets a 404."""
        self.login(user, testapp)
        res = testapp.delete(self.base_url.format(uuid.uuid4()), status=404)
        assert res.status_code == 404
    def test_not_meal_host(self, testapp, guest, guest_location, meal):
        """Test that not being meal owner gets a 403."""
        self.login(guest, testapp)
        res = testapp.delete(self.base_url.format(meal.id), status=403)
        assert res.status_code == 403
    def test_meal_deleted(self, testapp, host, hosted_location, meal):
        """Test that host can delete a meal (204, and the row is gone)."""
        self.login(host, testapp)
        res = testapp.delete(self.base_url.format(meal.id))
        assert res.status_code == 204
        # Confirm the deletion actually reached the database.
        try_find_meal = Meal.find(meal.id)
        assert try_find_meal is None
@pytest.mark.usefixtures('db')
class TestJoinMeal(BaseViewTest):
    """Test POST /api/v1/meals/UUID/reservation (joining a meal)."""
    base_url = '/api/v1/meals/{}/reservation'
    def test_unauthenticated(self, testapp, meal):
        """Test that an unauthenticated user gets a 401."""
        res = testapp.post(self.base_url.format(meal.id), status=401)
        assert res.status_code == 401
    def test_meal_not_found(self, testapp, user):
        """Test that a user cannot join a meal that does not exist."""
        self.login(user, testapp)
        res = testapp.post(self.base_url.format(uuid.uuid4()), status=404)
        assert res.status_code == 404
    # stripe is mocked out so no real payment call is made.
    @patch('ceraon.models.transactions.stripe')
    def test_join_meal_card_on_file(self, stripe_mock, testapp, user, meal):
        """Test that a user can join a meal with a card on file."""
        user.stripe_customer_id = 'customer-id'
        self.login(user, testapp)
        res = testapp.post(self.base_url.format(meal.id))
        assert res.status_code == 201
        # A UserMeal row keyed by (user, meal) records the reservation.
        new_um = UserMeal.query.get((user.id, meal.id))
        assert new_um is not None
    @patch('ceraon.models.transactions.stripe')
    def test_join_meal_no_card_on_file(self, stripe_mock, testapp, user, meal):
        """Test that a user can join a meal without having a card on file."""
        self.login(user, testapp)
        res = testapp.post_json(self.base_url.format(meal.id),
                                {'stripe_token': 'some-token'})
        assert res.status_code == 201
        new_um = UserMeal.query.get((user.id, meal.id))
        assert new_um is not None
    def test_cannot_join_meal_again(self, testapp, guest, meal):
        """Test that a user cannot join a meal twice (409 Conflict).

        NOTE(review): relies on the guest fixture already having joined
        this meal — confirm in the fixture definitions.
        """
        self.login(guest, testapp)
        res = testapp.post(self.base_url.format(meal.id), status=409)
        assert res.status_code == 409
    def test_host_cannot_join_meal(self, testapp, host, meal):
        """Test that a host cannot join their own meal."""
        self.login(host, testapp)
        res = testapp.post(self.base_url.format(meal.id), status=400)
        assert res.status_code == 400
    def test_join_past_meal(self, testapp, user, past_meal):
        """Test that a user cannot join a meal that happened already."""
        self.login(user, testapp)
        res = testapp.post(self.base_url.format(past_meal.id), status=400)
        assert res.status_code == 400
@pytest.mark.usefixtures('db')
class TestLeaveMeal(BaseViewTest):
    """Test DELETE /api/v1/meals/UUID/reservation (leaving a meal)."""
    base_url = '/api/v1/meals/{}/reservation'
    def test_unauthenticated(self, testapp, meal):
        """Test that an unauthenticated user gets a 401."""
        res = testapp.delete(self.base_url.format(meal.id), status=401)
        assert res.status_code == 401
    def test_leave_meal(self, testapp, guest, meal):
        """Test that a user can leave a meal (reservation row removed)."""
        self.login(guest, testapp)
        res = testapp.delete(self.base_url.format(meal.id))
        assert res.status_code == 200
        assert res.json['data'] is not None
        new_um = UserMeal.query.get((guest.id, meal.id))
        assert new_um is None
    def test_cannot_leave_meal_again(self, testapp, user, meal):
        """Test that a user cannot leave a meal that has not joined first."""
        self.login(user, testapp)
        res = testapp.delete(self.base_url.format(meal.id), status=428)
        assert res.status_code == 428
    def test_meal_not_found(self, testapp, user):
        """Test that a user cannot leave a meal that does not exist."""
        self.login(user, testapp)
        res = testapp.delete(self.base_url.format(uuid.uuid4()), status=404)
        assert res.status_code == 404
    def test_leave_past_meal(self, testapp, guest, past_meal):
        """Test that a user cannot leave a meal that happened already."""
        self.login(guest, testapp)
        res = testapp.delete(self.base_url.format(past_meal.id), status=400)
        assert res.status_code == 400
@pytest.mark.usefixtures('db')
class TestGetMyMeals(BaseViewTest):
    """Test GET /api/v1/meals/mine/<role>."""

    base_url = '/api/v1/meals/mine/{}'

    def test_unauthenticated(self, testapp, meal):
        """Test that an unauthenticated user gets a 401.

        Bug fix: ``base_url`` contains a ``{}`` placeholder, so the
        original request targeted the literal unformatted path instead of
        a real route (every other test in this class formats the URL).
        Format it with a valid role so the 401 comes from the auth check
        on the intended endpoint.
        """
        res = testapp.get(self.base_url.format('guest'), status=401)
        assert res.status_code == 401

    def test_see_joined_meal(self, testapp, guest, meal):
        """Test that a user can see the meals they joined."""
        self.login(guest, testapp)
        res = testapp.get(self.base_url.format('guest'))
        assert res.status_code == 200
        assert res.json['data'][0]['id'] == str(meal.id)
        assert len(res.json['data']) == 1

    def test_see_hosted_meal(self, testapp, host, meal):
        """Test that a user can see the meals they host."""
        self.login(host, testapp)
        res = testapp.get(self.base_url.format('host'))
        assert res.status_code == 200
        assert res.json['data'][0]['id'] == str(meal.id)
        assert len(res.json['data']) == 1

    def test_see_hosts_joined_meals(self, testapp, host, meal):
        """Check that the host has joined no meals... just a sanity check."""
        self.login(host, testapp)
        res = testapp.get(self.base_url.format('guest'))
        assert res.status_code == 200
        assert len(res.json['data']) == 0

    def test_bad_role(self, testapp, user):
        """Test that you can only specify 'guest' or 'host' as a role."""
        self.login(user, testapp)
        res = testapp.get(self.base_url.format('somethingelse'), status=400)
        assert res.status_code == 400
| 41.076555
| 79
| 0.628072
| 2,346
| 17,170
| 4.450554
| 0.090793
| 0.034863
| 0.046356
| 0.065511
| 0.809214
| 0.76008
| 0.733646
| 0.714299
| 0.680107
| 0.646298
| 0
| 0.02292
| 0.245312
| 17,170
| 417
| 80
| 41.17506
| 0.782837
| 0.156552
| 0
| 0.588652
| 0
| 0
| 0.064591
| 0.010073
| 0
| 0
| 0
| 0.002398
| 0.258865
| 1
| 0.166667
| false
| 0
| 0.024823
| 0
| 0.248227
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
b6c304ca3e4f245c0f40bc4463456cbd2d64abca
| 190
|
py
|
Python
|
port_monitor/apps.py
|
tzing/telnet-monitor
|
b92e4fca99eaba72bf30397656d70251034fd579
|
[
"MIT"
] | null | null | null |
port_monitor/apps.py
|
tzing/telnet-monitor
|
b92e4fca99eaba72bf30397656d70251034fd579
|
[
"MIT"
] | 2
|
2019-12-04T22:29:00.000Z
|
2020-06-05T20:07:26.000Z
|
port_monitor/apps.py
|
tzing/telnet-monitor
|
b92e4fca99eaba72bf30397656d70251034fd579
|
[
"MIT"
] | null | null | null |
from django.apps import AppConfig
from django.utils.translation import gettext_lazy as _
class PortMonitorConfig(AppConfig):
    """Django application configuration for the port_monitor app."""

    # Dotted module path Django uses to register the app.
    name = 'port_monitor'
    # Translatable display name.
    verbose_name = _('Port Monitor')
| 23.75
| 54
| 0.773684
| 23
| 190
| 6.173913
| 0.695652
| 0.140845
| 0.211268
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.152632
| 190
| 7
| 55
| 27.142857
| 0.881988
| 0
| 0
| 0
| 0
| 0
| 0.126316
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.4
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
b6ec529b773ffcbc167d83c5f5bbb43f7b997fe1
| 8,585
|
py
|
Python
|
tests/test_ocs_feed.py
|
simonsobs/ocs
|
24c6a617ea3038fccdb40bfd602ffd541415a476
|
[
"BSD-2-Clause"
] | 9
|
2019-09-02T14:17:06.000Z
|
2022-03-11T21:26:34.000Z
|
tests/test_ocs_feed.py
|
simonsobs/ocs
|
24c6a617ea3038fccdb40bfd602ffd541415a476
|
[
"BSD-2-Clause"
] | 158
|
2019-05-17T17:54:37.000Z
|
2022-03-14T19:29:59.000Z
|
tests/test_ocs_feed.py
|
simonsobs/ocs
|
24c6a617ea3038fccdb40bfd602ffd541415a476
|
[
"BSD-2-Clause"
] | 1
|
2021-07-16T13:21:45.000Z
|
2021-07-16T13:21:45.000Z
|
import time
from unittest.mock import MagicMock
import pytest
from ocs import ocs_feed
# ocs_feed.Feed
class TestPublishMessage:
    """Test ocs_feed.Feed.publish_message().

    Every test previously repeated the same mock-agent/feed/message
    boilerplate; it is factored into the private helpers below. Each test
    publishes one message and checks whether the value/field-name
    validation accepts it or raises.
    """

    @staticmethod
    def _make_feed():
        """Return a recording Feed attached to a mock agent."""
        return ocs_feed.Feed(MagicMock(), 'test_feed', record=True)

    @staticmethod
    def _single(data):
        """Build a single-sample message carrying *data*."""
        return {'block_name': 'test', 'timestamp': time.time(), 'data': data}

    @staticmethod
    def _multi(data, n):
        """Build an *n*-sample message carrying *data*."""
        return {'block_name': 'test',
                'timestamps': [time.time() + i for i in range(n)],
                'data': data}

    def test_valid_single_sample_input(self):
        """Single ints and floats are accepted."""
        self._make_feed().publish_message(
            self._single({'key1': 1., 'key2': 10}))

    def test_valid_multi_sample_input(self):
        """Lists of ints and floats are accepted."""
        self._make_feed().publish_message(
            self._multi({'key1': [1., 2.], 'key2': [10, 5]}, 2))

    def test_str_single_sample_input(self):
        """Strings are accepted as field values."""
        self._make_feed().publish_message(
            self._single({'key1': 1., 'key2': 'string'}))

    def test_bool_single_sample_input(self):
        """A bool value is rejected with TypeError."""
        with pytest.raises(TypeError):
            self._make_feed().publish_message(self._single({'key1': True}))

    def test_bool_multi_sample_input(self):
        """A list of bools is rejected with TypeError."""
        with pytest.raises(TypeError):
            self._make_feed().publish_message(
                self._multi({'key1': [True, False, True]}, 3))

    def test_str_multi_sample_input(self):
        """Multiple points including invalid datatypes raise TypeError."""
        with pytest.raises(TypeError):
            self._make_feed().publish_message(
                self._multi({'key1': [1., 3.4, 4.3],
                             'key2': [10., 'string', None]}, 3))

    def test_invalid_data_key_character(self):
        """Disallowed characters in a field key raise ValueError."""
        with pytest.raises(ValueError):
            self._make_feed().publish_message(
                self._single({'invalid.key1': 1., 'valid_key2': 1.}))

    def test_data_key_start_with_number(self):
        """Field names must start with a letter."""
        with pytest.raises(ValueError):
            self._make_feed().publish_message(
                self._single({'1invalidkey': 1., 'valid_key2': 1.}))

    def test_data_key_too_long(self):
        """A data key exceeding 255 characters raises ValueError."""
        with pytest.raises(ValueError):
            self._make_feed().publish_message(
                self._single({'a' * 256: 1., 'valid_key2': 1.}))

    def test_data_key_start_underscore1(self):
        """A single leading underscore before a letter is valid."""
        self._make_feed().publish_message(
            self._single({'_valid': 1., 'valid_key2': 1.}))

    def test_data_key_start_underscore2(self):
        """Multiple leading underscores before a letter are valid."""
        self._make_feed().publish_message(
            self._single({'____valid1': 1., 'valid_key2': 1.}))

    def test_data_key_start_underscore3(self):
        """Underscore followed by a digit is invalid."""
        with pytest.raises(ValueError):
            self._make_feed().publish_message(
                self._single({'_1valid': 1., 'valid_key2': 1.}))

    def test_data_key_start_underscore4(self):
        """Multiple underscores followed by a digit are invalid."""
        with pytest.raises(ValueError):
            self._make_feed().publish_message(
                self._single({'____1valid': 1., 'valid_key2': 1.}))

    def test_empty_field_name(self):
        """The empty string is not a valid field name."""
        with pytest.raises(ValueError):
            self._make_feed().publish_message(
                self._single({'': 1., 'valid_key2': 1.}))
# ocs_feed.Block
def test_block_creation():
    """A Block retains the name it was constructed with."""
    block = ocs_feed.Block('test_block', ['key1'])
    assert block.name == 'test_block'
def test_block_append():
    """Appended samples land in Block.data and Block.timestamps."""
    block = ocs_feed.Block('test_block', ['key1'])
    times = [1558044482.2398098, 1558044483.2398098, 1558044484.2398098]
    values = [1, 2, 3]
    block.append({'timestamp': times, 'data': {'key1': values}})
    assert block.data['key1'][0] == values
    assert block.timestamps[0] == times
| 28.712375
| 75
| 0.544205
| 937
| 8,585
| 4.72572
| 0.13127
| 0.075881
| 0.039747
| 0.050813
| 0.772358
| 0.772358
| 0.764453
| 0.764453
| 0.75542
| 0.700768
| 0
| 0.023005
| 0.341759
| 8,585
| 298
| 76
| 28.808725
| 0.760573
| 0.16028
| 0
| 0.611399
| 0
| 0
| 0.107842
| 0
| 0
| 0
| 0
| 0
| 0.015544
| 1
| 0.082902
| false
| 0
| 0.020725
| 0
| 0.108808
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
b6ece2a81449e5358547aab20fa113c3902e50c4
| 188
|
py
|
Python
|
tests/file_field/models.py
|
avin-kavish/graphene_django_crud
|
2ed7dc457da006fe872a6257b8b62256381d9eb7
|
[
"MIT"
] | 19
|
2021-01-16T17:31:34.000Z
|
2022-03-22T20:15:28.000Z
|
tests/file_field/models.py
|
avin-kavish/graphene_django_crud
|
2ed7dc457da006fe872a6257b8b62256381d9eb7
|
[
"MIT"
] | 8
|
2021-05-24T05:42:35.000Z
|
2022-03-07T12:14:53.000Z
|
tests/file_field/models.py
|
avin-kavish/graphene_django_crud
|
2ed7dc457da006fe872a6257b8b62256381d9eb7
|
[
"MIT"
] | 6
|
2021-05-28T16:21:13.000Z
|
2022-03-04T12:46:17.000Z
|
# -*- coding: utf-8 -*-
from django.db import models
class TestFile(models.Model):
    """Fixture model exercising file and image upload fields."""

    # Both columns are nullable and optional in forms.
    file = models.FileField(null=True, blank=True)
    image = models.ImageField(null=True, blank=True)
| 26.857143
| 52
| 0.702128
| 26
| 188
| 5.076923
| 0.692308
| 0.121212
| 0.19697
| 0.257576
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.00625
| 0.148936
| 188
| 7
| 52
| 26.857143
| 0.81875
| 0.111702
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.25
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 5
|
b6f92bc68b32ddacdc9cc562c2ee7aad3cfab81a
| 95
|
py
|
Python
|
models/head/__init__.py
|
RoseSakurai/PSENet_paddle
|
6b45f95059724080932b116a98d5af14ea0e1640
|
[
"Apache-2.0"
] | 4
|
2021-05-13T15:24:53.000Z
|
2022-03-04T06:05:20.000Z
|
models/head/__init__.py
|
RoseSakurai/PSENet_paddle
|
6b45f95059724080932b116a98d5af14ea0e1640
|
[
"Apache-2.0"
] | null | null | null |
models/head/__init__.py
|
RoseSakurai/PSENet_paddle
|
6b45f95059724080932b116a98d5af14ea0e1640
|
[
"Apache-2.0"
] | null | null | null |
# Re-export the PSENet head implementation and its factory.
from .psenet_head import PSENet_Head
from .builder import build_head

# NOTE(review): build_head is imported but not listed in __all__, so
# `from models.head import *` will not re-export it — confirm intentional.
__all__ = ['PSENet_Head']
| 23.75
| 36
| 0.810526
| 14
| 95
| 4.928571
| 0.5
| 0.434783
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.115789
| 95
| 4
| 37
| 23.75
| 0.821429
| 0
| 0
| 0
| 0
| 0
| 0.114583
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.666667
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
8e08bdb741888ce7f5cb8c9e0d6d94b52f438454
| 621
|
py
|
Python
|
sdk/python/pulumi_oci/filestorage/__init__.py
|
EladGabay/pulumi-oci
|
6841e27d4a1a7e15c672306b769912efbfd3ba99
|
[
"ECL-2.0",
"Apache-2.0"
] | 5
|
2021-08-17T11:14:46.000Z
|
2021-12-31T02:07:03.000Z
|
sdk/python/pulumi_oci/filestorage/__init__.py
|
pulumi-oci/pulumi-oci
|
6841e27d4a1a7e15c672306b769912efbfd3ba99
|
[
"ECL-2.0",
"Apache-2.0"
] | 1
|
2021-09-06T11:21:29.000Z
|
2021-09-06T11:21:29.000Z
|
sdk/python/pulumi_oci/filestorage/__init__.py
|
pulumi-oci/pulumi-oci
|
6841e27d4a1a7e15c672306b769912efbfd3ba99
|
[
"ECL-2.0",
"Apache-2.0"
] | 2
|
2021-08-24T23:31:30.000Z
|
2022-01-02T19:26:54.000Z
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
from .. import _utilities
import typing
# Export this package's modules as members:
from .export import *
from .export_set import *
from .file_system import *
from .get_export_sets import *
from .get_exports import *
from .get_file_systems import *
from .get_mount_targets import *
from .get_snapshot import *
from .get_snapshots import *
from .mount_target import *
from .snapshot import *
from ._inputs import *
from . import outputs
| 29.571429
| 87
| 0.753623
| 93
| 621
| 4.88172
| 0.537634
| 0.264317
| 0.171806
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.001919
| 0.161031
| 621
| 20
| 88
| 31.05
| 0.869482
| 0.352657
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
8e08d3fb5c5531b3d49156c345f031fbcdd537e7
| 48
|
py
|
Python
|
tests/__init__.py
|
CriticalSteffen/mbwrapper
|
901db45b2ed4893da5a543991be43c239bb8da28
|
[
"MIT"
] | null | null | null |
tests/__init__.py
|
CriticalSteffen/mbwrapper
|
901db45b2ed4893da5a543991be43c239bb8da28
|
[
"MIT"
] | null | null | null |
tests/__init__.py
|
CriticalSteffen/mbwrapper
|
901db45b2ed4893da5a543991be43c239bb8da28
|
[
"MIT"
] | null | null | null |
"""Malware Bazaar API Wrapper Library Tests."""
| 24
| 47
| 0.729167
| 6
| 48
| 5.833333
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.125
| 48
| 1
| 48
| 48
| 0.833333
| 0.854167
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
8e09a0c519db618816349d93b3388d051bfe20cb
| 95
|
py
|
Python
|
sharp_aquos_rc/__init__.py
|
HerrHofrat/sharp_aquos_rc
|
9a5a1140241a866150dea2d26555680dc8e7f057
|
[
"MIT"
] | 10
|
2016-01-05T03:28:25.000Z
|
2021-04-01T20:07:12.000Z
|
sharp_aquos_rc/__init__.py
|
HerrHofrat/sharp_aquos_rc
|
9a5a1140241a866150dea2d26555680dc8e7f057
|
[
"MIT"
] | 12
|
2016-10-03T22:46:20.000Z
|
2019-01-30T05:01:35.000Z
|
sharp_aquos_rc/__init__.py
|
jmoore987/sharp_aquos_rc
|
9a5a1140241a866150dea2d26555680dc8e7f057
|
[
"MIT"
] | 13
|
2016-02-14T23:45:58.000Z
|
2020-06-11T05:49:54.000Z
|
"""Module to control a Sharp Aquos Remote Control enabled TV via TCP/IP"""
from .tv import TV
| 23.75
| 74
| 0.736842
| 17
| 95
| 4.117647
| 0.823529
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.178947
| 95
| 3
| 75
| 31.666667
| 0.897436
| 0.715789
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
6d02834e4225a930ff3f1c2e336e10dc6074a7d2
| 4,830
|
py
|
Python
|
tests/unit/domain_validation_tests.py
|
ukncsc/edge-mod
|
95737e71945f4a8823f20a554e5efb9841183a26
|
[
"Unlicense"
] | 2
|
2016-08-23T07:55:01.000Z
|
2016-09-27T15:13:32.000Z
|
tests/unit/domain_validation_tests.py
|
ukncsc/edge-mod
|
95737e71945f4a8823f20a554e5efb9841183a26
|
[
"Unlicense"
] | null | null | null |
tests/unit/domain_validation_tests.py
|
ukncsc/edge-mod
|
95737e71945f4a8823f20a554e5efb9841183a26
|
[
"Unlicense"
] | 2
|
2020-10-02T13:27:10.000Z
|
2021-04-11T09:45:16.000Z
|
import unittest
import mock
from adapters.certuk_mod.validation import ValidationStatus
from adapters.certuk_mod.validation.observable.domain import DomainNameValidationInfo
class DomainValidationTests(unittest.TestCase):
    """Unit tests for DomainNameValidationInfo.validate() and
    get_domain_type_from_value().
    """

    # Values the validator should accept, keyed by domain type.
    # NOTE(review): 'gov' (no leading dot) appears among valid TLDs — confirm
    # the matcher intentionally accepts both dotted and bare forms.
    VALID_DOMAINS = {
        'TLD': ('.com', '.uk', 'gov'),
        'FQDN': ('abc.com', '123.com', 'www.abc.co.uk')
    }
    # Values that should be flagged, keyed by domain type.
    INVALID_DOMAINS = {
        'TLD': ('.co.uk', 'gov.uk', 'c0m'),
        'FQDN': ('-abc.com', 'test-.co.uk', 'abc.c0m')
    }

    def test_Validate_IfValueButNoType_Error(self):
        # A value with no type: the (missing) type is an ERROR and the
        # value carries no validation info.
        domain_validation = DomainNameValidationInfo.validate(value='.com')
        self.assertEqual(domain_validation.type.status if domain_validation.type else None,
                         ValidationStatus.ERROR)
        self.assertIsNone(domain_validation.value)

    def test_Validate_IfValidTypeButNoValue_Error(self):
        # Each known type with no value: the value is an ERROR.
        for domain_type in self.VALID_DOMAINS:
            domain_validation = DomainNameValidationInfo.validate(type=domain_type)
            self.assertEqual(domain_validation.value.status if domain_validation.value else None,
                             ValidationStatus.ERROR)
            self.assertIsNone(domain_validation.type)

    def test_Validate_IfValidTypeButInvalidValue_Warn(self):
        # An invalid value under a recognised type yields WARN (not ERROR).
        for domain_type in self.INVALID_DOMAINS:
            for domain_value in self.INVALID_DOMAINS[domain_type]:
                domain_validation = DomainNameValidationInfo.validate(type=domain_type, value=domain_value)
                self.assertEqual(
                    domain_validation.value.status if domain_validation.value else None, ValidationStatus.WARN,
                    'Unexpected validation (%s) with type/value: %s/%s' % (
                        domain_validation.value, domain_type, domain_value
                    )
                )
                self.assertIsNone(domain_validation.type)

    def test_Validate_IfInvalidType_Error(self):
        # Unknown type, no value: both value and type report ERROR.
        domain_validation = DomainNameValidationInfo.validate(type='xxx')
        self.assertEqual(domain_validation.value.status if domain_validation.value else None, ValidationStatus.ERROR)
        self.assertEqual(domain_validation.type.status if domain_validation.type else None, ValidationStatus.ERROR)
        # Unknown type with a value present: only the type reports ERROR.
        domain_validation = DomainNameValidationInfo.validate(type='xxx', value='-')
        self.assertIsNone(domain_validation.value)
        self.assertEqual(domain_validation.type.status if domain_validation.type else None, ValidationStatus.ERROR)

    def test_Validate_IfValidTypeAndValue_Pass(self):
        # A fully valid pair returns no validation info for either field.
        for domain_type in self.VALID_DOMAINS:
            for domain_value in self.VALID_DOMAINS[domain_type]:
                domain_validation = DomainNameValidationInfo.validate(type=domain_type, value=domain_value)
                self.assertIsNone(domain_validation.value,
                                  'Expected no value validation info, got %s with type/value %s/%s'
                                  % (domain_validation.value, domain_type, domain_value))
                self.assertIsNone(domain_validation.type,
                                  'Expected no value validation info, got %s with type/value %s/%s'
                                  % (domain_validation.type, domain_type, domain_value))

    # Stacked @mock.patch decorators apply bottom-up, so the TLD matcher
    # (listed last) arrives as the first mock argument.
    @mock.patch('adapters.certuk_mod.validation.observable.domain.DomainNameValidationInfo.FQDN_MATCHER')
    @mock.patch('adapters.certuk_mod.validation.observable.domain.DomainNameValidationInfo.TLD_MATCHER')
    def test_Get_domain_type_from_value_IfValidTLD_ReturnTrue(self, mock_tld_matcher, mock_fqdn_matcher):
        # Only the TLD matcher matches -> classified as 'TLD'.
        mock_tld_matcher.match.return_value = True
        mock_fqdn_matcher.match.return_value = False
        self.assertEqual(DomainNameValidationInfo.get_domain_type_from_value('Dummy value'), 'TLD')

    @mock.patch('adapters.certuk_mod.validation.observable.domain.DomainNameValidationInfo.FQDN_MATCHER')
    @mock.patch('adapters.certuk_mod.validation.observable.domain.DomainNameValidationInfo.TLD_MATCHER')
    def test_Get_domain_type_from_value_IfValidFQDN_ReturnTrue(self, mock_tld_matcher, mock_fqdn_matcher):
        # Only the FQDN matcher matches -> classified as 'FQDN'.
        mock_tld_matcher.match.return_value = False
        mock_fqdn_matcher.match.return_value = True
        self.assertEqual(DomainNameValidationInfo.get_domain_type_from_value('Dummy value'), 'FQDN')

    @mock.patch('adapters.certuk_mod.validation.observable.domain.DomainNameValidationInfo.FQDN_MATCHER')
    @mock.patch('adapters.certuk_mod.validation.observable.domain.DomainNameValidationInfo.TLD_MATCHER')
    def test_Get_domain_type_from_value_IfInvalidDomain_ReturnFalse(self, mock_tld_matcher, mock_fqdn_matcher):
        # Neither matcher matches -> no classification (None).
        mock_tld_matcher.match.return_value = False
        mock_fqdn_matcher.match.return_value = False
        self.assertEqual(DomainNameValidationInfo.get_domain_type_from_value('Dummy value'), None)
| 57.5
| 117
| 0.717391
| 526
| 4,830
| 6.313688
| 0.134981
| 0.130081
| 0.069557
| 0.065041
| 0.848841
| 0.832882
| 0.723276
| 0.704607
| 0.655224
| 0.655224
| 0
| 0.001288
| 0.19648
| 4,830
| 83
| 118
| 58.192771
| 0.854419
| 0
| 0
| 0.342857
| 0
| 0
| 0.17205
| 0.106211
| 0
| 0
| 0
| 0
| 0.214286
| 1
| 0.114286
| false
| 0.014286
| 0.057143
| 0
| 0.214286
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
6d15ac32e5bd4a225a52089cca36e5419af62b9c
| 2,132
|
py
|
Python
|
flotilla/test/test_util.py
|
YeoLab/flotilla
|
31da64567e59003c2b9c03fc8f4eb27ee62e299c
|
[
"MIT",
"BSD-3-Clause"
] | 98
|
2015-01-08T19:38:47.000Z
|
2021-05-04T02:11:55.000Z
|
flotilla/test/test_util.py
|
YeoLab/flotilla
|
31da64567e59003c2b9c03fc8f4eb27ee62e299c
|
[
"MIT",
"BSD-3-Clause"
] | 123
|
2015-01-08T22:28:43.000Z
|
2019-12-20T05:22:29.000Z
|
flotilla/test/test_util.py
|
YeoLab/flotilla
|
31da64567e59003c2b9c03fc8f4eb27ee62e299c
|
[
"MIT",
"BSD-3-Clause"
] | 27
|
2015-01-21T15:41:40.000Z
|
2020-12-22T05:40:47.000Z
|
"""
Test utilities interfacing with external-facing modules,
e.g. links to gene lists
"""
from __future__ import (absolute_import, division,
print_function, unicode_literals)
def test_timeout():
    """Placeholder for timeout tests; intentionally a no-op."""
    pass
def test_serve_ipython():
    """Placeholder for serve_ipython tests; intentionally a no-op."""
    pass
def test_dict_to_str():
    """dict_to_str joins key:value pairs with underscores."""
    from flotilla.util import dict_to_str

    expected = 'a:1_b:2'
    assert dict_to_str({'a': 1, 'b': 2}) == expected
#
#
# def test_install_development_package():
# pass
#
#
# def test_memoize():
# pass
#
#
# def test_cached_property():
# pass
#
#
# def test_as_numpy():
# pass
#
#
# def test_natural_sort():
# pass
#
#
# def test_to_base_file_tuple():
# pass
#
#
# def test_add_package_data_resource():
# pass
#
#
# def test_validate_params():
# pass
#
#
# def test_load_pickle_df():
# pass
#
#
# def test_write_pickle_df():
# pass
#
#
# def test_load_gzip_pickle_df():
# pass
#
#
# def test_write_gzip_pickle_df():
# pass
#
#
# def test_load_tsv():
# pass
#
#
# def test_load_json():
# pass
#
#
# def test_write_tsv():
# pass
#
#
# def test_load_csv():
# pass
#
#
# def test_write_csv():
# pass
#
#
# def test_load_hdf():
# pass
#
#
# def test_write_hdf():
# pass
#
#
# def test_get_loading_method():
# pass
#
#
# def test_timestamp():
# pass
#
#
# def test_AssertionError():
# pass
def test_link_to_list():
    """Placeholder: exercising link_to_list would require network access."""
    pass
# test_list = link_to_list(genelist_link)
#
# if genelist_link.startswith("http"):
# sys.stderr.write(
# "WARNING, downloading things from the internet, potential"
# " danger from untrusted sources\n")
# filename = tempfile.NamedTemporaryFile(mode='w+')
# filename.write(subprocess.check_output(
# ["curl", "-k", '--location-trusted', genelist_link]))
# filename.seek(0)
# elif genelist_link.startswith("/"):
# assert os.path.exists(os.path.abspath(genelist_link))
# filename = os.path.abspath(genelist_link)
# true_list = pd.read_table(filename, squeeze=True, header=None).values \
# .tolist()
#
# assert true_list == test_list
| 16.4
| 77
| 0.613977
| 263
| 2,132
| 4.642586
| 0.43346
| 0.149058
| 0.216216
| 0.07371
| 0.153972
| 0.083538
| 0
| 0
| 0
| 0
| 0
| 0.003098
| 0.242964
| 2,132
| 129
| 78
| 16.527132
| 0.753408
| 0.707786
| 0
| 0.272727
| 0
| 0
| 0.017613
| 0
| 0
| 0
| 0
| 0
| 0.090909
| 1
| 0.363636
| true
| 0.272727
| 0.181818
| 0
| 0.545455
| 0.090909
| 0
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 1
| 0
|
0
| 5
|
6d2d45313cfb6695aa1ffc83b57c82d165fd508c
| 178
|
py
|
Python
|
tasmanium/exceptions.py
|
Dri0m/tasmanium
|
39a1a60de40aaefacdf84e9e87dc06c81f084bf8
|
[
"MIT"
] | null | null | null |
tasmanium/exceptions.py
|
Dri0m/tasmanium
|
39a1a60de40aaefacdf84e9e87dc06c81f084bf8
|
[
"MIT"
] | null | null | null |
tasmanium/exceptions.py
|
Dri0m/tasmanium
|
39a1a60de40aaefacdf84e9e87dc06c81f084bf8
|
[
"MIT"
] | null | null | null |
class KeywordError(Exception):
    """Error raised for keyword-related failures."""
class SingletonError(Exception):
    """Error raised for singleton-related failures."""
class StepNotFoundError(Exception):
    """Error raised when a step cannot be found."""
class EmptyFeatureError(Exception):
    """Error raised for an empty feature."""
| 11.866667
| 35
| 0.741573
| 16
| 178
| 8.25
| 0.4375
| 0.393939
| 0.409091
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.191011
| 178
| 14
| 36
| 12.714286
| 0.916667
| 0
| 0
| 0.5
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.5
| 0
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
|
0
| 5
|
6d3a67734848854f5d90b6f2a93a4729fb55e84d
| 5,295
|
py
|
Python
|
scripts/plot_weekdays.py
|
Comrades-Gate/Herald-Bot
|
24cb4be32b0f1bd90ea458232864c2a38665084d
|
[
"MIT"
] | 1
|
2021-05-09T06:04:32.000Z
|
2021-05-09T06:04:32.000Z
|
scripts/plot_weekdays.py
|
Comrades-Gate/Herald-Bot
|
24cb4be32b0f1bd90ea458232864c2a38665084d
|
[
"MIT"
] | 8
|
2021-05-09T02:41:52.000Z
|
2021-05-13T19:50:02.000Z
|
scripts/plot_weekdays.py
|
Comrades-Gate/Herald-Bot
|
24cb4be32b0f1bd90ea458232864c2a38665084d
|
[
"MIT"
] | null | null | null |
import datetime as dat
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
lookback_from = '01/01/2021' #MM/DD/YYYY
def memberflow():
    """Plot average weekday member count (all-time vs. YTD) and save a PNG.

    Pulls the bot's all-time memberflow CSV from GitHub, aggregates the
    'Total Members' column by weekday, and writes
    'memberflow_weekday_all.png' as two stacked horizontal bar charts.
    """
    frame = "https://raw.githubusercontent.com/Comrades-Gate/Herald-Bot/main/alltime_memberflow.csv"
    df = pd.read_csv(frame, header=0)
    # Derive calendar columns from the timestamp for grouping.
    df["DateTime"] = pd.to_datetime(df.DateTime)
    df["Weekday"] = df.DateTime.dt.day_name()
    df["Day"] = df["DateTime"].dt.day
    df["Month"] = df.DateTime.dt.month_name()
    df["Year"] = df["DateTime"].dt.year
    # YTD slice: rows on/after the module-level lookback_from string.
    dfn = df.loc[df.DateTime >= lookback_from, :]
    # Fixed ordering used to reindex the grouped results.
    weekdays = ['Thursday', 'Wednesday', 'Tuesday', 'Monday', 'Sunday', 'Saturday', 'Friday']
    ### Plot data by WEEKDAY.
    # Mean/std per weekday: all-time (wd/wds) and YTD (wdn/wdns).
    wd = df[['Total Members', 'Weekday']].groupby('Weekday').mean().reindex(weekdays).reset_index()
    wds = df[['Total Members', 'Weekday']].groupby('Weekday').std().reindex(weekdays).reset_index()
    wdn = dfn[['Total Members', 'Weekday']].groupby('Weekday').mean().reindex(weekdays).reset_index()
    wdns = dfn[['Total Members', 'Weekday']].groupby('Weekday').std().reindex(weekdays).reset_index()
    fig, (ax1,ax2) = plt.subplots(nrows=2, figsize=(8,7))
    # Std deviation drawn as horizontal error bars.
    ax1.barh(wd['Weekday'], wd['Total Members'], xerr=wds['Total Members'], align='center')
    ax2.barh(wdn['Weekday'], wdn['Total Members'], xerr=wdns['Total Members'], align='center')
    plt.suptitle('Average Weekday Member Count at The Gate', fontsize=14)
    ax1.set_title("YTD Data: Comprehensive", fontsize=8)
    string = "YTD From: "
    ax2_title = string+lookback_from
    ax2.set_title(ax2_title, fontsize=8)
    # NOTE(review): plt.xlabel targets the current axes (ax2) only — confirm
    # ax1 is intentionally left unlabeled.
    plt.xlabel("Total Members")
    # Pad x-limits by the largest error bar plus a margin of 100.
    xmin1 = min(wd['Total Members']) - max(wds['Total Members']) - 100
    xmin2 = min(wdn['Total Members']) - max(wdns['Total Members']) - 100
    xmax1 = max(wd['Total Members']) + max(wds['Total Members']) + 100
    xmax2 = max(wdn['Total Members']) + max(wdns['Total Members']) + 100
    ax1.set_xlim(xmin1, xmax1)
    ax2.set_xlim(xmin2, xmax2)
    plt.savefig('memberflow_weekday_all.png', dpi=300)

# Run at import time: generate the member-count figure.
memberflow()
def messages():
    """Plot average weekday message counts (all-time vs. YTD) and save
    'message_weekday_all.png'.
    """
    frame = "https://raw.githubusercontent.com/Comrades-Gate/Herald-Bot/main/alltime_messages.csv"
    df = pd.read_csv(frame, header=0)
    # Derive calendar columns from the timestamp for grouping.
    df["DateTime"] = pd.to_datetime(df.DateTime)
    df["Weekday"] = df.DateTime.dt.day_name()
    df["Day"] = df["DateTime"].dt.day
    df["Month"] = df.DateTime.dt.month_name()
    df["Year"] = df["DateTime"].dt.year
    # YTD slice starting at lookback_from.
    dfn = df.loc[df.DateTime >= lookback_from, :]
    weekdays = ['Thursday', 'Wednesday', 'Tuesday', 'Monday', 'Sunday', 'Saturday', 'Friday']
    ### Plot data by WEEKDAY.
    # Mean messages per weekday: all-time (wd) and YTD (wdn); no std here.
    wd = df[['Messages', 'Weekday']].groupby('Weekday').mean().reindex(weekdays).reset_index()
    wdn = dfn[['Messages', 'Weekday']].groupby('Weekday').mean().reindex(weekdays).reset_index()
    fig, (ax1,ax2) = plt.subplots(nrows=2, figsize=(8,7))
    ax1.barh(wd['Weekday'], wd['Messages'], align='center')
    ax2.barh(wdn['Weekday'], wdn['Messages'], align='center')
    plt.suptitle('Average Weekday Messages Sent at The Gate', fontsize=14)
    ax1.set_title("YTD Data: Comprehensive", fontsize=8)
    string = "YTD From: "
    ax2_title = string+lookback_from
    ax2.set_title(ax2_title, fontsize=8)
    plt.xlabel("Messages Sent")
    # Pad x-limits by a fixed margin of 10.
    xmin1 = min(wd['Messages']) - 10
    xmin2 = min(wdn['Messages']) - 10
    xmax1 = max(wd['Messages']) + 10
    xmax2 = max(wdn['Messages'])+ 10
    ax1.set_xlim(xmin1, xmax1)
    ax2.set_xlim(xmin2, xmax2)
    plt.savefig('message_weekday_all.png', dpi=300)

# Run at import time: generate the messages figure.
messages()
def voice():
    """Plot average voice minutes per weekday at The Gate (all-time vs. YTD).

    Reads the all-time voice log from the Herald-Bot repository, groups
    minute totals by weekday (mean with std-dev error bars), and saves a
    two-panel horizontal bar chart to 'voice_weekday_all.png'.  Relies on
    module-level `pd`, `plt` and `lookback_from` (start of the YTD window).
    """
    frame = "https://raw.githubusercontent.com/Comrades-Gate/Herald-Bot/main/alltime_voice.csv"
    df = pd.read_csv(frame, header=0)
    df["DateTime"] = pd.to_datetime(df.DateTime)
    df["Weekday"] = df.DateTime.dt.day_name()
    df["Day"] = df["DateTime"].dt.day
    df["Month"] = df.DateTime.dt.month_name()
    df["Year"] = df["DateTime"].dt.year
    # YTD slice: rows on/after the lookback date.
    dfn = df.loc[df.DateTime >= lookback_from, :]
    # Reversed order so Friday ends up at the bottom of the barh chart.
    weekdays = ['Thursday', 'Wednesday', 'Tuesday', 'Monday', 'Sunday', 'Saturday', 'Friday']
    ### Plot data by WEEKDAY.
    wd = df[['Minutes', 'Weekday']].groupby('Weekday').mean().reindex(weekdays).reset_index()
    wds = df[['Minutes', 'Weekday']].groupby('Weekday').std().reindex(weekdays).reset_index()
    wdn = dfn[['Minutes', 'Weekday']].groupby('Weekday').mean().reindex(weekdays).reset_index()
    wdns = dfn[['Minutes', 'Weekday']].groupby('Weekday').std().reindex(weekdays).reset_index()
    fig, (ax1, ax2) = plt.subplots(nrows=2, figsize=(8, 7))
    ax1.barh(wd['Weekday'], wd['Minutes'], xerr=wds['Minutes'], align='center')
    ax2.barh(wdn['Weekday'], wdn['Minutes'], xerr=wdns['Minutes'], align='center')
    plt.suptitle('Average Weekday Voice Minutes at The Gate', fontsize=14)
    ax1.set_title("YTD Data: Comprehensive", fontsize=8)
    ax2.set_title("YTD From: " + lookback_from, fontsize=8)
    plt.xlabel("Voice Minutes")
    # Pad limits by the largest std plus 100 so error bars stay in frame.
    ax1.set_xlim(min(wd['Minutes']) - max(wds['Minutes']) - 100,
                 max(wd['Minutes']) + max(wds['Minutes']) + 100)
    ax2.set_xlim(min(wdn['Minutes']) - max(wdns['Minutes']) - 100,
                 max(wdn['Minutes']) + max(wdns['Minutes']) + 100)
    plt.savefig('voice_weekday_all.png', dpi=300)
    # Fix: release the figure so repeated runs don't accumulate open
    # figures (matplotlib warns and leaks memory after ~20 figures).
    plt.close(fig)
voice()
| 43.04878
| 101
| 0.649669
| 718
| 5,295
| 4.713092
| 0.158774
| 0.062057
| 0.042553
| 0.073877
| 0.839244
| 0.8224
| 0.758865
| 0.731383
| 0.689716
| 0.654551
| 0
| 0.028329
| 0.153352
| 5,295
| 122
| 102
| 43.401639
| 0.726522
| 0.014353
| 0
| 0.46875
| 0
| 0.03125
| 0.282588
| 0.013438
| 0
| 0
| 0
| 0
| 0
| 1
| 0.03125
| false
| 0
| 0.041667
| 0
| 0.072917
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
6d4c067909cf8dc55e9ee521a03270ab4c5eec3c
| 36
|
py
|
Python
|
otherpythonfile.py
|
bradleybarrett3160/cs3240-labdemo
|
b7bfa762e2b33996230154fd685fe53788c8080c
|
[
"MIT"
] | null | null | null |
otherpythonfile.py
|
bradleybarrett3160/cs3240-labdemo
|
b7bfa762e2b33996230154fd685fe53788c8080c
|
[
"MIT"
] | null | null | null |
otherpythonfile.py
|
bradleybarrett3160/cs3240-labdemo
|
b7bfa762e2b33996230154fd685fe53788c8080c
|
[
"MIT"
] | null | null | null |
def othergreeting(msg):
    """Write *msg* (followed by a newline) to standard output."""
    print(msg)
| 12
| 23
| 0.75
| 5
| 36
| 5.4
| 0.8
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.111111
| 36
| 2
| 24
| 18
| 0.84375
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| false
| 0
| 0
| 0
| 0.5
| 0.5
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
|
0
| 5
|
6d4df010be14394a7139f9de6ce3e4ab431e75a6
| 104
|
py
|
Python
|
enthought/mayavi/tools/data_wizards/csv_sniff.py
|
enthought/etsproxy
|
4aafd628611ebf7fe8311c9d1a0abcf7f7bb5347
|
[
"BSD-3-Clause"
] | 3
|
2016-12-09T06:05:18.000Z
|
2018-03-01T13:00:29.000Z
|
enthought/mayavi/tools/data_wizards/csv_sniff.py
|
enthought/etsproxy
|
4aafd628611ebf7fe8311c9d1a0abcf7f7bb5347
|
[
"BSD-3-Clause"
] | 1
|
2020-12-02T00:51:32.000Z
|
2020-12-02T08:48:55.000Z
|
enthought/mayavi/tools/data_wizards/csv_sniff.py
|
enthought/etsproxy
|
4aafd628611ebf7fe8311c9d1a0abcf7f7bb5347
|
[
"BSD-3-Clause"
] | null | null | null |
# proxy module
from __future__ import absolute_import
from mayavi.tools.data_wizards.csv_sniff import *
| 26
| 49
| 0.846154
| 15
| 104
| 5.4
| 0.8
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.105769
| 104
| 3
| 50
| 34.666667
| 0.870968
| 0.115385
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
ed8f4fdca923f6e806140c5b44fb52d4a3fb8c94
| 2,164
|
py
|
Python
|
python/openmldb/sqlalchemy_openmldb/requirements.py
|
HuilinWu2/OpenMLDB
|
58aceec149cdfb064e7e0cf7bd7052a93089377d
|
[
"Apache-2.0"
] | 18
|
2020-06-14T21:29:12.000Z
|
2022-01-28T10:58:14.000Z
|
sqlalchemy_access/requirements.py
|
gordthompson/sqlalchemy-access
|
da4b8036643649503cacb2cf9b43fc001bf1915a
|
[
"MIT"
] | 8
|
2020-03-30T21:00:57.000Z
|
2022-01-12T15:07:56.000Z
|
python/openmldb/sqlalchemy_openmldb/requirements.py
|
HuilinWu2/OpenMLDB
|
58aceec149cdfb064e7e0cf7bd7052a93089377d
|
[
"Apache-2.0"
] | 7
|
2020-03-30T18:42:13.000Z
|
2022-03-04T08:08:31.000Z
|
from sqlalchemy.testing.requirements import SuiteRequirements
from sqlalchemy.testing import exclusions
class Requirements(SuiteRequirements):
    """Capability flags consumed by the SQLAlchemy dialect test suite.

    Each property answers "does the target backend support this feature?"
    by returning `exclusions.open()` (supported) or `exclusions.closed()`
    (unsupported), so the corresponding suite tests run or are skipped.
    """

    @property
    def bound_limit_offset(self):
        return exclusions.closed()

    @property
    def date(self):
        return exclusions.closed()

    @property
    def datetime_microseconds(self):
        return exclusions.closed()

    @property
    def floats_to_four_decimals(self):
        return exclusions.closed()

    # TODO: remove this when SQLA released with
    # https://gerrit.sqlalchemy.org/c/sqlalchemy/sqlalchemy/+/2990
    @property
    def implicitly_named_constraints(self):
        return exclusions.open()

    @property
    def nullable_booleans(self):
        """Target database allows boolean columns to store NULL."""
        # Access Yes/No doesn't allow null
        return exclusions.closed()

    @property
    def offset(self):
        # Access does LIMIT (via TOP) but not OFFSET
        return exclusions.closed()

    @property
    def parens_in_union_contained_select_w_limit_offset(self):
        return exclusions.closed()

    @property
    def precision_generic_float_type(self):
        return exclusions.closed()

    @property
    def reflects_pk_names(self):
        return exclusions.open()

    @property
    def sql_expression_limit_offset(self):
        return exclusions.closed()

    @property
    def temp_table_reflection(self):
        return exclusions.closed()

    @property
    def temporary_tables(self):
        return exclusions.closed()

    @property
    def temporary_views(self):
        return exclusions.closed()

    @property
    def time(self):
        return exclusions.closed()

    @property
    def time_microseconds(self):
        return exclusions.closed()

    @property
    def timestamp_microseconds(self):
        return exclusions.closed()

    @property
    def unicode_ddl(self):
        # Access won't let you drop a child table unless
        # you drop the FK constraint first. Not worth the grief.
        return exclusions.closed()

    @property
    def view_column_reflection(self):
        return exclusions.open()
| 24.314607
| 72
| 0.672366
| 238
| 2,164
| 5.97479
| 0.415966
| 0.146976
| 0.225035
| 0.316456
| 0.49789
| 0.42827
| 0.327004
| 0.101266
| 0
| 0
| 0
| 0.00246
| 0.248614
| 2,164
| 88
| 73
| 24.590909
| 0.872079
| 0.157579
| 0
| 0.633333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.011364
| 0
| 1
| 0.316667
| false
| 0
| 0.033333
| 0.3
| 0.683333
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
|
0
| 5
|
edcb724c7dd497f998fcb3921114156b12a7a9ed
| 205
|
py
|
Python
|
coptim/optimizer.py
|
cmazzaanthony/Optimization_Algorithms
|
8dcfe1fcadbe4b3908b33dbc0f14f6d5c0178ce5
|
[
"MIT"
] | 3
|
2019-06-20T17:26:07.000Z
|
2019-07-02T22:14:38.000Z
|
coptim/optimizer.py
|
cmazzaanthony/coptim
|
8dcfe1fcadbe4b3908b33dbc0f14f6d5c0178ce5
|
[
"MIT"
] | null | null | null |
coptim/optimizer.py
|
cmazzaanthony/coptim
|
8dcfe1fcadbe4b3908b33dbc0f14f6d5c0178ce5
|
[
"MIT"
] | null | null | null |
from abc import ABC, abstractmethod
class Optimizer(ABC):
    """Abstract interface for optimization routines.

    Concrete optimizers must provide both :meth:`optimize` and
    :meth:`stopping_criteria`; the base class cannot be instantiated.
    """

    @abstractmethod
    def optimize(self, **kwargs):
        """Run the optimization; subclasses supply the algorithm."""
        pass

    @abstractmethod
    def stopping_criteria(self, **kwargs):
        """Decide whether the optimization should terminate."""
        pass
| 15.769231
| 42
| 0.653659
| 21
| 205
| 6.333333
| 0.619048
| 0.255639
| 0.210526
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.258537
| 205
| 12
| 43
| 17.083333
| 0.875
| 0
| 0
| 0.5
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| false
| 0.25
| 0.125
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 5
|
eddf5727087c0e59873fb9d8e126aa8e8e161b03
| 39
|
py
|
Python
|
src/server/classes/__init__.py
|
LiteralGenie/HvData
|
e4a8cac99c7443d7c6be41b4586b1d5a01e27a2b
|
[
"MIT"
] | null | null | null |
src/server/classes/__init__.py
|
LiteralGenie/HvData
|
e4a8cac99c7443d7c6be41b4586b1d5a01e27a2b
|
[
"MIT"
] | null | null | null |
src/server/classes/__init__.py
|
LiteralGenie/HvData
|
e4a8cac99c7443d7c6be41b4586b1d5a01e27a2b
|
[
"MIT"
] | null | null | null |
from .proxy_session import ProxySession
| 39
| 39
| 0.897436
| 5
| 39
| 6.8
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.076923
| 39
| 1
| 39
| 39
| 0.944444
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
ede0765cb8b3e90d86b3edbe3434c84f1a8f8e2b
| 49
|
py
|
Python
|
plugins/web.py
|
ryoung2512/bot
|
a0d42152410086630a03a3fdb45436935cb48402
|
[
"MIT"
] | null | null | null |
plugins/web.py
|
ryoung2512/bot
|
a0d42152410086630a03a3fdb45436935cb48402
|
[
"MIT"
] | null | null | null |
plugins/web.py
|
ryoung2512/bot
|
a0d42152410086630a03a3fdb45436935cb48402
|
[
"MIT"
] | null | null | null |
def web_search(args):
    """Placeholder web-search handler; currently only logs that it ran.

    `args` is accepted for the plugin call signature but is not yet used.
    """
    print("in websearch")
| 12.25
| 25
| 0.673469
| 7
| 49
| 4.571429
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.183673
| 49
| 3
| 26
| 16.333333
| 0.8
| 0
| 0
| 0
| 0
| 0
| 0.25
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| false
| 0
| 0
| 0
| 0.5
| 0.5
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
|
0
| 5
|
61193c76816a80c335ade6b510f94c5403635951
| 146
|
py
|
Python
|
test/material/gold_sentence.py
|
nowindxdw/flask_base
|
44963513a3945ebf6cd7c4dcd7fbd67d6d8c5641
|
[
"MIT"
] | null | null | null |
test/material/gold_sentence.py
|
nowindxdw/flask_base
|
44963513a3945ebf6cd7c4dcd7fbd67d6d8c5641
|
[
"MIT"
] | 2
|
2020-04-22T11:26:13.000Z
|
2020-04-22T11:26:20.000Z
|
test/material/test_gold_sentence.py
|
nowindxdw/flask_base
|
44963513a3945ebf6cd7c4dcd7fbd67d6d8c5641
|
[
"MIT"
] | null | null | null |
[{"val": "life is not easy.", "id": 1, "key": ["a", "b", "c", "ddef"]}, {"val": "Knowledge is power.", "id": 2, "key": ["1", "2", "3", "4", "5"]}]
| 146
| 146
| 0.40411
| 24
| 146
| 2.458333
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.056911
| 0.157534
| 146
| 1
| 146
| 146
| 0.422764
| 0
| 0
| 0
| 0
| 0
| 0.435374
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
b6243ccdf256a2c3b1a3cbcdedbfe12806555b63
| 597
|
py
|
Python
|
data/users.py
|
lev2454/VGA-Web-Edition
|
4d8fb7b93373ee00fb78889cab2213aaa5a4cdc9
|
[
"BSD-3-Clause"
] | 1
|
2020-05-05T16:10:36.000Z
|
2020-05-05T16:10:36.000Z
|
data/users.py
|
lev2454/VGA-Web-Edition
|
4d8fb7b93373ee00fb78889cab2213aaa5a4cdc9
|
[
"BSD-3-Clause"
] | null | null | null |
data/users.py
|
lev2454/VGA-Web-Edition
|
4d8fb7b93373ee00fb78889cab2213aaa5a4cdc9
|
[
"BSD-3-Clause"
] | null | null | null |
import datetime
import sqlalchemy
from .db_session import SqlAlchemyBase
from sqlalchemy_serializer import SerializerMixin
class User(SqlAlchemyBase, SerializerMixin):
    """ORM model for an application user account (table ``users``)."""

    __tablename__ = 'users'

    # Surrogate primary key.
    id = sqlalchemy.Column(sqlalchemy.Integer, primary_key=True, autoincrement=True)
    # Unique login handle.
    login = sqlalchemy.Column(sqlalchemy.String, unique=True)
    # Indexed for lookup; also unique per account.
    email = sqlalchemy.Column(sqlalchemy.String, index=True, unique=True)
    # Password hash (never the plaintext password).
    hashed_password = sqlalchemy.Column(sqlalchemy.String, nullable=True)
    # Row-creation timestamp; datetime.now is naive local time — confirm
    # whether UTC is expected by callers.
    created_date = sqlalchemy.Column(sqlalchemy.DateTime, default=datetime.datetime.now)
| 39.8
| 89
| 0.782245
| 64
| 597
| 7.15625
| 0.5
| 0.174672
| 0.283843
| 0.209607
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.135678
| 597
| 14
| 90
| 42.642857
| 0.887597
| 0
| 0
| 0
| 0
| 0
| 0.008576
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0.090909
| 0.363636
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| 0
|
0
| 5
|
b64dcf91fb015f1b1308b7066c8190a651a149b1
| 92
|
py
|
Python
|
designthinking/admin.py
|
IBMIXN/DesignThinkingapp
|
2f05f9130d8fd26f3ae7a94456a3e442c9fa5518
|
[
"Apache-2.0"
] | 5
|
2021-07-19T14:41:52.000Z
|
2022-03-26T06:51:49.000Z
|
designthinking/admin.py
|
IBMIXN/DesignThinkingapp
|
2f05f9130d8fd26f3ae7a94456a3e442c9fa5518
|
[
"Apache-2.0"
] | null | null | null |
designthinking/admin.py
|
IBMIXN/DesignThinkingapp
|
2f05f9130d8fd26f3ae7a94456a3e442c9fa5518
|
[
"Apache-2.0"
] | 1
|
2021-10-21T17:38:51.000Z
|
2021-10-21T17:38:51.000Z
|
from django.contrib import admin

from .models import Contact

# Expose the Contact model in the Django admin site.
admin.site.register(Contact)
| 18.4
| 32
| 0.815217
| 13
| 92
| 5.769231
| 0.692308
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.119565
| 92
| 4
| 33
| 23
| 0.925926
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.666667
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
b65cde0826ad03e3db61b13b244683fe072ebed6
| 57,385
|
py
|
Python
|
cli/polyaxon/client/project.py
|
polyaxon/cli
|
3543c0220a8a7c06fc9573cd2a740f8ae4930641
|
[
"Apache-2.0"
] | null | null | null |
cli/polyaxon/client/project.py
|
polyaxon/cli
|
3543c0220a8a7c06fc9573cd2a740f8ae4930641
|
[
"Apache-2.0"
] | 1
|
2022-01-24T11:26:47.000Z
|
2022-03-18T23:17:58.000Z
|
cli/polyaxon/client/project.py
|
polyaxon/cli
|
3543c0220a8a7c06fc9573cd2a740f8ae4930641
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/python
#
# Copyright 2018-2022 Polyaxon, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from datetime import datetime
from requests import HTTPError
from typing import Dict, List, Tuple, Union
import ujson
from marshmallow import EXCLUDE
import polyaxon_sdk
from polyaxon.client.client import PolyaxonClient
from polyaxon.client.decorators import client_handler, get_global_or_inline_config
from polyaxon.constants.globals import DEFAULT
from polyaxon.contexts import paths as ctx_paths
from polyaxon.exceptions import PolyaxonClientException
from polyaxon.lifecycle import V1ProjectVersionKind, V1StageCondition
from polyaxon.logger import logger
from polyaxon.utils.fqn_utils import get_entity_full_name, get_entity_info
from polyaxon.utils.path_utils import check_or_create_path, delete_path
from polyaxon.utils.query_params import get_query_params
from polyaxon.utils.tz_utils import now
from polyaxon.utils.validation import validate_tags
from polyaxon_sdk.rest import ApiException
from traceml.artifacts import V1RunArtifact
class ProjectClient:
"""ProjectClient is a client to communicate with Polyaxon projects endpoints.
If no values are passed to this class,
Polyaxon will try to resolve the owner and project from the environment:
* If you have a configured CLI, Polyaxon will use the configuration of the cli.
* If you have a cached project using the CLI,
the client will default to that cached project unless you override the values.
* If you use this client in the context of a job or a service managed by Polyaxon,
a configuration will be available to resolve the values based on that run.
If you intend to create a new project instance or to list projects,
only the `owner` parameter is required.
Properties:
project: str.
owner: str.
project_data: V1Project.
Args:
owner: str, optional, the owner is the username or
the organization name owning this project.
project: str, optional, project name.
client: [PolyaxonClient](/docs/core/python-library/polyaxon-client/), optional,
an instance of a configured client, if not passed,
a new instance will be created based on the available environment.
is_offline: bool, optional,
To trigger the offline mode manually instead of depending on `POLYAXON_IS_OFFLINE`.
no_op: bool, optional,
To set the NO_OP mode manually instead of depending on `POLYAXON_NO_OP`.
Raises:
PolyaxonClientException: If no owner is passed and Polyaxon cannot
resolve an owner from the environment.
"""
@client_handler(check_no_op=True)
def __init__(
self,
owner: str = None,
project: str = None,
client: PolyaxonClient = None,
is_offline: bool = None,
no_op: bool = None,
):
self._is_offline = get_global_or_inline_config(
config_key="is_offline", config_value=is_offline, client=client
)
self._no_op = get_global_or_inline_config(
config_key="no_op", config_value=no_op, client=client
)
if self._no_op:
return
if not owner and project:
owner, project = get_entity_info(
get_entity_full_name(owner=owner, entity=project)
)
if not owner:
raise PolyaxonClientException("Please provide a valid owner.")
self._client = client
self._owner = owner or DEFAULT
self._project = project
self._project_data = polyaxon_sdk.V1Project()
@property
def client(self):
if self._client:
return self._client
self._client = PolyaxonClient()
return self._client
@property
def owner(self):
return self._owner
@property
def project(self):
return self._project
@property
def project_data(self):
return self._project_data
@client_handler(check_no_op=True, check_offline=True)
def refresh_data(self):
"""Fetches the project data from the api."""
self._project_data = self.client.projects_v1.get_project(
self.owner, self.project
)
if self._project_data.owner is None:
self._project_data.owner = self.owner
@client_handler(check_no_op=True, check_offline=True)
def create(
self, data: Union[Dict, polyaxon_sdk.V1Project]
) -> polyaxon_sdk.V1Project:
"""Creates a new project based on the data passed.
[Project API](/docs/api/#operation/CreateProject)
Args:
data: dict or V1Project, required.
Returns:
V1Project, project instance from the response.
"""
self._project_data = self.client.projects_v1.create_project(
self.owner,
data,
async_req=False,
)
self._project_data.owner = self.owner
self._project = self._project_data.name
return self._project_data
@client_handler(check_no_op=True, check_offline=True)
def list(
self, query: str = None, sort: str = None, limit: int = None, offset: int = None
) -> List[polyaxon_sdk.V1Project]:
"""Lists projects under the current owner.
[Project API](/docs/api/#operation/ListProjects)
Args:
query: str, optional, query filters, please refer to
[Project PQL](/docs/core/query-language/projects/#query)
sort: str, optional, fields to order by, please refer to
[Project PQL](/docs/core/query-language/projects/#sort)
limit: int, optional, limit of projects to return.
offset: int, optional, offset pages to paginate projects.
Returns:
List[V1Project], list of project instances.
"""
params = get_query_params(limit=limit, offset=offset, query=query, sort=sort)
return self.client.projects_v1.list_projects(self.owner, **params)
@client_handler(check_no_op=True, check_offline=True)
def delete(self):
"""Deletes project based on the current owner and project."""
return self.client.projects_v1.delete_project(self.owner, self.project)
@client_handler(check_no_op=True, check_offline=True)
def update(
self, data: Union[Dict, polyaxon_sdk.V1Project]
) -> polyaxon_sdk.V1Project:
"""Updates a project based on the data passed.
[Project API](/docs/api/#operation/PatchProject)
Args:
data: Dict or V1Project, required.
Returns:
V1Project, project instance from the response.
"""
self._project_data = self.client.projects_v1.patch_project(
self.owner,
self.project,
body=data,
async_req=False,
)
self._project = self._project_data.name
return self._project_data
@client_handler(check_no_op=True, check_offline=True)
def list_runs(
self, query: str = None, sort: str = None, limit: int = None, offset: int = None
):
"""Lists runs under the current owner/project.
[Run API](/docs/api/#operation/ListRuns)
Args:
query: str, optional, query filters, please refer to
[Run PQL](/docs/core/query-language/runs/#query)
sort: str, optional, fields to order by, please refer to
[Run PQL](/docs/core/query-language/runs/#sort)
limit: int, optional, limit of runs to return.
offset: int, optional, offset pages to paginate runs.
Returns:
List[V1Run], list of run instances.
"""
params = get_query_params(
limit=limit or 20, offset=offset, query=query, sort=sort
)
return self.client.runs_v1.list_runs(self.owner, self.project, **params)
def _validate_kind(self, kind: V1ProjectVersionKind):
if kind not in V1ProjectVersionKind.allowable_values:
raise ValueError(
"The kind `{}` is not supported, it must be one of the values `{}`".format(
kind, V1ProjectVersionKind.allowable_values
)
)
@client_handler(check_no_op=True, check_offline=True)
def list_versions(
self,
kind: V1ProjectVersionKind,
query: str = None,
sort: str = None,
limit: int = None,
offset: int = None,
) -> polyaxon_sdk.V1ListProjectVersionsResponse:
"""Lists project versions under the current owner/project based on version kind.
This is a generic function that maps to list:
* component versions
* model versions
* artifact versions
[Project API](/docs/api/#operation/ListProjectVersions)
Args:
kind: V1ProjectVersionKind, kind of the project version.
query: str, optional, query filters, please refer to
[Run PQL](/docs/core/query-language/project-versions/#query)
sort: str, optional, fields to order by, please refer to
[Run PQL](/docs/core/query-language/project-versions/#sort)
limit: int, optional, limit of project versions to return.
offset: int, optional, offset pages to paginate project versions.
Returns:
List[V1ProjectVersion], list of versions.
"""
self._validate_kind(kind)
params = get_query_params(
limit=limit or 20, offset=offset, query=query, sort=sort
)
return self.client.projects_v1.list_versions(
self.owner, self.project, kind, **params
)
@client_handler(check_no_op=True, check_offline=True)
def list_component_versions(
self,
query: str = None,
sort: str = None,
limit: int = None,
offset: int = None,
) -> polyaxon_sdk.V1ListProjectVersionsResponse:
"""Lists component versions under the current owner/project.
[Project API](/docs/api/#operation/ListProjectVersions)
Args:
query: str, optional, query filters, please refer to
[Run PQL](/docs/core/query-language/project-versions/#query)
sort: str, optional, fields to order by, please refer to
[Run PQL](/docs/core/query-language/project-versions/#sort)
limit: int, optional, limit of project versions to return.
offset: int, optional, offset pages to paginate project versions.
Returns:
List[V1ProjectVersion], list of component versions.
"""
return self.list_versions(
kind=V1ProjectVersionKind.COMPONENT,
query=query,
sort=sort,
limit=limit,
offset=offset,
)
@client_handler(check_no_op=True, check_offline=True)
def list_model_versions(
self,
query: str = None,
sort: str = None,
limit: int = None,
offset: int = None,
) -> polyaxon_sdk.V1ListProjectVersionsResponse:
"""Lists model versions under the current owner/project.
[Project API](/docs/api/#operation/ListProjectVersions)
Args:
query: str, optional, query filters, please refer to
[Run PQL](/docs/core/query-language/project-versions/#query)
sort: str, optional, fields to order by, please refer to
[Run PQL](/docs/core/query-language/project-versions/#sort)
limit: int, optional, limit of project versions to return.
offset: int, optional, offset pages to paginate project versions.
Returns:
List[V1ProjectVersion], list of model versions.
"""
return self.list_versions(
kind=V1ProjectVersionKind.MODEL,
query=query,
sort=sort,
limit=limit,
offset=offset,
)
@client_handler(check_no_op=True, check_offline=True)
def list_artifact_versions(
self,
query: str = None,
sort: str = None,
limit: int = None,
offset: int = None,
) -> polyaxon_sdk.V1ListProjectVersionsResponse:
"""Lists artifact versions under the current owner/project.
[Project API](/docs/api/#operation/ListProjectVersions)
Args:
query: str, optional, query filters, please refer to
[Run PQL](/docs/core/query-language/project-versions/#query)
sort: str, optional, fields to order by, please refer to
[Run PQL](/docs/core/query-language/project-versions/#sort)
limit: int, optional, limit of project versions to return.
offset: int, optional, offset pages to paginate project versions.
Returns:
List[V1ProjectVersion], list of artifact versions.
"""
return self.list_versions(
kind=V1ProjectVersionKind.ARTIFACT,
query=query,
sort=sort,
limit=limit,
offset=offset,
)
@client_handler(check_no_op=True, check_offline=True)
def get_version(
self, kind: V1ProjectVersionKind, version: str
) -> polyaxon_sdk.V1ProjectVersion:
"""Gets a project version under the current owner/project based on version kind.
This is a generic function that maps to get:
* component version
* model version
* artifact version
[Project API](/docs/api/#operation/GetVersion)
Args:
kind: V1ProjectVersionKind, kind of the project version.
version: str, required, the version name/tag.
Returns:
V1ProjectVersion.
"""
self._validate_kind(kind)
response = self.client.projects_v1.get_version(
self.owner, self.project, kind, version
)
if response.kind != kind:
raise PolyaxonClientException("This version is not of kind `%s`." % kind)
return response
@client_handler(check_no_op=True, check_offline=True)
def get_component_version(self, version: str) -> polyaxon_sdk.V1ProjectVersion:
"""Gets a component version under the current owner/project.
[Project API](/docs/api/#operation/GetVersion)
Args:
version: str, required, the version name/tag.
Returns:
V1ProjectVersion, component version.
"""
return self.get_version(kind=V1ProjectVersionKind.COMPONENT, version=version)
@client_handler(check_no_op=True, check_offline=True)
def get_model_version(self, version: str) -> polyaxon_sdk.V1ProjectVersion:
"""Gets a model version under the current owner/project.
[Project API](/docs/api/#operation/GetVersion)
Args:
version: str, required, the version name/tag.
Returns:
V1ProjectVersion, model version.
"""
return self.get_version(kind=V1ProjectVersionKind.MODEL, version=version)
@client_handler(check_no_op=True, check_offline=True)
def get_artifact_version(self, version: str) -> polyaxon_sdk.V1ProjectVersion:
"""Gets an artifact version under the current owner/project.
[Project API](/docs/api/#operation/GetVersion)
Args:
version: str, required, the version name/tag.
Returns:
V1ProjectVersion, artifact version.
"""
return self.get_version(kind=V1ProjectVersionKind.ARTIFACT, version=version)
@client_handler(check_no_op=True, check_offline=True)
def get_version_stages(
self, kind: V1ProjectVersionKind, version: str
) -> Tuple[str, List[V1StageCondition]]:
"""Gets a project version stages under the current owner/project based on version kind.
This is a generic function that maps to get:
* component version
* model version
* artifact version
[Project API](/docs/api/#operation/GetVersionStages)
Args:
kind: V1ProjectVersionKind, kind of the project version.
version: str, required, the version name/tag.
Returns:
Tuple[str, List[V1StageCondition]]
"""
self._validate_kind(kind)
response = self.client.projects_v1.get_version_stages(
self.owner, self.project, kind, version
)
return response.stage, response.stage_conditions
@client_handler(check_no_op=True, check_offline=True)
def get_component_version_stages(
self, version: str
) -> Tuple[str, List[V1StageCondition]]:
"""Gets a component version stages under the current owner/project.
[Project API](/docs/api/#operation/GetVersionStages)
Args:
version: str, required, the version name/tag.
Returns:
Tuple[str, List[V1StageCondition]]
"""
return self.get_version_stages(
kind=V1ProjectVersionKind.COMPONENT, version=version
)
@client_handler(check_no_op=True, check_offline=True)
def get_model_version_stages(
self, version: str
) -> Tuple[str, List[V1StageCondition]]:
"""Gets a model version under the current owner/project.
[Project API](/docs/api/#operation/GetVersionStages)
Args:
version: str, required, the version name/tag.
Returns:
Tuple[str, List[V1StageCondition]]
"""
return self.get_version_stages(kind=V1ProjectVersionKind.MODEL, version=version)
@client_handler(check_no_op=True, check_offline=True)
def get_artifact_version_stages(
self, version: str
) -> Tuple[str, List[V1StageCondition]]:
"""Gets an artifact version under the current owner/project.
[Project API](/docs/api/#operation/GetVersionStages)
Args:
version: str, required, the version name/tag.
Returns:
Tuple[str, List[V1StageCondition]]
"""
return self.get_version_stages(
kind=V1ProjectVersionKind.ARTIFACT, version=version
)
@client_handler(check_no_op=True, check_offline=True)
def create_version(
self,
kind: V1ProjectVersionKind,
data: Union[Dict, polyaxon_sdk.V1ProjectVersion],
) -> polyaxon_sdk.V1ProjectVersion:
"""Creates a project version based on the data passed based on version kind.
This is a generic function based on the kind passed and creates a:
* component version
* model version
* artifact version
[Project API](/docs/api/#operation/CreateVersion)
Args:
kind: V1ProjectVersionKind, kind of the project version.
data: Dict or V1ProjectVersion, required.
Returns:
V1ProjectVersion.
"""
self._validate_kind(kind)
if isinstance(data, polyaxon_sdk.V1ProjectVersion):
data.kind = kind
elif isinstance(data, dict):
data["kind"] = kind
return self.client.projects_v1.create_version(
self.owner,
self.project,
kind,
body=data,
async_req=False,
)
@client_handler(check_no_op=True, check_offline=True)
def create_component_version(
self,
data: Union[Dict, polyaxon_sdk.V1ProjectVersion],
) -> polyaxon_sdk.V1ProjectVersion:
"""Creates a component version based on the data passed.
[Project API](/docs/api/#operation/CreateVersion)
Args:
data: Dict or V1ProjectVersion, required.
Returns:
V1ProjectVersion, component version.
"""
return self.create_version(
kind=V1ProjectVersionKind.COMPONENT,
data=data,
)
@client_handler(check_no_op=True, check_offline=True)
def create_model_version(
self,
data: Union[Dict, polyaxon_sdk.V1ProjectVersion],
) -> polyaxon_sdk.V1ProjectVersion:
"""Creates a model version based on the data passed.
[Project API](/docs/api/#operation/CreateVersion)
Args:
data: Dict or V1ProjectVersion, required.
Returns:
V1ProjectVersion, model version.
"""
return self.create_version(
kind=V1ProjectVersionKind.MODEL,
data=data,
)
@client_handler(check_no_op=True, check_offline=True)
def create_artifact_version(
self,
data: Union[Dict, polyaxon_sdk.V1ProjectVersion],
) -> polyaxon_sdk.V1ProjectVersion:
"""Creates an artifact version based on the data passed.
[Project API](/docs/api/#operation/CreateVersion)
Args:
data: Dict or V1ProjectVersion, required.
Returns:
V1ProjectVersion, artifact version.
"""
return self.create_version(
kind=V1ProjectVersionKind.ARTIFACT,
data=data,
)
@client_handler(check_no_op=True, check_offline=True)
def patch_version(
self,
kind: V1ProjectVersionKind,
version: str,
data: Union[Dict, polyaxon_sdk.V1ProjectVersion],
) -> polyaxon_sdk.V1ProjectVersion:
"""Updates a project version based on the data passed and version kind.
This is a generic function based on the kind passed and patches a:
* component version
* model version
* artifact version
[Project API](/docs/api/#operation/PatchVersion)
Args:
kind: V1ProjectVersionKind, kind of the project version.
version: str, required, the version name/tag.
data: Dict or V1ProjectVersion, required.
Returns:
V1ProjectVersion.
"""
self._validate_kind(kind)
return self.client.projects_v1.patch_version(
self.owner,
self.project,
kind,
version,
body=data,
async_req=False,
)
@client_handler(check_no_op=True, check_offline=True)
def patch_component_version(
    self,
    version: str,
    data: Union[Dict, polyaxon_sdk.V1ProjectVersion],
) -> polyaxon_sdk.V1ProjectVersion:
    """Updates a component version based on the data passed.

    [Project API](/docs/api/#operation/PatchVersion)

    Args:
        version: str, required, the version name/tag.
        data: Dict or V1ProjectVersion, required.

    Returns:
        V1ProjectVersion, component version.
    """
    # Thin wrapper around the generic patch with the component kind.
    return self.patch_version(
        kind=V1ProjectVersionKind.COMPONENT, version=version, data=data
    )
@client_handler(check_no_op=True, check_offline=True)
def patch_model_version(
    self,
    version: str,
    data: Union[Dict, polyaxon_sdk.V1ProjectVersion],
) -> polyaxon_sdk.V1ProjectVersion:
    """Updates a model version based on the data passed.

    [Project API](/docs/api/#operation/PatchVersion)

    Args:
        version: str, required, the version name/tag.
        data: Dict or V1ProjectVersion, required.

    Returns:
        V1ProjectVersion, model version.
    """
    # Thin wrapper around the generic patch with the model kind.
    return self.patch_version(
        kind=V1ProjectVersionKind.MODEL, version=version, data=data
    )
@client_handler(check_no_op=True, check_offline=True)
def patch_artifact_version(
    self,
    version: str,
    data: Union[Dict, polyaxon_sdk.V1ProjectVersion],
) -> polyaxon_sdk.V1ProjectVersion:
    """Updates an artifact version based on the data passed.

    [Project API](/docs/api/#operation/PatchVersion)

    Args:
        version: str, required, the version name/tag.
        data: Dict or V1ProjectVersion, required.

    Returns:
        V1ProjectVersion, artifact version.
    """
    # Thin wrapper around the generic patch with the artifact kind.
    return self.patch_version(
        kind=V1ProjectVersionKind.ARTIFACT, version=version, data=data
    )
@client_handler(check_no_op=True, check_offline=True)
def register_version(
    self,
    kind: V1ProjectVersionKind,
    version: str,
    description: str = None,
    tags: Union[str, List[str]] = None,
    content: Union[str, Dict] = None,
    run: str = None,
    connection: str = None,
    artifacts: List[str] = None,
    force: bool = False,
) -> polyaxon_sdk.V1ProjectVersion:
    """Creates or Updates a project version based on the data passed.

    This is a generic function based on the kind passed and registers a:
        * component version
        * model version
        * artifact version

    Args:
        kind: V1ProjectVersionKind, kind of the project version.
        version: str, optional, the version name/tag.
        description: str, optional, the version description.
        tags: str or List[str], optional.
        content: str or dict, optional, content/metadata (JSON object) of the version.
        run: str, optional, a uuid reference to the run.
        connection: str, optional, a uuid reference to a connection.
        artifacts: List[str], optional, list of artifacts to highlight(requires passing a run)
        force: bool, optional, to force push, i.e. update if exists.

    Returns:
        V1ProjectVersion.

    Raises:
        PolyaxonClientException: if the version already exists and `force` is not set.
    """
    try:
        self.get_version(kind, version)
        if not force:
            # Fix: the original message had an unbalanced parenthesis
            # ("flag for CLI)"); balanced as "(for CLI)".
            raise PolyaxonClientException(
                "A {} version {} already exists. "
                "Please pass the `force` argument or `--force` flag (for CLI) "
                "if you want to push force this version.".format(kind, version)
            )
        to_update = True
    except (ApiException, HTTPError, AttributeError):
        # Version could not be fetched: treat as "does not exist" and create it.
        to_update = False
    if content:
        # Normalize dict content to a JSON string for the API payload.
        content = content if isinstance(content, str) else ujson.dumps(content)
    if tags is not None:
        tags = validate_tags(tags, validate_yaml=True)
    if artifacts is not None:
        artifacts = validate_tags(artifacts, validate_yaml=True)
    if to_update:
        # Build a sparse patch payload: only the fields explicitly provided.
        version_config = polyaxon_sdk.V1ProjectVersion()
        if description is not None:
            version_config.description = description
        if tags:
            version_config.tags = tags
        if content:
            version_config.content = content
        if run:
            version_config.run = run
        if artifacts is not None:
            version_config.artifacts = artifacts
        if connection is not None:
            version_config.connection = connection
        return self.patch_version(
            kind=kind,
            version=version,
            data=version_config,
        )
    else:
        version_config = polyaxon_sdk.V1ProjectVersion(
            name=version,
            description=description,
            tags=tags,
            run=run,
            artifacts=artifacts,
            connection=connection,
            content=content,
        )
        return self.create_version(kind=kind, data=version_config)
@client_handler(check_no_op=True, check_offline=True)
def register_component_version(
    self,
    version: str,
    description: str = None,
    tags: Union[str, List[str]] = None,
    content: Union[str, Dict] = None,
    run: str = None,
    force: bool = False,
) -> polyaxon_sdk.V1ProjectVersion:
    """Creates or Updates a component version based on the data passed.

    Args:
        version: str, optional, the version name/tag.
        description: str, optional, the version description.
        tags: str or List[str], optional.
        content: str or dict, optional, content/metadata (JSON object) of the version.
        run: str, optional, a uuid reference to the run.
        force: bool, optional, to force push, i.e. update if exists.

    Returns:
        V1ProjectVersion, component version.
    """
    # Delegates to the generic register_version; note this wrapper does not
    # expose `connection`/`artifacts`, unlike the model/artifact wrappers.
    return self.register_version(
        kind=V1ProjectVersionKind.COMPONENT,
        version=version,
        description=description,
        tags=tags,
        content=content,
        run=run,
        force=force,
    )
@client_handler(check_no_op=True, check_offline=True)
def register_model_version(
    self,
    version: str,
    description: str = None,
    tags: Union[str, List[str]] = None,
    content: Union[str, Dict] = None,
    run: str = None,
    connection: str = None,
    artifacts: List[str] = None,
    force: bool = False,
) -> polyaxon_sdk.V1ProjectVersion:
    """Create or Update a model version based on the data passed.

    Args:
        version: str, optional, the version name/tag.
        description: str, optional, the version description.
        tags: str or List[str], optional.
        content: str or dict, optional, content/metadata (JSON object) of the version.
        run: str, optional, a uuid reference to the run.
        connection: str, optional, a uuid reference to a connection.
        artifacts: List[str], optional, list of artifacts to highlight(requires passing a run)
        force: bool, optional, to force push, i.e. update if exists.

    Returns:
        V1ProjectVersion, model version.
    """
    # Forward every option unchanged; only the kind is pinned here.
    options = dict(
        version=version,
        description=description,
        tags=tags,
        content=content,
        run=run,
        connection=connection,
        artifacts=artifacts,
        force=force,
    )
    return self.register_version(kind=V1ProjectVersionKind.MODEL, **options)
@client_handler(check_no_op=True, check_offline=True)
def register_artifact_version(
    self,
    version: str,
    description: str = None,
    tags: Union[str, List[str]] = None,
    content: Union[str, Dict] = None,
    run: str = None,
    connection: str = None,
    artifacts: List[str] = None,
    force: bool = False,
) -> polyaxon_sdk.V1ProjectVersion:
    """Create or Update an artifact version based on the data passed.

    Args:
        version: str, optional, the version name/tag.
        description: str, optional, the version description.
        tags: str or List[str], optional.
        content: str or dict, optional, content/metadata (JSON object) of the version.
        run: str, optional, a uuid reference to the run.
        connection: str, optional, a uuid reference to a connection.
        artifacts: List[str], optional, list of artifacts to highlight(requires passing a run)
        force: bool, optional, to force push, i.e. update if exists.

    Returns:
        V1ProjectVersion, artifact version.
    """
    # Forward every option unchanged; only the kind is pinned here.
    options = dict(
        version=version,
        description=description,
        tags=tags,
        content=content,
        run=run,
        connection=connection,
        artifacts=artifacts,
        force=force,
    )
    return self.register_version(kind=V1ProjectVersionKind.ARTIFACT, **options)
@client_handler(check_no_op=True, check_offline=True)
def delete_version(self, kind: V1ProjectVersionKind, version: str):
    """Deletes a project version under the current owner/project.

    This is a generic function based on the kind passed and deletes a:
        * component version
        * model version
        * artifact version

    [Project API](/docs/api/#operation/DeleteVersion)

    Args:
        kind: V1ProjectVersionKind, kind of the project version.
        version: str, required, the version name/tag.
    """
    self._validate_kind(kind)
    message = "Deleting {} version: `{}`".format(kind, version)
    logger.info(message)
    api = self.client.projects_v1
    return api.delete_version(
        self.owner, self.project, kind, version, async_req=False
    )
@client_handler(check_no_op=True, check_offline=True)
def delete_component_version(self, version: str):
    """Deletes a component version under the current owner/project.

    [Project API](/docs/api/#operation/DeleteVersion)

    Args:
        version: str, required, the version name/tag.
    """
    # Thin wrapper around the generic delete with the component kind.
    return self.delete_version(kind=V1ProjectVersionKind.COMPONENT, version=version)
@client_handler(check_no_op=True, check_offline=True)
def delete_model_version(self, version: str):
    """Deletes a model version under the current owner/project.

    [Project API](/docs/api/#operation/DeleteVersion)

    Args:
        version: str, required, the version name/tag.
    """
    # Thin wrapper around the generic delete with the model kind.
    return self.delete_version(kind=V1ProjectVersionKind.MODEL, version=version)
@client_handler(check_no_op=True, check_offline=True)
def delete_artifact_version(self, version: str):
    """Deletes an artifact version under the current owner/project.

    [Project API](/docs/api/#operation/DeleteVersion)

    Args:
        version: str, required, the version name/tag.
    """
    # Thin wrapper around the generic delete with the artifact kind.
    return self.delete_version(kind=V1ProjectVersionKind.ARTIFACT, version=version)
@client_handler(check_no_op=True, check_offline=True)
def stage_version(
    self,
    kind: V1ProjectVersionKind,
    version: str,
    stage: str,
    reason: str = None,
    message: str = None,
    last_transition_time: datetime = None,
    last_update_time: datetime = None,
):
    """Creates a new a project version stage.

    This is a generic function based on the kind passed and stages a:
        * component version
        * model version
        * artifact version

    [Project API](/docs/api/#operation/CreateVersionStage)

    Args:
        kind: V1ProjectVersionKind, kind of the project version.
        version: str, required, the version name/tag.
        stage: str, a valid [Stages](/docs/core/specification/lifecycle/) value.
        reason: str, optional, reason or service issuing the stage change.
        message: str, optional, message to log with this status.
        last_transition_time: datetime, default `now`.
        last_update_time: datetime, default `now`.
    """
    self._validate_kind(kind)
    # Use a single timestamp for both time fields when none are provided.
    timestamp = now()
    condition = V1StageCondition(
        type=stage,
        status=True,
        reason=reason or "ClientStageUpdate",
        message=message,
        last_transition_time=last_transition_time or timestamp,
        last_update_time=last_update_time or timestamp,
    )
    api = self.client.projects_v1
    return api.create_version_stage(
        self.owner,
        self.project,
        kind,
        version,
        body={"condition": condition},
        async_req=False,
    )
@client_handler(check_no_op=True, check_offline=True)
def stage_component_version(
    self,
    version: str,
    stage: str,
    reason: str = None,
    message: str = None,
    last_transition_time: datetime = None,
    last_update_time: datetime = None,
):
    """Creates a new a component version stage.

    [Project API](/docs/api/#operation/CreateVersionStage)

    Args:
        version: str, required, the version name/tag.
        stage: str, a valid [Stages](/docs/core/specification/lifecycle/) value.
        reason: str, optional, reason or service issuing the status change.
        message: str, optional, message to log with this status.
        last_transition_time: datetime, default `now`.
        last_update_time: datetime, default `now`.
    """
    # Forward all stage options unchanged; only the kind is pinned here.
    options = dict(
        version=version,
        stage=stage,
        reason=reason,
        message=message,
        last_transition_time=last_transition_time,
        last_update_time=last_update_time,
    )
    return self.stage_version(kind=V1ProjectVersionKind.COMPONENT, **options)
@client_handler(check_no_op=True, check_offline=True)
def stage_model_version(
    self,
    version: str,
    stage: str,
    reason: str = None,
    message: str = None,
    last_transition_time: datetime = None,
    last_update_time: datetime = None,
):
    """Creates a new a model version stage.

    [Project API](/docs/api/#operation/CreateVersionStage)

    Args:
        version: str, required, the version name/tag.
        stage: str, a valid [Stages](/docs/core/specification/lifecycle/) value.
        reason: str, optional, reason or service issuing the status change.
        message: str, optional, message to log with this status.
        last_transition_time: datetime, default `now`.
        last_update_time: datetime, default `now`.
    """
    # Forward all stage options unchanged; only the kind is pinned here.
    options = dict(
        version=version,
        stage=stage,
        reason=reason,
        message=message,
        last_transition_time=last_transition_time,
        last_update_time=last_update_time,
    )
    return self.stage_version(kind=V1ProjectVersionKind.MODEL, **options)
@client_handler(check_no_op=True, check_offline=True)
def stage_artifact_version(
    self,
    version: str,
    stage: str,
    reason: str = None,
    message: str = None,
    last_transition_time: datetime = None,
    last_update_time: datetime = None,
):
    """Creates a new an artifact version stage.

    [Project API](/docs/api/#operation/CreateVersionStage)

    Args:
        version: str, required, the version name/tag.
        stage: str, a valid [Stages](/docs/core/specification/lifecycle/) value.
        reason: str, optional, reason or service issuing the status change.
        message: str, optional, message to log with this status.
        last_transition_time: datetime, default `now`.
        last_update_time: datetime, default `now`.
    """
    # Forward all stage options unchanged; only the kind is pinned here.
    options = dict(
        version=version,
        stage=stage,
        reason=reason,
        message=message,
        last_transition_time=last_transition_time,
        last_update_time=last_update_time,
    )
    return self.stage_version(kind=V1ProjectVersionKind.ARTIFACT, **options)
@client_handler(check_no_op=True, check_offline=True)
def transfer_version(
    self, kind: V1ProjectVersionKind, version: str, to_project: str
):
    """Transfers the version to a project under the same owner/organization.

    This is a generic function based on the kind passed and transfers a:
        * component version
        * model version
        * artifact version

    [Run API](/docs/api/#operation/TransferVersion)

    Args:
        kind: V1ProjectVersionKind, kind of the project version.
        version: str, required, the version name/tag.
        to_project: str, required, the destination project to transfer the version to.
    """
    self._validate_kind(kind)
    payload = {"project": to_project}
    api = self.client.projects_v1
    return api.transfer_version(
        self.owner, self.project, kind, version, body=payload, async_req=False
    )
@client_handler(check_no_op=True, check_offline=True)
def transfer_component_version(self, version: str, to_project: str):
    """Transfers the component version to a project under the same owner/organization.

    [Run API](/docs/api/#operation/TransferVersion)

    Args:
        version: str, required, the version name/tag.
        to_project: str, required, the destination project to transfer the version to.
    """
    # Thin wrapper around the generic transfer with the component kind.
    return self.transfer_version(
        kind=V1ProjectVersionKind.COMPONENT, version=version, to_project=to_project
    )
@client_handler(check_no_op=True, check_offline=True)
def transfer_model_version(self, version: str, to_project: str):
    """Transfers the model version to a project under the same owner/organization.

    [Run API](/docs/api/#operation/TransferVersion)

    Args:
        version: str, required, the version name/tag.
        to_project: str, required, the destination project to transfer the version to.
    """
    # Thin wrapper around the generic transfer with the model kind.
    return self.transfer_version(
        kind=V1ProjectVersionKind.MODEL, version=version, to_project=to_project
    )
@client_handler(check_no_op=True, check_offline=True)
def transfer_artifact_version(self, version: str, to_project: str):
    """Transfers the artifact version to a project under the same owner/organization.

    [Run API](/docs/api/#operation/TransferVersion)

    Args:
        version: str, required, the version name/tag.
        to_project: str, required, the destination project to transfer the version to.
    """
    # Thin wrapper around the generic transfer with the artifact kind.
    return self.transfer_version(
        kind=V1ProjectVersionKind.ARTIFACT, version=version, to_project=to_project
    )
@client_handler(check_no_op=True, check_offline=True)
def copy_version(
    self,
    kind: V1ProjectVersionKind,
    version: str,
    to_project: str = None,
    name: str = None,
    description: str = None,
    tags: Union[str, List[str]] = None,
    content: Union[str, Dict] = None,
    force: bool = False,
) -> polyaxon_sdk.V1ProjectVersion:
    """Copies the version to the same project or to a destination project.

    If `to_project` is provided,
    the version will be copied to the destination project under the same owner/organization.

    If `name` is provided the version will be copied with the new name,
    otherwise the copied version will have a `-copy` suffix.

    This is a generic function based on the kind passed and copies a:
        * component version
        * model version
        * artifact version

    Args:
        kind: V1ProjectVersionKind, kind of the project version.
        version: str, required, the version name/tag.
        to_project: str, optional, the destination project to copy the version to.
        name: str, optional, the name to use for registering the copied version,
            default value is the original version's name with a `-copy` suffix.
        description: str, optional, the version description,
            default value is the original version's description.
        tags: str or List[str], optional, the version tags,
            default value is the original version's tags.
        content: str or dict, optional, content/metadata (JSON object) of the version,
            default value is the original version's content.
        force: bool, optional, to force push, i.e. update if exists.
    """
    # Fetch the source version first so its fields can serve as defaults below.
    original_version = self.get_version(kind, version)
    version = name if name else "{}-copy".format(version)
    # Register through a client bound to the destination project (falls back
    # to the current project), carrying over the original run/connection/
    # artifacts links; empty description/tags/content also fall back to the
    # original's values because of the `or` defaults.
    return ProjectClient(
        owner=self.owner,
        project=to_project or self.project,
        client=self.client,
    ).register_version(
        kind=kind,
        version=version,
        description=description or original_version.description,
        tags=tags or original_version.tags,
        content=content or original_version.content,
        run=original_version.run,
        connection=original_version.connection,
        artifacts=original_version.artifacts,
        force=force,
    )
@client_handler(check_no_op=True, check_offline=True)
def copy_component_version(
    self,
    version: str,
    to_project: str = None,
    name: str = None,
    description: str = None,
    tags: Union[str, List[str]] = None,
    content: Union[str, Dict] = None,
    force: bool = False,
) -> polyaxon_sdk.V1ProjectVersion:
    """Copies the component version to the same project or to a destination project.

    If `to_project` is provided,
    the version will be copied to the destination project under the same owner/organization.

    If `name` is provided the version will be copied with the new name,
    otherwise the copied version will have a `-copy` suffix.

    Args:
        version: str, required, the version name/tag.
        to_project: str, optional, the destination project to copy the version to.
        name: str, optional, the name to use for registering the copied version,
            default value is the original version's name with a `-copy` suffix.
        description: str, optional, the version description,
            default value is the original version's description.
        tags: str or List[str], optional, the version tags,
            default value is the original version's tags.
        content: str or dict, optional, content/metadata (JSON object) of the version,
            default value is the original version's content.
        force: bool, optional, to force push, i.e. update if exists.
    """
    # Forward all copy options unchanged; only the kind is pinned here.
    options = dict(
        version=version,
        to_project=to_project,
        name=name,
        description=description,
        tags=tags,
        content=content,
        force=force,
    )
    return self.copy_version(kind=V1ProjectVersionKind.COMPONENT, **options)
@client_handler(check_no_op=True, check_offline=True)
def copy_model_version(
    self,
    version: str,
    to_project: str = None,
    name: str = None,
    description: str = None,
    tags: Union[str, List[str]] = None,
    content: Union[str, Dict] = None,
    force: bool = False,
) -> polyaxon_sdk.V1ProjectVersion:
    """Copies the model version to the same project or to a destination project.

    If `to_project` is provided,
    the version will be copied to the destination project under the same owner/organization.

    If `name` is provided the version will be copied with the new name,
    otherwise the copied version will have a `-copy` suffix.

    Args:
        version: str, required, the version name/tag.
        to_project: str, optional, the destination project to copy the version to.
        name: str, optional, the name to use for registering the copied version,
            default value is the original version's name with a `-copy` suffix.
        description: str, optional, the version description,
            default value is the original version's description.
        tags: str or List[str], optional, the version tags,
            default value is the original version's tags.
        content: str or dict, optional, content/metadata (JSON object) of the version,
            default value is the original version's content.
        force: bool, optional, to force push, i.e. update if exists.
    """
    # Forward all copy options unchanged; only the kind is pinned here.
    options = dict(
        version=version,
        to_project=to_project,
        name=name,
        description=description,
        tags=tags,
        content=content,
        force=force,
    )
    return self.copy_version(kind=V1ProjectVersionKind.MODEL, **options)
@client_handler(check_no_op=True, check_offline=True)
def copy_artifact_version(
    self,
    version: str,
    to_project: str = None,
    name: str = None,
    description: str = None,
    tags: Union[str, List[str]] = None,
    content: Union[str, Dict] = None,
    force: bool = False,
) -> polyaxon_sdk.V1ProjectVersion:
    """Copies the artifact version to the same project or to a destination project.

    If `to_project` is provided,
    the version will be copied to the destination project under the same owner/organization.

    If `name` is provided the version will be copied with the new name,
    otherwise the copied version will have a `-copy` suffix.

    Args:
        version: str, required, the version name/tag.
        to_project: str, optional, the destination project to copy the version to.
        name: str, optional, the name to use for registering the copied version,
            default value is the original version's name with a `-copy` suffix.
        description: str, optional, the version description,
            default value is the original version's description.
        tags: str or List[str], optional, the version tags,
            default value is the original version's tags.
        content: str or dict, optional, content/metadata (JSON object) of the version,
            default value is the original version's content.
        force: bool, optional, to force push, i.e. update if exists.
    """
    # Forward all copy options unchanged; only the kind is pinned here.
    options = dict(
        version=version,
        to_project=to_project,
        name=name,
        description=description,
        tags=tags,
        content=content,
        force=force,
    )
    return self.copy_version(kind=V1ProjectVersionKind.ARTIFACT, **options)
@client_handler(check_no_op=True)
def persist_version(self, config: polyaxon_sdk.V1ProjectVersion, path: str):
    """Persists a version to a local path.

    Writes the sanitized version config as JSON under `path`, and when the
    version carries content, writes that content to a sibling file as well.

    Args:
        config: V1ProjectVersion, the version config to persist.
        path: str, the path where to persist the version config.
    """
    if not config:
        # Best-effort: missing config is logged at debug level, not raised.
        logger.debug(
            "Persist offline run call failed. "
            "Make sure that the offline mode is enabled and that run_data is provided."
        )
        return
    # Ensure the destination directory exists before writing.
    if not path or not os.path.exists(path):
        check_or_create_path(path, is_dir=True)
    version_path = "{}/{}".format(path, ctx_paths.CONTEXT_LOCAL_VERSION)
    with open(version_path, "w") as config_file:
        config_file.write(
            ujson.dumps(self.client.sanitize_for_serialization(config))
        )
    # Versions without content only get the config file written above.
    if not config.content:
        return
    if config.kind == V1ProjectVersionKind.COMPONENT:
        # Component content is persisted as the local polyaxonfile.
        version_path = "{}/{}".format(path, ctx_paths.CONTEXT_LOCAL_POLYAXONFILE)
    else:
        # Persist content metadata as content.json file
        version_path = "{}/{}".format(path, ctx_paths.CONTEXT_LOCAL_CONTENT)
    with open(version_path, "w") as config_file:
        config_file.write(config.content)
@client_handler(check_no_op=True, check_offline=True)
def download_artifacts_for_version(
    self, config: polyaxon_sdk.V1ProjectVersion, path: str
):
    """Collects and downloads all artifacts and assets linked to a version.

    Only model and artifact versions are processed; the version must be
    linked to a run and have lineage entries in its meta_info, otherwise
    the download is skipped with an informational log message.

    Args:
        config: V1ProjectVersion, the version config to download the artifacts for.
        path: str, the path where to persist the artifacts and assets.
    """
    # Component versions carry no downloadable artifacts.
    if config.kind not in {
        V1ProjectVersionKind.MODEL,
        V1ProjectVersionKind.ARTIFACT,
    }:
        logger.info(
            "Skip artifacts download for version {} with kind {}.".format(
                config.name, config.kind
            )
        )
        return
    meta_info = config.meta_info or {}
    run_info = meta_info.get("run", {})
    if not run_info:
        logger.info(
            "Skip artifacts download for version {} with kind {}. "
            "The version is not linked to any run.".format(config.name, config.kind)
        )
        return
    # Rehydrate lineage entries, ignoring unknown fields.
    run_artifacts = [
        V1RunArtifact.from_dict(a, unknown=EXCLUDE)
        for a in meta_info.get("lineage", [])
    ]
    if not run_artifacts:
        logger.info(
            "Skip artifacts download for version {} with kind {}. "
            "The version is not linked to any artifacts.".format(
                config.name, config.kind
            )
        )
        return
    # Fall back to this client's project / the version's run uuid when the
    # meta_info does not specify them.
    run_project = run_info.get("project", self.project)
    run_uuid = run_info.get("uuid", config.run)
    # Local import to avoid a circular dependency with the run client module.
    from polyaxon.client.run import RunClient
    # Creating run client to download artifacts
    run_client = RunClient(owner=self.owner, project=run_project, run_uuid=run_uuid)
    for artifact_lineage in run_artifacts:
        logger.info(
            "Downloading artifact {} with kind {} and remote path {} to {}".format(
                artifact_lineage.name,
                artifact_lineage.kind,
                artifact_lineage.path,
                path,
            )
        )
        run_client.download_artifact_for_lineage(
            lineage=artifact_lineage, path_to=path
        )
@client_handler(check_no_op=True, check_offline=True)
def pull_version(
    self,
    kind: V1ProjectVersionKind,
    version: str,
    path: str,
    download_artifacts: bool = True,
):
    """Packages and downloads the version to a local path.

    This is a generic function based on the kind passed and pulls a:
        * component version
        * model version
        * artifact version

    Args:
        kind: V1ProjectVersionKind, kind of the project version.
        version: str, required, the version name/tag.
        path: str, optional, defaults to the offline root path,
            path where to persist the metadata and artifacts.
        download_artifacts: bool, optional, to download the artifacts based on linked lineage.
    """
    # Resolve the local destination, then start from a clean directory.
    target_path = ctx_paths.get_offline_path(
        entity_value=version, entity_kind=kind, path=path
    )
    delete_path(target_path)
    # Fetch the version metadata, persist it, and optionally pull artifacts.
    version_config = self.get_version(kind=kind, version=version)
    self.persist_version(config=version_config, path=target_path)
    if download_artifacts:
        self.download_artifacts_for_version(config=version_config, path=target_path)
    return target_path
@client_handler(check_no_op=True, check_offline=True)
def pull_component_version(
    self,
    version: str,
    path: str,
):
    """Packages and downloads the component version to a local path.

    Args:
        version: str, required, the version name/tag.
        path: str, local path where to persist the metadata and artifacts.
    """
    # Component pulls never download artifacts: download_artifacts is pinned
    # to False here.
    return self.pull_version(
        V1ProjectVersionKind.COMPONENT, version, path, download_artifacts=False
    )
@client_handler(check_no_op=True, check_offline=True)
def pull_model_version(
    self,
    version: str,
    path: str,
    download_artifacts: bool = True,
):
    """Packages and downloads the model version to a local path.

    Args:
        version: str, required, the version name/tag.
        path: str, local path where to persist the metadata and artifacts.
        download_artifacts: bool, optional, to download the artifacts based on linked lineage.
    """
    # Thin wrapper around the generic pull with the model kind.
    return self.pull_version(
        V1ProjectVersionKind.MODEL, version, path, download_artifacts=download_artifacts
    )
@client_handler(check_no_op=True, check_offline=True)
def pull_artifact_version(
    self,
    version: str,
    path: str,
    download_artifacts: bool = True,
):
    """Packages and downloads the artifact version to a local path.

    Args:
        version: str, required, the version name/tag.
        path: str, local path where to persist the metadata and artifacts.
        download_artifacts: bool, optional, to download the artifacts based on linked lineage.
    """
    # Thin wrapper around the generic pull with the artifact kind.
    return self.pull_version(
        V1ProjectVersionKind.ARTIFACT,
        version,
        path,
        download_artifacts=download_artifacts,
    )
| 36.342622
| 98
| 0.614011
| 6,390
| 57,385
| 5.39687
| 0.056025
| 0.023198
| 0.027663
| 0.030737
| 0.802529
| 0.775909
| 0.749667
| 0.707707
| 0.682596
| 0.661979
| 0
| 0.00463
| 0.307467
| 57,385
| 1,578
| 99
| 36.365653
| 0.863139
| 0.404966
| 0
| 0.621324
| 0
| 0
| 0.026058
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.071078
| false
| 0.001225
| 0.026961
| 0.003676
| 0.17402
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
b681878e058048f54d7a087ac45cdcd89032e3fe
| 235
|
py
|
Python
|
elf/types/base/bitmask/__init__.py
|
Valmarelox/elftoolsng
|
99c3f4913a7e477007b1d81df83274d7657bf693
|
[
"MIT"
] | null | null | null |
elf/types/base/bitmask/__init__.py
|
Valmarelox/elftoolsng
|
99c3f4913a7e477007b1d81df83274d7657bf693
|
[
"MIT"
] | null | null | null |
elf/types/base/bitmask/__init__.py
|
Valmarelox/elftoolsng
|
99c3f4913a7e477007b1d81df83274d7657bf693
|
[
"MIT"
] | null | null | null |
from .elf_int_8_bitmask import ElfInt8BitMask
from .elf_int_16_bitmask import ElfInt16BitMask
from .elf_int_32_bitmask import ElfInt32BitMask
from .elf_int_64_bitmask import ElfInt64BitMask
from .elf_int_n_bitmask import ElfIntNBitMask
| 47
| 47
| 0.897872
| 35
| 235
| 5.6
| 0.428571
| 0.178571
| 0.255102
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.064815
| 0.080851
| 235
| 5
| 48
| 47
| 0.842593
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
fcb188a5bd49f61c5a28dd70a26db2b4f5516bc4
| 181
|
py
|
Python
|
tests/unit_tests.py
|
bmgxyz/passport
|
708ac0805445de7a414c698fd9398aa336fa4d05
|
[
"Unlicense"
] | 1
|
2018-04-16T23:45:41.000Z
|
2018-04-16T23:45:41.000Z
|
tests/unit_tests.py
|
bmgxyz/passport
|
708ac0805445de7a414c698fd9398aa336fa4d05
|
[
"Unlicense"
] | null | null | null |
tests/unit_tests.py
|
bmgxyz/passport
|
708ac0805445de7a414c698fd9398aa336fa4d05
|
[
"Unlicense"
] | 1
|
2018-04-16T23:45:52.000Z
|
2018-04-16T23:45:52.000Z
|
import unittest
from get_key import GetKey
from encrypt_and_write import EncryptAndWrite
from read_and_decrypt import ReadAndDecrypt
if __name__ == '__main__':
unittest.main()
| 22.625
| 45
| 0.823204
| 24
| 181
| 5.666667
| 0.666667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.132597
| 181
| 7
| 46
| 25.857143
| 0.866242
| 0
| 0
| 0
| 0
| 0
| 0.044199
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.666667
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
fcd6135249e75ebce7f7a72f26fd53083cc883f6
| 289
|
py
|
Python
|
Yakisizwe/views.py
|
NAL0/nalbt
|
c411ead60fac8923e960e67f4bbad5c7aeffc614
|
[
"MIT"
] | null | null | null |
Yakisizwe/views.py
|
NAL0/nalbt
|
c411ead60fac8923e960e67f4bbad5c7aeffc614
|
[
"MIT"
] | null | null | null |
Yakisizwe/views.py
|
NAL0/nalbt
|
c411ead60fac8923e960e67f4bbad5c7aeffc614
|
[
"MIT"
] | null | null | null |
from django.shortcuts import render
# Create your views here.
def index(request):
return render(request, 'Yakisizwe/initiative.html')
def index2(request):
return render(request, 'Yakisizwe/dash.html')
def index3(request):
return render(request, 'Yakisizwe/Education.html')
| 22.230769
| 55
| 0.747405
| 36
| 289
| 6
| 0.555556
| 0.180556
| 0.263889
| 0.361111
| 0.486111
| 0
| 0
| 0
| 0
| 0
| 0
| 0.008032
| 0.138408
| 289
| 12
| 56
| 24.083333
| 0.859438
| 0.079585
| 0
| 0
| 0
| 0
| 0.257576
| 0.185606
| 0
| 0
| 0
| 0
| 0
| 1
| 0.428571
| false
| 0
| 0.142857
| 0.428571
| 1
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 5
|
fcdddc826a06124ad6fc2cfde912e62a58a4be9a
| 70
|
py
|
Python
|
tests/conftest.py
|
mlasch/scikit-build
|
664dd9c41cc54047d6d648b0466d525573da5a94
|
[
"MIT"
] | 299
|
2015-10-19T22:45:08.000Z
|
2022-03-30T21:15:55.000Z
|
tests/conftest.py
|
mlasch/scikit-build
|
664dd9c41cc54047d6d648b0466d525573da5a94
|
[
"MIT"
] | 588
|
2015-09-17T04:26:59.000Z
|
2022-03-29T14:51:54.000Z
|
tests/conftest.py
|
mlasch/scikit-build
|
664dd9c41cc54047d6d648b0466d525573da5a94
|
[
"MIT"
] | 102
|
2015-10-19T22:45:13.000Z
|
2022-03-20T21:09:08.000Z
|
import pytest
pytest.register_assert_rewrite('tests.pytest_helpers')
| 17.5
| 54
| 0.857143
| 9
| 70
| 6.333333
| 0.777778
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.057143
| 70
| 3
| 55
| 23.333333
| 0.863636
| 0
| 0
| 0
| 0
| 0
| 0.285714
| 0
| 0
| 0
| 0
| 0
| 0.5
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
1e033e28d7cfb58bd742272737d742d887182a2d
| 5,747
|
py
|
Python
|
notebooks/prediction_exploration.py
|
TuomoKareoja/phone-sentiment-analysis
|
ed0739b6f25fd1b9bd813939b129f01902faa5c4
|
[
"MIT"
] | null | null | null |
notebooks/prediction_exploration.py
|
TuomoKareoja/phone-sentiment-analysis
|
ed0739b6f25fd1b9bd813939b129f01902faa5c4
|
[
"MIT"
] | null | null | null |
notebooks/prediction_exploration.py
|
TuomoKareoja/phone-sentiment-analysis
|
ed0739b6f25fd1b9bd813939b129f01902faa5c4
|
[
"MIT"
] | null | null | null |
#%%
import re
import os
import pprint
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
import sklearn
from dotenv import find_dotenv, load_dotenv
from IPython.core.interactiveshell import InteractiveShell
from wordcloud import WordCloud, STOPWORDS, ImageColorGenerator
# Setting styles
pp = pprint.PrettyPrinter(indent=4)
InteractiveShell.ast_node_interactivity = "all"
sns.set(style="whitegrid", color_codes=True, rc={"figure.figsize": (12.7, 9.27)})
#%%
data = pd.read_csv(os.path.join("data", "predictions", "predictions.csv"))
#%%
sns.countplot(x="iphone", data=data)
plt.title("Number of Times IPhone Mentioned")
plt.xlabel("Number of Mentions")
plt.show()
sns.countplot(x="samsunggalaxy", data=data)
plt.title("Number of Times Galaxy Mentioned")
plt.xlabel("Number of Mentions")
plt.show()
# %%
sns.scatterplot(
x="random_forest_iphone",
y="iphone",
alpha=0.2,
data=data[data.iphone < 100],
s=100,
label="iPhone",
)
sns.scatterplot(
x="random_forest_galaxy",
y="samsunggalaxy",
alpha=0.2,
data=data,
s=100,
label="Samsung Galaxy",
)
plt.xlabel("Positivity of Sentiment")
plt.ylabel("Number of Mentions")
plt.legend()
plt.show()
# %%
sns.scatterplot(
x="random_forest_iphone",
y="iphone",
alpha=0.2,
data=data[(data.iphone > 0) & (data.iphone < 100)],
s=100,
label="iPhone",
)
sns.scatterplot(
x="random_forest_galaxy",
y="samsunggalaxy",
alpha=0.2,
data=data[data.samsunggalaxy > 0],
s=100,
label="Samsung Galaxy",
)
plt.xlabel("Positivity of Sentiment")
plt.ylabel("Number of Mentions")
plt.legend()
plt.show()
#%%
sns.scatterplot(
x="random_forest_iphone",
y="iphone",
alpha=0.2,
data=data[
(data.url.str.contains("iphone")) & (data.iphone > 1) & (data.iphone < 100)
],
s=100,
label="iPhone",
)
sns.scatterplot(
x="random_forest_galaxy",
y="samsunggalaxy",
alpha=0.2,
data=data[(data.url.str.contains("galaxy")) & (data.samsunggalaxy > 1)],
s=100,
label="Samsung Galaxy",
)
plt.xlabel("Positivity of Sentiment")
plt.ylabel("Number of Mentions")
plt.legend()
plt.show()
pp.pprint(data[(data.url.str.contains("iphone"))].url.head(20))
pp.pprint(data[(data.url.str.contains("galaxy"))].url.head(20))
# %%
stopwords = set(STOPWORDS)
stopwords.update(
["html", "www", "https", "http", "wordpress", "www", "amp", "tag", "com", "net"]
)
def getWordsFromURL(url):
return re.compile(r"[\:/?=\-&]+", re.UNICODE).split(url)
text = " ".join(
" ".join(url)
for url in data[(data.iphone == 0) & (data.random_forest_iphone == 0)].url.apply(
getWordsFromURL
)
)
wordcloud = WordCloud(stopwords=stopwords).generate(text)
plt.imshow(wordcloud, interpolation="bilinear")
plt.axis("off")
plt.show()
text = " ".join(
" ".join(url) for url in data[(data.iphone == 1)].url.apply(getWordsFromURL)
)
wordcloud = WordCloud(stopwords=stopwords).generate(text)
plt.imshow(wordcloud, interpolation="bilinear")
plt.axis("off")
plt.show()
text = " ".join(
" ".join(url) for url in data[(data.iphone >= 10)].url.apply(getWordsFromURL)
)
wordcloud = WordCloud(stopwords=stopwords).generate(text)
plt.imshow(wordcloud, interpolation="bilinear")
plt.axis("off")
plt.show()
text = " ".join(
" ".join(url)
for url in data[
(data.samsunggalaxy == 0) & (data.random_forest_galaxy >= 4)
].url.apply(getWordsFromURL)
)
wordcloud = WordCloud(stopwords=stopwords).generate(text)
plt.imshow(wordcloud, interpolation="bilinear")
plt.axis("off")
plt.show()
text = " ".join(
" ".join(url) for url in data[(data.samsunggalaxy == 1)].url.apply(getWordsFromURL)
)
wordcloud = WordCloud(stopwords=stopwords).generate(text)
plt.imshow(wordcloud, interpolation="bilinear")
plt.axis("off")
plt.show()
text = " ".join(
" ".join(url) for url in data[(data.samsunggalaxy >= 10)].url.apply(getWordsFromURL)
)
wordcloud = WordCloud(stopwords=stopwords).generate(text)
plt.imshow(wordcloud, interpolation="bilinear")
plt.axis("off")
plt.show()
# %%
sns.pairplot(
data[data.iphone < 100][
["iphone", "samsunggalaxy", "random_forest_iphone", "random_forest_galaxy"]
].sample(n=5000)
)
# plt.xlabel("Positivity of Sentiment")
# plt.ylabel("Number of Mentions")
# plt.legend()
plt.show()
# %%
sns.distplot(
data.random_forest_galaxy,
bins=50,
kde=False,
hist=True,
norm_hist=True,
label="Samsung Galaxy",
)
sns.distplot(
data.random_forest_iphone,
bins=50,
kde=False,
hist=True,
norm_hist=True,
label="iPhone",
)
plt.title("Phone Sentiment Distribution")
plt.xlabel("Positivity of Sentiment")
plt.ylabel("")
plt.legend()
plt.show()
# %%
sns.distplot(
data[data.samsunggalaxy > 0].random_forest_galaxy,
bins=50,
kde=False,
hist=True,
norm_hist=True,
label="Samsung Galaxy",
)
sns.distplot(
data[data.iphone > 0].random_forest_iphone,
bins=50,
kde=False,
hist=True,
norm_hist=True,
label="iPhone",
)
plt.title("Phone Sentiment Distribution (Phones Mentioned at Least Once)")
plt.xlabel("Positivity of Sentiment")
plt.ylabel("")
plt.legend()
plt.show()
# %%
sns.distplot(
data[data.samsunggalaxy == 0].random_forest_galaxy,
bins=50,
kde=False,
hist=True,
norm_hist=True,
label="Samsung Galaxy",
)
sns.distplot(
data[data.iphone == 0].random_forest_iphone,
bins=50,
kde=False,
hist=True,
norm_hist=True,
label="iPhone",
)
plt.title("Phone Sentiment Distribution (Phones Mentioned at Least Once)")
plt.xlabel("Positivity of Sentiment")
plt.ylabel("")
plt.legend()
plt.show()
#%%
data[data.iphone > 100].url.head()
# %%
plt.show()
# %%
| 20.525
| 88
| 0.670089
| 751
| 5,747
| 5.069241
| 0.177097
| 0.056738
| 0.033097
| 0.038613
| 0.773575
| 0.771999
| 0.765695
| 0.731547
| 0.731547
| 0.703704
| 0
| 0.018591
| 0.157647
| 5,747
| 279
| 89
| 20.598566
| 0.767817
| 0.023665
| 0
| 0.641509
| 0
| 0
| 0.183444
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.004717
| false
| 0
| 0.051887
| 0.004717
| 0.061321
| 0.018868
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
1e4250b68aa97b862431161c706da912c5f0079e
| 64
|
py
|
Python
|
home/ts/__init__.py
|
d0ugal/home
|
e984716ae6c74dc8e40346584668ac5cfeaaf520
|
[
"BSD-3-Clause"
] | 1
|
2018-10-25T08:34:54.000Z
|
2018-10-25T08:34:54.000Z
|
home/ts/__init__.py
|
d0ugal/home
|
e984716ae6c74dc8e40346584668ac5cfeaaf520
|
[
"BSD-3-Clause"
] | null | null | null |
home/ts/__init__.py
|
d0ugal/home
|
e984716ae6c74dc8e40346584668ac5cfeaaf520
|
[
"BSD-3-Clause"
] | null | null | null |
"""
home.ts.__init__
================
Nothing to see here!
"""
| 9.142857
| 20
| 0.46875
| 7
| 64
| 3.714286
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.140625
| 64
| 6
| 21
| 10.666667
| 0.472727
| 0.859375
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
1e4e8a84ea15ffd59ae2a3f4b34c480215fe587b
| 6,850
|
py
|
Python
|
bench/pyscripts/testferaxis.py
|
josborne-noaa/PyFerret
|
8496508e9902c0184898522e9f89f6caea6d4539
|
[
"Unlicense"
] | 44
|
2016-03-18T22:05:31.000Z
|
2021-12-23T01:50:09.000Z
|
bench/pyscripts/testferaxis.py
|
josborne-noaa/PyFerret
|
8496508e9902c0184898522e9f89f6caea6d4539
|
[
"Unlicense"
] | 88
|
2016-08-19T08:05:37.000Z
|
2022-03-28T23:29:21.000Z
|
bench/pyscripts/testferaxis.py
|
josborne-noaa/PyFerret
|
8496508e9902c0184898522e9f89f6caea6d4539
|
[
"Unlicense"
] | 24
|
2016-02-07T18:12:06.000Z
|
2022-02-19T09:06:17.000Z
|
# To be run in python after importing and starting pyferret
# such as from running "pyferret -python"
from __future__ import print_function
import numpy
import sys ; sys.ps1 = '' ; sys.ps2 = ''
print()
print(">>> normax = pyferret.FerAxis()")
normax = pyferret.FerAxis()
print(">>> print repr(normax)")
print(repr(normax))
print(">>> dir(normax)")
dir(normax)
print(">>> coads = pyferret.FerDSet('coads_climatology')")
coads = pyferret.FerDSet('coads_climatology')
print(">>> coads.sst.load()")
coads.sst.load()
print(">>> sstaxes = coads.sst.grid.axes")
sstaxes = coads.sst.grid.axes
print(">>> print repr(sstaxes)")
print(repr(sstaxes))
print(">>> normax == sstaxes[0]")
normax == sstaxes[0]
print(">>> normax != sstaxes[1]")
normax != sstaxes[1]
print(">>> normax == sstaxes[2]")
normax == sstaxes[2]
print(">>> normax != sstaxes[3]")
normax != sstaxes[3]
print(">>> print repr(sstaxes[0].axtype)")
print(repr(sstaxes[0].axtype))
print(">>> print repr(sstaxes[0].coords)")
print(repr(sstaxes[0].coords))
print(">>> print repr(sstaxes[0].unit)")
print(repr(sstaxes[0].unit))
print(">>> print repr(sstaxes[0].name)")
print(repr(sstaxes[0].name))
print(">>> print repr(sstaxes[1].axtype)")
print(repr(sstaxes[1].axtype))
print(">>> print repr(sstaxes[1].coords)")
print(repr(sstaxes[1].coords))
print(">>> print repr(sstaxes[1].unit)")
print(repr(sstaxes[1].unit))
print(">>> print repr(sstaxes[1].name)")
print(repr(sstaxes[1].name))
print(">>> print repr(sstaxes[2].axtype)")
print(repr(sstaxes[2].axtype))
print(">>> print repr(sstaxes[2].coords)")
print(repr(sstaxes[2].coords))
print(">>> print repr(sstaxes[2].unit)")
print(repr(sstaxes[2].unit))
print(">>> print repr(sstaxes[2].name)")
print(repr(sstaxes[2].name))
print(">>> print repr(sstaxes[3].axtype)")
print(repr(sstaxes[3].axtype))
print(">>> print repr(sstaxes[3].coords)")
print(repr(sstaxes[3].coords))
print(">>> print repr(sstaxes[3].unit)")
print(repr(sstaxes[3].unit))
print(">>> print repr(sstaxes[3].name)")
print(repr(sstaxes[3].name))
print(">>> dupaxis = sstaxes[0].copy()")
dupaxis = sstaxes[0].copy()
print(">>> dupaxis is sstaxes[0]")
dupaxis is sstaxes[0]
print(">>> dupaxis == sstaxes[0]")
dupaxis == sstaxes[0]
print(">>> dupaxis.coords is sstaxes[0].coords")
dupaxis.coords is sstaxes[0].coords
print(">>> numpy.allclose(dupaxis.coords, sstaxes[0].coords)")
numpy.allclose(dupaxis.coords, sstaxes[0].coords)
print(">>> dupaxis = sstaxes[3].copy()")
dupaxis = sstaxes[3].copy()
print(">>> dupaxis is sstaxes[3]")
dupaxis is sstaxes[3]
print(">>> dupaxis == sstaxes[3]")
dupaxis == sstaxes[3]
print(">>> dupaxis.coords is sstaxes[3].coords")
dupaxis.coords is sstaxes[3].coords
print(">>> numpy.allclose(dupaxis.coords, sstaxes[3].coords)")
numpy.allclose(dupaxis.coords, sstaxes[3].coords)
print(">>> print repr(pyferret.FerAxis._parsegeoval(None))")
print(repr(pyferret.FerAxis._parsegeoval(None)))
print(">>> print repr(pyferret.FerAxis._parsegeoval(0))")
print(repr(pyferret.FerAxis._parsegeoval(0)))
print(">>> print repr(pyferret.FerAxis._parsegeoval(0.0))")
print(repr(pyferret.FerAxis._parsegeoval(0.0)))
print(">>> print repr(pyferret.FerAxis._parsegeoval('0'))")
print(repr(pyferret.FerAxis._parsegeoval('0')))
print(">>> print repr(pyferret.FerAxis._parsegeoval('5E'))")
print(repr(pyferret.FerAxis._parsegeoval('5E')))
print(">>> print repr(pyferret.FerAxis._parsegeoval('6W'))")
print(repr(pyferret.FerAxis._parsegeoval('6W')))
print(">>> print repr(pyferret.FerAxis._parsegeoval('7N'))")
print(repr(pyferret.FerAxis._parsegeoval('7N')))
print(">>> print repr(pyferret.FerAxis._parsegeoval('8S'))")
print(repr(pyferret.FerAxis._parsegeoval('8S')))
print(">>> print repr(pyferret.FerAxis._parsegeoval('9m'))")
print(repr(pyferret.FerAxis._parsegeoval('9m')))
print(">>> print repr(pyferret.FerAxis._parsegeoval('03-APR-2005 06:07:08'))")
print(repr(pyferret.FerAxis._parsegeoval('03-APR-2005 06:07:08')))
print(">>> print repr(pyferret.FerAxis._parsegeoval('03-APR-2005 06:07'))")
print(repr(pyferret.FerAxis._parsegeoval('03-APR-2005 06:07')))
print(">>> print repr(pyferret.FerAxis._parsegeoval('03-APR-2005'))")
print(repr(pyferret.FerAxis._parsegeoval('03-APR-2005')))
print(">>> print repr(pyferret.FerAxis._parsegeoval('2003-04-05T06:07:08'))")
print(repr(pyferret.FerAxis._parsegeoval('2003-04-05T06:07:08')))
print(">>> print repr(pyferret.FerAxis._parsegeoval('2003-04-05T06:07'))")
print(repr(pyferret.FerAxis._parsegeoval('2003-04-05T06:07')))
print(">>> print repr(pyferret.FerAxis._parsegeoval('2003-04-05 06:07:08'))")
print(repr(pyferret.FerAxis._parsegeoval('2003-04-05 06:07:08')))
print(">>> print repr(pyferret.FerAxis._parsegeoval('2003-04-05 06:07'))")
print(repr(pyferret.FerAxis._parsegeoval('2003-04-05 06:07')))
print(">>> print repr(pyferret.FerAxis._parsegeoval('2003-04-05'))")
print(repr(pyferret.FerAxis._parsegeoval('2003-04-05')))
print(">>> print repr(pyferret.FerAxis._parsegeoval('4y', istimestep=True))")
print(repr(pyferret.FerAxis._parsegeoval('4y', istimestep=True)))
print(">>> print repr(pyferret.FerAxis._parsegeoval('6d', istimestep=True))")
print(repr(pyferret.FerAxis._parsegeoval('6d', istimestep=True)))
print(">>> print repr(pyferret.FerAxis._parsegeoval('7h', istimestep=True))")
print(repr(pyferret.FerAxis._parsegeoval('7h', istimestep=True)))
print(">>> print repr(pyferret.FerAxis._parsegeoval('8m', istimestep=True))")
print(repr(pyferret.FerAxis._parsegeoval('8m', istimestep=True)))
print(">>> print repr(pyferret.FerAxis._parsegeoval('9s', istimestep=True))")
print(repr(pyferret.FerAxis._parsegeoval('9s', istimestep=True)))
print(">>> print repr(pyferret.FerAxis._parsegeoval('1', istimestep=True))")
print(repr(pyferret.FerAxis._parsegeoval('1', istimestep=True)))
print(">>> print repr(pyferret.FerAxis._parsegeoslice( slice(5,23,2) ))")
print(repr(pyferret.FerAxis._parsegeoslice( slice(5,23,2) )))
print(">>> print repr(pyferret.FerAxis._parsegeoslice( slice(-5.0,15.0,4.0) ))")
print(repr(pyferret.FerAxis._parsegeoslice( slice(-5.0,15.0,4.0) )))
print(">>> print repr(pyferret.FerAxis._parsegeoslice( slice('-6','11','5') ))")
print(repr(pyferret.FerAxis._parsegeoslice( slice('-6','11','5') )))
print(">>> print repr(pyferret.FerAxis._parsegeoslice( slice('25W','35E',5) ))")
print(repr(pyferret.FerAxis._parsegeoslice( slice('25W','35E',5) )))
print(">>> print repr(pyferret.FerAxis._parsegeoslice( slice('15S','30N',3) ))")
print(repr(pyferret.FerAxis._parsegeoslice( slice('15S','30N',3) )))
print(">>> print repr(pyferret.FerAxis._parsegeoslice( slice('-900m','-100m','50m') ))")
print(repr(pyferret.FerAxis._parsegeoslice( slice('-900m','-100m','50m') )))
print(">>> print repr(pyferret.FerAxis._parsegeoslice( slice('03-APR-2005 11:30','23-JUL-2006 23:30','12h') ))")
print(repr(pyferret.FerAxis._parsegeoslice( slice('03-APR-2005 11:30','23-JUL-2006 23:30','12h') )))
| 43.910256
| 112
| 0.710511
| 933
| 6,850
| 5.144695
| 0.095391
| 0.18
| 0.2125
| 0.3
| 0.867292
| 0.829167
| 0.643125
| 0.515
| 0.506875
| 0.345208
| 0
| 0.058741
| 0.060584
| 6,850
| 155
| 113
| 44.193548
| 0.687179
| 0.014161
| 0
| 0
| 0
| 0.028986
| 0.487776
| 0.257371
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.021739
| 0
| 0.021739
| 0.847826
| 0
| 0
| 0
| null | 0
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
|
0
| 5
|
1e708bb5283f22fe4fd8a2c3e5621b199ed8b72f
| 1,055
|
py
|
Python
|
lambdata_johanaluna/tryme_test.py
|
johanaluna/lambdata
|
342ffd027de3a7a68ce52164df568f502b65d77f
|
[
"MIT"
] | null | null | null |
lambdata_johanaluna/tryme_test.py
|
johanaluna/lambdata
|
342ffd027de3a7a68ce52164df568f502b65d77f
|
[
"MIT"
] | 4
|
2020-03-24T17:49:32.000Z
|
2021-06-02T00:34:44.000Z
|
lambdata_johanaluna/tryme_test.py
|
johanaluna/lambdata
|
342ffd027de3a7a68ce52164df568f502b65d77f
|
[
"MIT"
] | null | null | null |
import unittest
import pandas as pd
from tryme2 import Check_Data
class Check_Data_test(unittest.TestCase):
def test_nulls(self):
listdata = [['tom', 10,0], ['nick', 15,1],
['juli', 14,1],['sebastian', 10,0],['dfs', 10,0],
['isa', 34,1],['lucy', 15,0]]
data = pd.DataFrame(listdata, columns = ['Name', 'Age','Sex'])
target='Sex'
nulls_out=data.isnull().sum().sort_values(ascending=False)
tryme2_go= Check_Data(data,target)
self.assertIsNotNone(nulls_out,tryme2_go.reportnulls())
# def test_split(self):
# listdata = [['tom', 10,0], ['nick', 15,1],
# ['juli', 14,1],['sebastian', 10,0],['dfs', 10,0],
# ['isa', 34,1],['lucy', 15,0]]
# data = pd.DataFrame(listdata, columns = ['Name', 'Age','Sex'])
# target='Sex'
# nulls_out=data.isnull().sum().sort_values(ascending=False)
# tryme2_go= Check_Data(data,target)
# self.assertIsNotNone(nulls_out,tryme2_go.reportnulls())
if __name__ == '__main__':
unittest.main()
| 36.37931
| 72
| 0.587678
| 138
| 1,055
| 4.311594
| 0.347826
| 0.030252
| 0.05042
| 0.057143
| 0.773109
| 0.773109
| 0.773109
| 0.773109
| 0.773109
| 0.773109
| 0
| 0.056558
| 0.212322
| 1,055
| 28
| 73
| 37.678571
| 0.659446
| 0.381043
| 0
| 0
| 0
| 0
| 0.079316
| 0
| 0
| 0
| 0
| 0
| 0.066667
| 1
| 0.066667
| false
| 0
| 0.2
| 0
| 0.333333
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
1ea0ef78bc59d233acff3239f3423830a799ffff
| 219
|
py
|
Python
|
src/airfly/_vendor/airflow/contrib/operators/adls_to_gcs.py
|
ryanchao2012/airfly
|
230ddd88885defc67485fa0c51f66c4a67ae98a9
|
[
"MIT"
] | 7
|
2021-09-27T11:38:48.000Z
|
2022-02-01T06:06:24.000Z
|
src/airfly/_vendor/airflow/contrib/operators/adls_to_gcs.py
|
ryanchao2012/airfly
|
230ddd88885defc67485fa0c51f66c4a67ae98a9
|
[
"MIT"
] | null | null | null |
src/airfly/_vendor/airflow/contrib/operators/adls_to_gcs.py
|
ryanchao2012/airfly
|
230ddd88885defc67485fa0c51f66c4a67ae98a9
|
[
"MIT"
] | null | null | null |
# Auto generated by 'inv collect-airflow'
from airfly._vendor.airflow.providers.google.cloud.transfers.adls_to_gcs import (
ADLSToGCSOperator,
)
class AdlsToGoogleCloudStorageOperator(ADLSToGCSOperator):
pass
| 24.333333
| 81
| 0.808219
| 23
| 219
| 7.565217
| 0.913043
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.114155
| 219
| 8
| 82
| 27.375
| 0.896907
| 0.178082
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.2
| 0.2
| 0
| 0.4
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
|
0
| 5
|
1ea881107329ce2c07d0951d9ffcea8c0ff1a260
| 44
|
py
|
Python
|
libreverse/__init__.py
|
TheAssassin/3d.models
|
682766f96a04f005946feda73ddf33afa0fb3f9b
|
[
"MIT"
] | 1
|
2020-07-17T11:01:13.000Z
|
2020-07-17T11:01:13.000Z
|
libreverse/__init__.py
|
TheAssassin/3d.models
|
682766f96a04f005946feda73ddf33afa0fb3f9b
|
[
"MIT"
] | null | null | null |
libreverse/__init__.py
|
TheAssassin/3d.models
|
682766f96a04f005946feda73ddf33afa0fb3f9b
|
[
"MIT"
] | null | null | null |
from .app_factory import create_app # noqa
| 22
| 43
| 0.795455
| 7
| 44
| 4.714286
| 0.857143
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.159091
| 44
| 1
| 44
| 44
| 0.891892
| 0.090909
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
1eadadbcd38af33a7dd7484ee7670b9a6409db4b
| 494
|
py
|
Python
|
everest/ptolemaic/datalike/secondary/functional/__init__.py
|
rsbyrne/everest
|
1ec06301cdeb7c2b7d85daf6075d996c5529247e
|
[
"MIT"
] | 2
|
2020-12-17T02:27:28.000Z
|
2020-12-17T23:50:13.000Z
|
everest/ptolemaic/datalike/secondary/functional/__init__.py
|
rsbyrne/everest
|
1ec06301cdeb7c2b7d85daf6075d996c5529247e
|
[
"MIT"
] | 1
|
2020-12-07T10:14:45.000Z
|
2020-12-07T10:14:45.000Z
|
everest/ptolemaic/datalike/secondary/functional/__init__.py
|
rsbyrne/everest
|
1ec06301cdeb7c2b7d85daf6075d996c5529247e
|
[
"MIT"
] | 1
|
2020-10-22T11:16:50.000Z
|
2020-10-22T11:16:50.000Z
|
###############################################################################
''''''
###############################################################################
from .. import _classtools, _ur
from .. import Secondary as _Secondary
from ._functional import Functional
from .operation import *
# from .applicator import *
###############################################################################
###############################################################################
| 27.444444
| 79
| 0.253036
| 19
| 494
| 6.368421
| 0.473684
| 0.165289
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.072874
| 494
| 17
| 80
| 29.058824
| 0.264192
| 0.050607
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
1eb1fc015085cefe3a91f70451f5a7e14ea6b9e1
| 31
|
py
|
Python
|
openhgnn/auto/__init__.py
|
guyuisland/OpenHGNN
|
ab25b83431fed760136e122b442ca4470eb9522c
|
[
"Apache-2.0"
] | 235
|
2021-05-31T09:25:31.000Z
|
2022-03-30T23:20:10.000Z
|
openhgnn/auto/__init__.py
|
guyuisland/OpenHGNN
|
ab25b83431fed760136e122b442ca4470eb9522c
|
[
"Apache-2.0"
] | 17
|
2021-05-30T15:12:26.000Z
|
2022-03-09T08:32:12.000Z
|
openhgnn/auto/__init__.py
|
guyuisland/OpenHGNN
|
ab25b83431fed760136e122b442ca4470eb9522c
|
[
"Apache-2.0"
] | 65
|
2021-05-27T14:17:42.000Z
|
2022-03-29T12:28:32.000Z
|
from .hpo import hpo_experiment
| 31
| 31
| 0.870968
| 5
| 31
| 5.2
| 0.8
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.096774
| 31
| 1
| 31
| 31
| 0.928571
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
1ee4b3fa29f7aa5eafcedb296f21e7103e7daa0d
| 64
|
py
|
Python
|
mal/issues.py
|
thiderman/mal
|
bbb4dd945e9a4b9bf5ebd2340d639bdab50be50f
|
[
"MIT"
] | 1
|
2015-08-06T23:04:10.000Z
|
2015-08-06T23:04:10.000Z
|
mal/issues.py
|
thiderman/mal
|
bbb4dd945e9a4b9bf5ebd2340d639bdab50be50f
|
[
"MIT"
] | null | null | null |
mal/issues.py
|
thiderman/mal
|
bbb4dd945e9a4b9bf5ebd2340d639bdab50be50f
|
[
"MIT"
] | null | null | null |
def get_oauth():
pass
def get_repo(owner, name):
pass
| 9.142857
| 26
| 0.625
| 10
| 64
| 3.8
| 0.7
| 0.315789
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.265625
| 64
| 6
| 27
| 10.666667
| 0.808511
| 0
| 0
| 0.5
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| false
| 0.5
| 0
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 5
|
949414dd4d50f2239986d95e4bdb9143b7c296d4
| 440
|
py
|
Python
|
tests/test_mouse.py
|
sejr/onu_micromouse_python
|
0d5aa26f235687b131dbc3536d4d4d437b199f61
|
[
"MIT"
] | null | null | null |
tests/test_mouse.py
|
sejr/onu_micromouse_python
|
0d5aa26f235687b131dbc3536d4d4d437b199f61
|
[
"MIT"
] | 3
|
2015-04-18T21:09:18.000Z
|
2015-04-18T21:11:17.000Z
|
tests/test_mouse.py
|
sejr/onu_micromouse_python
|
0d5aa26f235687b131dbc3536d4d4d437b199f61
|
[
"MIT"
] | null | null | null |
from micromouse import mouse
import unittest
class TestMouseMethods(unittest.TestCase):
def test_get_coordinates(self):
return 0
def test_set_coordinates(self):
return 0
def test_sensor_read(self):
return 0
def test_move_north(self):
return 0
def test_move_east(self):
return 0
def test_move_south(self):
return 0
def test_move_west(self):
return 0
| 17.6
| 42
| 0.654545
| 59
| 440
| 4.644068
| 0.389831
| 0.178832
| 0.281022
| 0.306569
| 0.532847
| 0.532847
| 0
| 0
| 0
| 0
| 0
| 0.022364
| 0.288636
| 440
| 25
| 43
| 17.6
| 0.853035
| 0
| 0
| 0.411765
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.411765
| false
| 0
| 0.117647
| 0.411765
| 1
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 5
|
94be80d113d6765df32b05858dd89c9425379b68
| 129
|
py
|
Python
|
Website/Members/admin.py
|
sdeusch/django_member_management
|
ff649ce2845ac6774d6a4187d716349e7eb4a7b8
|
[
"Apache-2.0"
] | null | null | null |
Website/Members/admin.py
|
sdeusch/django_member_management
|
ff649ce2845ac6774d6a4187d716349e7eb4a7b8
|
[
"Apache-2.0"
] | null | null | null |
Website/Members/admin.py
|
sdeusch/django_member_management
|
ff649ce2845ac6774d6a4187d716349e7eb4a7b8
|
[
"Apache-2.0"
] | null | null | null |
from django.contrib import admin
from .models import Member, Account
admin.site.register(Member)
admin.site.register(Account)
| 16.125
| 35
| 0.806202
| 18
| 129
| 5.777778
| 0.555556
| 0.173077
| 0.326923
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.108527
| 129
| 7
| 36
| 18.428571
| 0.904348
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
bf4e2c3610ccc56bc96d44373f9ec57c861389e8
| 81
|
py
|
Python
|
src/core/gui/SessionManager/__init__.py
|
Oire/TheQube
|
fcfd8a68b15948e0740642d635db24adef8cc314
|
[
"MIT"
] | 21
|
2015-08-02T21:26:14.000Z
|
2019-12-27T09:57:44.000Z
|
src/core/gui/SessionManager/__init__.py
|
Oire/TheQube
|
fcfd8a68b15948e0740642d635db24adef8cc314
|
[
"MIT"
] | 34
|
2015-01-12T00:38:14.000Z
|
2020-08-31T11:19:37.000Z
|
src/core/gui/SessionManager/__init__.py
|
Oire/TheQube
|
fcfd8a68b15948e0740642d635db24adef8cc314
|
[
"MIT"
] | 15
|
2015-03-24T15:42:30.000Z
|
2020-09-24T20:26:42.000Z
|
from main import SessionManagerDialog
from new_session import NewSessionDialog
| 27
| 41
| 0.876543
| 9
| 81
| 7.777778
| 0.777778
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.123457
| 81
| 2
| 42
| 40.5
| 0.985915
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
bf69dda5bf9be46c5865552e0aedc621db4728e5
| 121
|
py
|
Python
|
crud/admin.py
|
Gornstats/HTMX-Django-Experiments
|
14553a6086412b243dfcc1f2c2a71f9e17adf82a
|
[
"BSD-2-Clause"
] | null | null | null |
crud/admin.py
|
Gornstats/HTMX-Django-Experiments
|
14553a6086412b243dfcc1f2c2a71f9e17adf82a
|
[
"BSD-2-Clause"
] | null | null | null |
crud/admin.py
|
Gornstats/HTMX-Django-Experiments
|
14553a6086412b243dfcc1f2c2a71f9e17adf82a
|
[
"BSD-2-Clause"
] | null | null | null |
from django.contrib import admin
from crud.models import Person
# Register your models here.
admin.site.register(Person)
| 24.2
| 32
| 0.818182
| 18
| 121
| 5.5
| 0.666667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.115702
| 121
| 5
| 33
| 24.2
| 0.925234
| 0.214876
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.666667
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
bf6cd6f7b566d536621056ce147c5d3d14ffc8d5
| 20,454
|
py
|
Python
|
project_tests/data_generation_scripts/milestone4.py
|
kevin5naug/column_store
|
a82c3bce33b7421cd0def340e00685e5fcd8f6ec
|
[
"MIT"
] | null | null | null |
project_tests/data_generation_scripts/milestone4.py
|
kevin5naug/column_store
|
a82c3bce33b7421cd0def340e00685e5fcd8f6ec
|
[
"MIT"
] | null | null | null |
project_tests/data_generation_scripts/milestone4.py
|
kevin5naug/column_store
|
a82c3bce33b7421cd0def340e00685e5fcd8f6ec
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python
import sys, string
from random import choice
import random
from string import ascii_lowercase
from scipy.stats import beta, uniform
import numpy as np
import struct
import pandas as pd
import math
import data_gen_utils
# note this is the base path where we store the data files we generate
TEST_BASE_DIR = "/cs165/generated_data"
# note this is the base path that _POINTS_ to the data files we generate
DOCKER_TEST_BASE_DIR = "/cs165/staff_test"
#
# Example usage:
# python milestone4.py 10000 10000 10000 42 1.0 50 ~/repo/cs165-docker-test-runner/test_data /cs165/staff_test
#
############################################################################
# Notes: You can generate your own scripts for generating data fairly easily by modifying this script.
#
############################################################################
class ZipfianDistribution:
def __init__(self,zipfianParam, numElements):
self.zipfianParam = zipfianParam
self.numElements = numElements
self.H_s = ZipfianDistribution.computeHarmonic(zipfianParam, numElements)
def computeHarmonic(zipfianParam, numElements):
total = 0.0
for k in range(1,numElements+1,1):
total += (1.0/math.pow(k, zipfianParam))
return total
def drawRandomSample(self, unifSample):
total = 0.0
k = 0
while (unifSample >= total):
k += 1
total += ((1.0/math.pow(k, self.zipfianParam)) / self.H_s)
return k
def createRandomNumpyArray(self,arraySize):
array = np.random.uniform(size=(arraySize))
vectorizedSampleFunc = np.vectorize(self.drawRandomSample)
return vectorizedSampleFunc(array)
def generateDataMilestone4(dataSizeFact, dataSizeDim1, dataSizeDim2, zipfianParam, numDistinctElements):
outputFile1 = TEST_BASE_DIR + '/' + 'data5_fact.csv'
outputFile2 = TEST_BASE_DIR + '/' + 'data5_dimension1.csv'
outputFile3 = TEST_BASE_DIR + '/' + 'data5_dimension2.csv'
header_line_fact = data_gen_utils.generateHeaderLine('db1', 'tbl5_fact', 4)
header_line_dim1 = data_gen_utils.generateHeaderLine('db1', 'tbl5_dim1', 3)
header_line_dim2 = data_gen_utils.generateHeaderLine('db1', 'tbl5_dim2', 2)
outputFactTable = pd.DataFrame(np.random.randint(0, dataSizeFact/5, size=(dataSizeFact, 4)), columns =['col1', 'col2', 'col3', 'col4'])
zipfDist = ZipfianDistribution(zipfianParam, numDistinctElements)
# See Zipf's distribution (wikipedia) for a description of this distribution.
outputFactTable['col1'] = zipfDist.createRandomNumpyArray(dataSizeFact)
outputFactTable['col3'] = np.full((dataSizeFact),1)
outputFactTable['col4'] = np.random.randint(1, dataSizeDim2, size=(dataSizeFact))
outputDimTable1 = pd.DataFrame(np.random.randint(0, dataSizeDim1/5, size=(dataSizeDim1, 3)), columns =['col1', 'col2', 'col3'])
# joinable on col1 with fact table
outputDimTable1['col1'] = zipfDist.createRandomNumpyArray(dataSizeDim1)
# joinable on col2 with dimension table 2
outputDimTable1['col2'] = np.random.randint(1, dataSizeDim2, size=(dataSizeDim1))
outputDimTable2 = pd.DataFrame(np.random.randint(0, dataSizeDim2/5, size=(dataSizeDim2, 2)), columns =['col1', 'col2'])
outputDimTable2['col1'] = np.arange(1,dataSizeDim2+1, 1)
outputFactTable.to_csv(outputFile1, sep=',', index=False, header=header_line_fact, line_terminator='\n')
outputDimTable1.to_csv(outputFile2, sep=',', index=False, header=header_line_dim1, line_terminator='\n')
outputDimTable2.to_csv(outputFile3, sep=',', index=False, header=header_line_dim2, line_terminator='\n')
return outputFactTable, outputDimTable1, outputDimTable2
def createTest31():
# prelude
output_file, exp_output_file = data_gen_utils.openFileHandles(31, TEST_DIR=TEST_BASE_DIR)
output_file.write('-- Creates tables for join tests\n')
output_file.write('-- without any indexes\n')
output_file.write('create(tbl,"tbl5_fact",db1,4)\n')
output_file.write('create(col,"col1",db1.tbl5_fact)\n')
output_file.write('create(col,"col2",db1.tbl5_fact)\n')
output_file.write('create(col,"col3",db1.tbl5_fact)\n')
output_file.write('create(col,"col4",db1.tbl5_fact)\n')
output_file.write('load("'+DOCKER_TEST_BASE_DIR+'/data5_fact.csv")\n')
output_file.write('--\n')
output_file.write('create(tbl,"tbl5_dim1",db1,3)\n')
output_file.write('create(col,"col1",db1.tbl5_dim1)\n')
output_file.write('create(col,"col2",db1.tbl5_dim1)\n')
output_file.write('create(col,"col3",db1.tbl5_dim1)\n')
output_file.write('load("'+DOCKER_TEST_BASE_DIR+'/data5_dimension1.csv")\n')
output_file.write('--\n')
output_file.write('create(tbl,"tbl5_dim2",db1,2)\n')
output_file.write('create(col,"col1",db1.tbl5_dim2)\n')
output_file.write('create(col,"col2",db1.tbl5_dim2)\n')
output_file.write('load("'+DOCKER_TEST_BASE_DIR+'/data5_dimension2.csv")\n')
output_file.write('-- Testing that the data and their indexes are durable on disk.\n')
output_file.write('shutdown\n')
# no expected results
data_gen_utils.closeFileHandles(output_file, exp_output_file)
def createTest32(factTable, dimTable2, dataSizeFact, dataSizeDim2, selectivityFact, selectivityDim2):
    """Emit test 32: select + nested-loop join + aggregation (many-to-one).

    Selects from tbl5_fact.col2 and tbl5_dim2.col1, joins fact.col4 with
    dim2.col1 using nested loops and prints avg(fact.col2),
    sum(fact.col3).  Expected results are computed with pandas from the
    in-memory tables and written to the expected-output file.
    """
    output_file, exp_output_file = data_gen_utils.openFileHandles(32, TEST_DIR=TEST_BASE_DIR)
    # Selection thresholds shared by the script and the pandas check below.
    factCutoff = int((dataSizeFact / 5) * selectivityFact)
    dimCutoff = int(dataSizeDim2 * selectivityDim2)
    output_file.write('-- First join test - nested-loop. Select + Join + aggregation\n')
    output_file.write('-- Performs the join using nested loops\n')
    output_file.write('-- Do this only on reasonable sized tables! (O(n^2))\n')
    output_file.write('-- Query in SQL:\n')
    output_file.write('-- SELECT avg(tbl5_fact.col2), sum(tbl5_fact.col3) FROM tbl5_fact,tbl5_dim2 WHERE tbl5_fact.col4=tbl5_dim2.col1 AND tbl5_fact.col2 < {} AND tbl5_dim2.col1<{};\n'.format(factCutoff, dimCutoff))
    output_file.write('--\n')
    output_file.write('--\n')
    output_file.write('p1=select(db1.tbl5_fact.col2,null, {})\n'.format(factCutoff))
    output_file.write('p2=select(db1.tbl5_dim2.col1,null, {})\n'.format(dimCutoff))
    output_file.write('f1=fetch(db1.tbl5_fact.col4,p1)\n')
    output_file.write('f2=fetch(db1.tbl5_dim2.col1,p2)\n')
    output_file.write('t1,t2=join(f1,p1,f2,p2,nested-loop)\n')
    output_file.write('col2joined=fetch(db1.tbl5_fact.col2,t1)\n')
    output_file.write('col3joined=fetch(db1.tbl5_fact.col3,t2)\n')
    output_file.write('a1=avg(col2joined)\n')
    output_file.write('a2=sum(col3joined)\n')
    output_file.write('print(a1,a2)\n')
    # Reproduce the query with pandas to generate the expected results.
    joinedTable = factTable[factTable['col2'] < factCutoff].merge(
        dimTable2[dimTable2['col1'] < dimCutoff],
        left_on='col4', right_on='col1', suffixes=('', '_right'))
    col2ValuesMean = joinedTable['col2'].mean()
    col3ValuesSum = joinedTable['col3'].sum()
    # An empty join yields NaN aggregates; emit zeros in that case.
    if math.isnan(col2ValuesMean):
        exp_output_file.write('0.00,')
    else:
        exp_output_file.write('{:0.2f},'.format(col2ValuesMean))
    if math.isnan(col3ValuesSum):
        exp_output_file.write('0\n')
    else:
        exp_output_file.write('{}\n'.format(col3ValuesSum))
    # BUG FIX: close the handles (previously left open, risking unflushed
    # output); createTest31 already closed its handles this way.
    data_gen_utils.closeFileHandles(output_file, exp_output_file)
def createTest33(factTable, dimTable2, dataSizeFact, dataSizeDim2, selectivityFact, selectivityDim2):
    """Emit test 33: select + hash join + aggregation (many-to-one).

    Identical query to test 32 but the join is executed with hashing.
    Expected results are computed with pandas and written alongside.
    """
    output_file, exp_output_file = data_gen_utils.openFileHandles(33, TEST_DIR=TEST_BASE_DIR)
    # Selection thresholds shared by the script and the pandas check below.
    factCutoff = int((dataSizeFact / 5) * selectivityFact)
    dimCutoff = int(dataSizeDim2 * selectivityDim2)
    output_file.write('-- First join test - hash. Select + Join + aggregation\n')
    output_file.write('-- Performs the join using hashing\n')
    output_file.write('-- Query in SQL:\n')
    output_file.write('-- SELECT avg(tbl5_fact.col2), sum(tbl5_fact.col3) FROM tbl5_fact,tbl5_dim2 WHERE tbl5_fact.col4=tbl5_dim2.col1 AND tbl5_fact.col2 < {} AND tbl5_dim2.col1<{};\n'.format(factCutoff, dimCutoff))
    output_file.write('--\n')
    output_file.write('--\n')
    output_file.write('p1=select(db1.tbl5_fact.col2,null, {})\n'.format(factCutoff))
    output_file.write('p2=select(db1.tbl5_dim2.col1,null, {})\n'.format(dimCutoff))
    output_file.write('f1=fetch(db1.tbl5_fact.col4,p1)\n')
    output_file.write('f2=fetch(db1.tbl5_dim2.col1,p2)\n')
    output_file.write('t1,t2=join(f1,p1,f2,p2,hash)\n')
    output_file.write('col2joined=fetch(db1.tbl5_fact.col2,t1)\n')
    output_file.write('col3joined=fetch(db1.tbl5_fact.col3,t2)\n')
    output_file.write('a1=avg(col2joined)\n')
    output_file.write('a2=sum(col3joined)\n')
    output_file.write('print(a1,a2)\n')
    # Reproduce the query with pandas to generate the expected results.
    joinedTable = factTable[factTable['col2'] < factCutoff].merge(
        dimTable2[dimTable2['col1'] < dimCutoff],
        left_on='col4', right_on='col1', suffixes=('', '_right'))
    col2ValuesMean = joinedTable['col2'].mean()
    col3ValuesSum = joinedTable['col3'].sum()
    # An empty join yields NaN aggregates; emit zeros in that case.
    if math.isnan(col2ValuesMean):
        exp_output_file.write('0.00,')
    else:
        exp_output_file.write('{:0.2f},'.format(col2ValuesMean))
    if math.isnan(col3ValuesSum):
        exp_output_file.write('0\n')
    else:
        exp_output_file.write('{}\n'.format(col3ValuesSum))
    # BUG FIX: close the handles (previously left open).
    data_gen_utils.closeFileHandles(output_file, exp_output_file)
def createTest34(factTable, dimTable1, dataSizeFact, dataSizeDim1, selectivityFact, selectivityDim1):
    """Emit test 34: select + nested-loop join + aggregation (many-to-many).

    Joins fact.col1 with dim1.col1 (non-unique on both sides) using nested
    loops and prints sum(fact.col2), avg(dim1.col1).  Expected results are
    computed with pandas.
    """
    output_file, exp_output_file = data_gen_utils.openFileHandles(34, TEST_DIR=TEST_BASE_DIR)
    # Selection thresholds shared by the script and the pandas check below.
    factCutoff = int(selectivityFact * (dataSizeFact / 5))
    dimCutoff = int((dataSizeDim1 / 5) * selectivityDim1)
    output_file.write('-- Join test 2 - nested-loop. Select + Join + aggregation\n')
    output_file.write('-- Performs the join using nested loops\n')
    output_file.write('-- Do this only on reasonable sized tables! (O(n^2))\n')
    output_file.write('-- Query in SQL:\n')
    output_file.write('-- SELECT sum(tbl5_fact.col2), avg(tbl5_dim1.col1) FROM tbl5_fact,tbl5_dim1 WHERE tbl5_fact.col1=tbl5_dim1.col1 AND tbl5_fact.col2 < {} AND tbl5_dim1.col3<{};\n'.format(factCutoff, dimCutoff))
    output_file.write('--\n')
    output_file.write('--\n')
    output_file.write('p1=select(db1.tbl5_fact.col2,null, {})\n'.format(factCutoff))
    output_file.write('p2=select(db1.tbl5_dim1.col3,null, {})\n'.format(dimCutoff))
    output_file.write('f1=fetch(db1.tbl5_fact.col1,p1)\n')
    output_file.write('f2=fetch(db1.tbl5_dim1.col1,p2)\n')
    output_file.write('t1,t2=join(f1,p1,f2,p2,nested-loop)\n')
    output_file.write('col2joined=fetch(db1.tbl5_fact.col2,t1)\n')
    output_file.write('col1joined=fetch(db1.tbl5_dim1.col1,t2)\n')
    output_file.write('a1=sum(col2joined)\n')
    output_file.write('a2=avg(col1joined)\n')
    output_file.write('print(a1,a2)\n')
    # Reproduce the query with pandas to generate the expected results.
    joinedTable = factTable[factTable['col2'] < factCutoff].merge(
        dimTable1[dimTable1['col3'] < dimCutoff],
        left_on='col1', right_on='col1', suffixes=('', '_right'))
    col2ValuesSum = joinedTable['col2'].sum()
    col1ValuesMean = joinedTable['col1'].mean()
    # An empty join yields NaN aggregates; emit zeros in that case.
    if math.isnan(col2ValuesSum):
        exp_output_file.write('0,')
    else:
        exp_output_file.write('{},'.format(col2ValuesSum))
    if math.isnan(col1ValuesMean):
        exp_output_file.write('0.00\n')
    else:
        exp_output_file.write('{:0.2f}\n'.format(col1ValuesMean))
    # BUG FIX: close the handles (previously left open).
    data_gen_utils.closeFileHandles(output_file, exp_output_file)
def createTest35(factTable, dimTable1, dataSizeFact, dataSizeDim1, selectivityFact, selectivityDim1):
    """Emit test 35: select + hash join + aggregation (many-to-many).

    Identical query to test 34 but the join is executed with hashing.
    Expected results are computed with pandas.
    """
    output_file, exp_output_file = data_gen_utils.openFileHandles(35, TEST_DIR=TEST_BASE_DIR)
    # Selection thresholds shared by the script and the pandas check below.
    factCutoff = int(selectivityFact * (dataSizeFact / 5))
    dimCutoff = int((dataSizeDim1 / 5) * selectivityDim1)
    output_file.write('-- join test 2 - hash. Select + Join + aggregation\n')
    output_file.write('-- Performs the join using hashing\n')
    output_file.write('-- Query in SQL:\n')
    output_file.write('-- SELECT sum(tbl5_fact.col2), avg(tbl5_dim1.col1) FROM tbl5_fact,tbl5_dim1 WHERE tbl5_fact.col1=tbl5_dim1.col1 AND tbl5_fact.col2 < {} AND tbl5_dim1.col3<{};\n'.format(factCutoff, dimCutoff))
    output_file.write('--\n')
    output_file.write('--\n')
    output_file.write('p1=select(db1.tbl5_fact.col2,null, {})\n'.format(factCutoff))
    output_file.write('p2=select(db1.tbl5_dim1.col3,null, {})\n'.format(dimCutoff))
    output_file.write('f1=fetch(db1.tbl5_fact.col1,p1)\n')
    output_file.write('f2=fetch(db1.tbl5_dim1.col1,p2)\n')
    output_file.write('t1,t2=join(f1,p1,f2,p2,hash)\n')
    output_file.write('col2joined=fetch(db1.tbl5_fact.col2,t1)\n')
    output_file.write('col1joined=fetch(db1.tbl5_dim1.col1,t2)\n')
    output_file.write('a1=sum(col2joined)\n')
    output_file.write('a2=avg(col1joined)\n')
    output_file.write('print(a1,a2)\n')
    # Reproduce the query with pandas to generate the expected results.
    joinedTable = factTable[factTable['col2'] < factCutoff].merge(
        dimTable1[dimTable1['col3'] < dimCutoff],
        left_on='col1', right_on='col1', suffixes=('', '_right'))
    col2ValuesSum = joinedTable['col2'].sum()
    col1ValuesMean = joinedTable['col1'].mean()
    # An empty join yields NaN aggregates; emit zeros in that case.
    if math.isnan(col2ValuesSum):
        exp_output_file.write('0,')
    else:
        exp_output_file.write('{},'.format(col2ValuesSum))
    if math.isnan(col1ValuesMean):
        exp_output_file.write('0.00\n')
    else:
        exp_output_file.write('{:0.2f}\n'.format(col1ValuesMean))
    # BUG FIX: close the handles (previously left open).
    data_gen_utils.closeFileHandles(output_file, exp_output_file)
def createTest36(factTable, dimTable2, dataSizeFact, dataSizeDim2, selectivityFact, selectivityDim2):
    """Emit test 36: hash many-to-one join with larger selectivities.

    Same join shape as tests 32/33 but aggregates sum over the dimension
    table's col2 instead of the fact table's col3; intended mainly as a
    speed test when called with high selectivities.
    """
    output_file, exp_output_file = data_gen_utils.openFileHandles(36, TEST_DIR=TEST_BASE_DIR)
    # Selection thresholds shared by the script and the pandas check below.
    factCutoff = int((dataSizeFact / 5) * selectivityFact)
    dimCutoff = int(dataSizeDim2 * selectivityDim2)
    output_file.write('-- join test 3 - hashing many-one with larger selectivities.\n')
    output_file.write('-- Select + Join + aggregation\n')
    output_file.write('-- Performs the join using hashing\n')
    output_file.write('-- Query in SQL:\n')
    output_file.write('-- SELECT avg(tbl5_fact.col2), sum(tbl5_dim2.col2) FROM tbl5_fact,tbl5_dim2 WHERE tbl5_fact.col4=tbl5_dim2.col1 AND tbl5_fact.col2 < {} AND tbl5_dim2.col1<{};\n'.format(factCutoff, dimCutoff))
    output_file.write('--\n')
    output_file.write('--\n')
    output_file.write('p1=select(db1.tbl5_fact.col2,null, {})\n'.format(factCutoff))
    output_file.write('p2=select(db1.tbl5_dim2.col1,null, {})\n'.format(dimCutoff))
    output_file.write('f1=fetch(db1.tbl5_fact.col4,p1)\n')
    output_file.write('f2=fetch(db1.tbl5_dim2.col1,p2)\n')
    output_file.write('t1,t2=join(f1,p1,f2,p2,hash)\n')
    output_file.write('col2joined=fetch(db1.tbl5_fact.col2,t1)\n')
    output_file.write('col2t2joined=fetch(db1.tbl5_dim2.col2,t2)\n')
    output_file.write('a1=avg(col2joined)\n')
    output_file.write('a2=sum(col2t2joined)\n')
    output_file.write('print(a1,a2)\n')
    # Reproduce the query with pandas to generate the expected results.
    # Note both tables have a col2, so the dimension side ends up as
    # 'col2_right' after the merge.
    joinedTable = factTable[factTable['col2'] < factCutoff].merge(
        dimTable2[dimTable2['col1'] < dimCutoff],
        left_on='col4', right_on='col1', suffixes=('', '_right'))
    col2ValuesMean = joinedTable['col2'].mean()
    col3ValuesSum = joinedTable['col2_right'].sum()
    # An empty join yields NaN aggregates; emit zeros in that case.
    if math.isnan(col2ValuesMean):
        exp_output_file.write('0.00,')
    else:
        exp_output_file.write('{:0.2f},'.format(col2ValuesMean))
    if math.isnan(col3ValuesSum):
        exp_output_file.write('0\n')
    else:
        exp_output_file.write('{}\n'.format(col3ValuesSum))
    # BUG FIX: close the handles (previously left open).
    data_gen_utils.closeFileHandles(output_file, exp_output_file)
def createTest37(factTable, dimTable1, dataSizeFact, dataSizeDim1, selectivityFact, selectivityDim1):
    """Emit test 37: hash many-to-many join with larger selectivities.

    Same query as tests 34/35 (hash variant); intended mainly as a speed
    test when called with high selectivities.
    """
    output_file, exp_output_file = data_gen_utils.openFileHandles(37, TEST_DIR=TEST_BASE_DIR)
    # Selection thresholds shared by the script and the pandas check below.
    factCutoff = int(selectivityFact * (dataSizeFact / 5))
    dimCutoff = int((dataSizeDim1 / 5) * selectivityDim1)
    output_file.write('-- join test 4 - hashing many-many with larger selectivities.\n')
    output_file.write('-- Select + Join + aggregation\n')
    output_file.write('-- Query in SQL:\n')
    output_file.write('-- SELECT sum(tbl5_fact.col2), avg(tbl5_dim1.col1) FROM tbl5_fact,tbl5_dim1 WHERE tbl5_fact.col1=tbl5_dim1.col1 AND tbl5_fact.col2 < {} AND tbl5_dim1.col3<{};\n'.format(factCutoff, dimCutoff))
    output_file.write('--\n')
    output_file.write('--\n')
    output_file.write('p1=select(db1.tbl5_fact.col2,null, {})\n'.format(factCutoff))
    output_file.write('p2=select(db1.tbl5_dim1.col3,null, {})\n'.format(dimCutoff))
    output_file.write('f1=fetch(db1.tbl5_fact.col1,p1)\n')
    output_file.write('f2=fetch(db1.tbl5_dim1.col1,p2)\n')
    output_file.write('t1,t2=join(f1,p1,f2,p2,hash)\n')
    output_file.write('col2joined=fetch(db1.tbl5_fact.col2,t1)\n')
    output_file.write('col1joined=fetch(db1.tbl5_dim1.col1,t2)\n')
    output_file.write('a1=sum(col2joined)\n')
    output_file.write('a2=avg(col1joined)\n')
    output_file.write('print(a1,a2)\n')
    # Reproduce the query with pandas to generate the expected results.
    joinedTable = factTable[factTable['col2'] < factCutoff].merge(
        dimTable1[dimTable1['col3'] < dimCutoff],
        left_on='col1', right_on='col1', suffixes=('', '_right'))
    col2ValuesSum = joinedTable['col2'].sum()
    col1ValuesMean = joinedTable['col1'].mean()
    # An empty join yields NaN aggregates; emit zeros in that case.
    if math.isnan(col2ValuesSum):
        exp_output_file.write('0,')
    else:
        exp_output_file.write('{},'.format(col2ValuesSum))
    if math.isnan(col1ValuesMean):
        exp_output_file.write('0.00\n')
    else:
        exp_output_file.write('{:0.2f}\n'.format(col1ValuesMean))
    # BUG FIX: close the handles (previously left open).
    data_gen_utils.closeFileHandles(output_file, exp_output_file)
def generateMilestoneFourFiles(dataSizeFact, dataSizeDim1, dataSizeDim2, zipfianParam, numDistinctElements, randomSeed=47):
    """Generate the milestone-4 data files and emit tests 31 through 37.

    Seeds numpy's RNG for reproducibility, builds the fact table and the
    two dimension tables, then writes the setup test and the six join
    tests against them.
    """
    np.random.seed(randomSeed)
    factTable, dimTable1, dimTable2 = generateDataMilestone4(
        dataSizeFact, dataSizeDim1, dataSizeDim2, zipfianParam, numDistinctElements)
    createTest31()
    # Many-to-one joins (fact.col4 -> dim2.col1) at moderate selectivity:
    # nested-loop then hash variants.
    for makeJoinTest in (createTest32, createTest33):
        makeJoinTest(factTable, dimTable2, dataSizeFact, dataSizeDim2, 0.15, 0.15)
    # Many-to-many joins (fact.col1 -> dim1.col1) at moderate selectivity.
    for makeJoinTest in (createTest34, createTest35):
        makeJoinTest(factTable, dimTable1, dataSizeFact, dataSizeDim1, 0.15, 0.15)
    # Both join shapes again with much larger selectivities; mostly a
    # speed test.
    createTest36(factTable, dimTable2, dataSizeFact, dataSizeDim2, 0.8, 0.8)
    createTest37(factTable, dimTable1, dataSizeFact, dataSizeDim1, 0.8, 0.8)
def main(argv):
    """Parse CLI arguments and generate the milestone-4 test files.

    argv layout (after the script name):
        dataSizeFact dataSizeDim1 dataSizeDim2
        [randomSeed [zipfianParam numDistinctElements
        [TEST_BASE_DIR [DOCKER_TEST_BASE_DIR]]]]
    """
    global TEST_BASE_DIR
    global DOCKER_TEST_BASE_DIR
    dataSizeFact = int(argv[0])
    dataSizeDim1 = int(argv[1])
    dataSizeDim2 = int(argv[2])
    # Defaults used when the optional arguments are absent.
    randomSeed = 47
    zipfianParam = 1.0
    numDistinctElements = 50
    if len(argv) > 3:
        # BUG FIX: the old `len(argv) > 5` branch left randomSeed as a
        # string (`randomSeed = argv[3]`), which would make
        # np.random.seed() raise a TypeError; always convert to int.
        randomSeed = int(argv[3])
    if len(argv) > 5:
        zipfianParam = np.double(argv[4])
        numDistinctElements = int(argv[5])
    if len(argv) > 6:
        TEST_BASE_DIR = argv[6]
    if len(argv) > 7:
        DOCKER_TEST_BASE_DIR = argv[7]
    generateMilestoneFourFiles(dataSizeFact, dataSizeDim1, dataSizeDim2, zipfianParam, numDistinctElements, randomSeed=randomSeed)

if __name__ == "__main__":
    main(sys.argv[1:])
| 54.398936
| 276
| 0.70441
| 2,650
| 20,454
| 5.273962
| 0.104528
| 0.115913
| 0.156697
| 0.111048
| 0.810031
| 0.777905
| 0.729894
| 0.709144
| 0.693045
| 0.665641
| 0
| 0.050312
| 0.138066
| 20,454
| 375
| 277
| 54.544
| 0.742428
| 0.044343
| 0
| 0.611987
| 0
| 0.018927
| 0.25111
| 0.111409
| 0
| 0
| 0
| 0
| 0
| 1
| 0.044164
| false
| 0
| 0.031546
| 0
| 0.091483
| 0.018927
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
bf91a8ba8764cffb20958748f999fb57dffcf354
| 135
|
py
|
Python
|
app/api/__init__.py
|
tba91/book-booking
|
6be828fd714caf105077eb61235dd163305caf5c
|
[
"MIT"
] | null | null | null |
app/api/__init__.py
|
tba91/book-booking
|
6be828fd714caf105077eb61235dd163305caf5c
|
[
"MIT"
] | null | null | null |
app/api/__init__.py
|
tba91/book-booking
|
6be828fd714caf105077eb61235dd163305caf5c
|
[
"MIT"
] | null | null | null |
from flask import Blueprint

# Blueprint that groups all API routes under one registration point.
api_bp = Blueprint('api', __name__)

# Route modules are imported *after* the blueprint exists so their route
# decorators can attach handlers to api_bp (deliberate bottom import).
from . import authors
from . import books
from . import publishers
| 13.5
| 35
| 0.755556
| 18
| 135
| 5.388889
| 0.555556
| 0.309278
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.177778
| 135
| 9
| 36
| 15
| 0.873874
| 0
| 0
| 0
| 0
| 0
| 0.022222
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.8
| 0
| 0.8
| 0.4
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
bfa141c5b6de893a999893aa0c5b29af75a2ffbb
| 306
|
py
|
Python
|
mlpipeline/base/__init__.py
|
ahmed-shariff/mlpipeline
|
03a07da44eab14171305e41e6d162def6c32c6ac
|
[
"MIT"
] | 5
|
2019-09-04T06:37:33.000Z
|
2021-02-13T14:09:37.000Z
|
mlpipeline/base/__init__.py
|
ahmed-shariff/ml-pipeline
|
ebe262443cd0f43e9eb761adbc7854990842ec8f
|
[
"MIT"
] | 1
|
2019-02-18T12:49:44.000Z
|
2019-02-18T12:49:44.000Z
|
mlpipeline/base/__init__.py
|
ahmed-shariff/mlpipeline
|
03a07da44eab14171305e41e6d162def6c32c6ac
|
[
"MIT"
] | null | null | null |
from mlpipeline.base._base import (ExperimentABC,
                                   DataLoaderABC)
from mlpipeline.base._utils import (DataLoaderCallableWrapper,
                                    ExperimentWrapper)

# Public API of this package.
# BUG FIX: __all__ must contain the exported *names* as strings; listing
# the objects themselves makes `from mlpipeline.base import *` raise
# TypeError ("attribute name must be string").
__all__ = ["ExperimentABC", "DataLoaderABC", "DataLoaderCallableWrapper", "ExperimentWrapper"]
| 43.714286
| 86
| 0.660131
| 19
| 306
| 10.315789
| 0.526316
| 0.142857
| 0.183673
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.287582
| 306
| 6
| 87
| 51
| 0.899083
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.4
| 0
| 0.4
| 0
| 1
| 0
| 1
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
44c360e6160cf89bd11b7076ed791559d575f9d7
| 165
|
py
|
Python
|
pktgen/trex/trex_helpers.py
|
stevelorenz/build-vsf
|
b2d0aba770190672eb63547cd9b2d4cb8df82943
|
[
"MIT"
] | 32
|
2018-07-13T20:39:36.000Z
|
2021-12-26T07:26:54.000Z
|
pktgen/trex/trex_helpers.py
|
stevelorenz/build-vsf
|
b2d0aba770190672eb63547cd9b2d4cb8df82943
|
[
"MIT"
] | null | null | null |
pktgen/trex/trex_helpers.py
|
stevelorenz/build-vsf
|
b2d0aba770190672eb63547cd9b2d4cb8df82943
|
[
"MIT"
] | 6
|
2018-10-31T10:40:50.000Z
|
2020-08-18T08:02:53.000Z
|
#! /usr/bin/env python3
# -*- coding: utf-8 -*-
"""
TODO: Helper functions for Trex traffic generators.
"""
def th_hello():
    """Print a greeting confirming the Trex helper module is importable."""
    greeting = "Hello from Trex helpers."
    print(greeting)
| 15
| 51
| 0.636364
| 22
| 165
| 4.727273
| 0.909091
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.014815
| 0.181818
| 165
| 10
| 52
| 16.5
| 0.755556
| 0.581818
| 0
| 0
| 0
| 0
| 0.4
| 0
| 0
| 0
| 0
| 0.1
| 0
| 1
| 0.5
| true
| 0
| 0
| 0
| 0.5
| 0.5
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 1
|
0
| 5
|
44df673bc38395d687234cd72d350f36699505a1
| 53
|
py
|
Python
|
social/backends/reddit.py
|
raccoongang/python-social-auth
|
81c0a542d158772bd3486d31834c10af5d5f08b0
|
[
"BSD-3-Clause"
] | 1,987
|
2015-01-01T16:12:45.000Z
|
2022-03-29T14:24:25.000Z
|
social/backends/reddit.py
|
raccoongang/python-social-auth
|
81c0a542d158772bd3486d31834c10af5d5f08b0
|
[
"BSD-3-Clause"
] | 731
|
2015-01-01T22:55:25.000Z
|
2022-03-10T15:07:51.000Z
|
virtual/lib/python3.6/site-packages/social/backends/reddit.py
|
dennismwaniki67/awards
|
80ed10541f5f751aee5f8285ab1ad54cfecba95f
|
[
"MIT"
] | 1,082
|
2015-01-01T16:27:26.000Z
|
2022-03-22T21:18:33.000Z
|
from social_core.backends.reddit import RedditOAuth2
| 26.5
| 52
| 0.886792
| 7
| 53
| 6.571429
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.020408
| 0.075472
| 53
| 1
| 53
| 53
| 0.918367
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
44e0a499d1574a4d122eacdd34cd910b590cf705
| 135
|
py
|
Python
|
cord/__init__.py
|
zyberguy/cord19
|
3e2681fd971ff6b108d512a0e18469a56a6459c1
|
[
"Apache-2.0"
] | 35
|
2020-03-27T14:36:04.000Z
|
2022-03-13T09:08:28.000Z
|
cord/__init__.py
|
zyberguy/cord19
|
3e2681fd971ff6b108d512a0e18469a56a6459c1
|
[
"Apache-2.0"
] | 4
|
2020-04-07T05:34:46.000Z
|
2020-05-21T13:06:32.000Z
|
cord/__init__.py
|
zyberguy/cord19
|
3e2681fd971ff6b108d512a0e18469a56a6459c1
|
[
"Apache-2.0"
] | 7
|
2020-04-08T23:49:37.000Z
|
2021-07-23T07:50:31.000Z
|
from .cord19 import ResearchPapers, SearchResults
from .jsonpaper import JsonCatalog, JsonPaper
from .core import *
from .text import *
| 33.75
| 49
| 0.814815
| 16
| 135
| 6.875
| 0.5625
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.016949
| 0.125926
| 135
| 4
| 50
| 33.75
| 0.915254
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
44e3764e47a4fb272e893c970436a46d75bd284a
| 39
|
py
|
Python
|
week3/task1.py
|
summ0n/TOLSTON
|
c1c39d60b0ca468ca010fe7cbddf048061472278
|
[
"MIT"
] | null | null | null |
week3/task1.py
|
summ0n/TOLSTON
|
c1c39d60b0ca468ca010fe7cbddf048061472278
|
[
"MIT"
] | null | null | null |
week3/task1.py
|
summ0n/TOLSTON
|
c1c39d60b0ca468ca010fe7cbddf048061472278
|
[
"MIT"
] | null | null | null |
## Created virtual environment venvir/
| 39
| 39
| 0.846154
| 4
| 39
| 8.25
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.076923
| 39
| 1
| 39
| 39
| 0.916667
| 0.948718
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
44f22270b1c01b35fca06e60f8d919fb6f883878
| 2,940
|
py
|
Python
|
tests/core/pyspec/eth2spec/test/phase0/rewards/test_random.py
|
Manny27nyc/consensus-specs
|
d23444a2db140c8743af4d43f09296d15911ee0f
|
[
"CC0-1.0"
] | null | null | null |
tests/core/pyspec/eth2spec/test/phase0/rewards/test_random.py
|
Manny27nyc/consensus-specs
|
d23444a2db140c8743af4d43f09296d15911ee0f
|
[
"CC0-1.0"
] | null | null | null |
tests/core/pyspec/eth2spec/test/phase0/rewards/test_random.py
|
Manny27nyc/consensus-specs
|
d23444a2db140c8743af4d43f09296d15911ee0f
|
[
"CC0-1.0"
] | null | null | null |
from random import Random
from eth2spec.test.context import (
with_all_phases,
spec_test,
spec_state_test,
with_custom_state,
single_phase,
low_balances, misc_balances,
)
import eth2spec.test.helpers.rewards as rewards_helpers
from eth2spec.test.helpers.random import randomize_state, patch_state_to_non_leaking
from eth2spec.test.helpers.state import has_active_balance_differential
from eth2spec.test.helpers.voluntary_exits import get_unslashed_exited_validators
@with_all_phases
@spec_state_test
def test_full_random_0(spec, state):
    # Fully randomized rewards run; fixed seed keeps the test reproducible.
    yield from rewards_helpers.run_test_full_random(spec, state, rng=Random(1010))
@with_all_phases
@spec_state_test
def test_full_random_1(spec, state):
    # Same as test_full_random_0 with a different fixed seed.
    yield from rewards_helpers.run_test_full_random(spec, state, rng=Random(2020))
@with_all_phases
@spec_state_test
def test_full_random_2(spec, state):
    # Same as test_full_random_0 with a different fixed seed.
    yield from rewards_helpers.run_test_full_random(spec, state, rng=Random(3030))
@with_all_phases
@spec_state_test
def test_full_random_3(spec, state):
    # Same as test_full_random_0 with a different fixed seed.
    yield from rewards_helpers.run_test_full_random(spec, state, rng=Random(4040))
@with_all_phases
@spec_state_test
def test_full_random_4(spec, state):
    """
    Ensure a rewards test with some exited (but not slashed) validators.
    """
    rng = Random(5050)
    randomize_state(spec, state, rng)
    # Sanity-check the randomized state before running deltas: it should be
    # in an inactivity leak and contain unslashed exited validators with an
    # active-balance differential.
    assert spec.is_in_inactivity_leak(state)
    target_validators = get_unslashed_exited_validators(spec, state)
    assert len(target_validators) != 0
    assert has_active_balance_differential(spec, state)
    yield from rewards_helpers.run_deltas(spec, state)
@with_all_phases
@with_custom_state(balances_fn=low_balances, threshold_fn=lambda spec: spec.config.EJECTION_BALANCE)
@spec_test
@single_phase
def test_full_random_low_balances_0(spec, state):
    # Fully random rewards run over a state built with low balances.
    yield from rewards_helpers.run_test_full_random(spec, state, rng=Random(5050))
@with_all_phases
@with_custom_state(balances_fn=low_balances, threshold_fn=lambda spec: spec.config.EJECTION_BALANCE)
@spec_test
@single_phase
def test_full_random_low_balances_1(spec, state):
    # Low-balance variant with a different fixed seed.
    yield from rewards_helpers.run_test_full_random(spec, state, rng=Random(6060))
@with_all_phases
@with_custom_state(balances_fn=misc_balances, threshold_fn=lambda spec: spec.config.EJECTION_BALANCE)
@spec_test
@single_phase
def test_full_random_misc_balances(spec, state):
    # Fully random rewards run over a state with miscellaneous balances.
    yield from rewards_helpers.run_test_full_random(spec, state, rng=Random(7070))
@with_all_phases
@spec_state_test
def test_full_random_without_leak_0(spec, state):
    # Randomize the state, then patch it so it is *not* in an inactivity
    # leak before checking reward deltas.
    rng = Random(1010)
    randomize_state(spec, state, rng)
    assert spec.is_in_inactivity_leak(state)
    patch_state_to_non_leaking(spec, state)
    assert not spec.is_in_inactivity_leak(state)
    # The state should still hold unslashed exited validators with an
    # active-balance differential so the deltas run is meaningful.
    target_validators = get_unslashed_exited_validators(spec, state)
    assert len(target_validators) != 0
    assert has_active_balance_differential(spec, state)
    yield from rewards_helpers.run_deltas(spec, state)
| 31.956522
| 101
| 0.806803
| 443
| 2,940
| 4.961625
| 0.162528
| 0.131028
| 0.101911
| 0.069609
| 0.768426
| 0.756597
| 0.734304
| 0.734304
| 0.717015
| 0.717015
| 0
| 0.019608
| 0.115306
| 2,940
| 91
| 102
| 32.307692
| 0.825452
| 0.023129
| 0
| 0.507246
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.101449
| 1
| 0.130435
| false
| 0
| 0.086957
| 0
| 0.217391
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
7802612b3ba6e508e0b5369cfefe8e0aa459dd3c
| 44
|
py
|
Python
|
dev.py
|
wrule/bill-analyzer
|
c9db719e721acb7b55bafd502a76645071cd2f24
|
[
"MIT"
] | null | null | null |
dev.py
|
wrule/bill-analyzer
|
c9db719e721acb7b55bafd502a76645071cd2f24
|
[
"MIT"
] | null | null | null |
dev.py
|
wrule/bill-analyzer
|
c9db719e721acb7b55bafd502a76645071cd2f24
|
[
"MIT"
] | null | null | null |
#!/opt/homebrew/bin/python3
# Smoke-test script: prints a greeting (Chinese for "Hello, world").
print('你好,世界')
| 11
| 27
| 0.681818
| 7
| 44
| 4.285714
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.02439
| 0.068182
| 44
| 3
| 28
| 14.666667
| 0.707317
| 0.590909
| 0
| 0
| 0
| 0
| 0.294118
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
|
0
| 5
|
7819c18018d39bc34312c7ad26328eb2e24baca8
| 517
|
py
|
Python
|
bcad/bsk/settings.py
|
snegovick/bcad
|
f3230ded2b3401228db6994f2480cab90972fcbb
|
[
"MIT"
] | 3
|
2020-02-14T16:28:18.000Z
|
2020-08-18T10:52:33.000Z
|
bcad/bsk/settings.py
|
snegovick/bcad
|
f3230ded2b3401228db6994f2480cab90972fcbb
|
[
"MIT"
] | 48
|
2020-02-14T06:16:02.000Z
|
2021-09-19T17:51:47.000Z
|
bcad/bsk/settings.py
|
snegovick/bcad
|
f3230ded2b3401228db6994f2480cab90972fcbb
|
[
"MIT"
] | 1
|
2020-03-18T01:36:59.000Z
|
2020-03-18T01:36:59.000Z
|
from __future__ import absolute_import, division, print_function
class Settings(object):
    """User-configurable settings, (de)serializable as a plain dict."""

    def __init__(self, data=None):
        """Create default settings, or restore them from a serialized dict.

        data -- dict previously produced by serialize(); None for defaults.
        """
        # BUG FIX (idiom): compare against None with `is`, not `==` —
        # `==` can be hijacked by a custom __eq__ on `data` (PEP 8).
        if data is None:
            self.centerpoint_snap = True  # default: snap to center points
        else:
            self.deserialize(data)

    def is_centerpoint_snap_enabled(self):
        """Return True when snapping to element center points is enabled."""
        return self.centerpoint_snap

    def serialize(self):
        """Return a dict representation suitable for saving."""
        return {"type": "settings", "centerpoint_snap": self.centerpoint_snap}

    def deserialize(self, data):
        """Restore settings from a dict produced by serialize()."""
        self.centerpoint_snap = data["centerpoint_snap"]
| 28.722222
| 78
| 0.673114
| 58
| 517
| 5.672414
| 0.448276
| 0.319149
| 0.231003
| 0.133739
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.234043
| 517
| 17
| 79
| 30.411765
| 0.830808
| 0
| 0
| 0
| 0
| 0
| 0.085106
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.307692
| false
| 0
| 0.076923
| 0.153846
| 0.615385
| 0.076923
| 0
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 5
|
78294ef5b8513fa454981ecfd58679818c2fcbc0
| 39,165
|
py
|
Python
|
pyml/supervised/LogisticRegression.py
|
albamr09/PythonML
|
9848cf913a7cdb73d2b98a8ab7334c04f421ad87
|
[
"MIT"
] | null | null | null |
pyml/supervised/LogisticRegression.py
|
albamr09/PythonML
|
9848cf913a7cdb73d2b98a8ab7334c04f421ad87
|
[
"MIT"
] | null | null | null |
pyml/supervised/LogisticRegression.py
|
albamr09/PythonML
|
9848cf913a7cdb73d2b98a8ab7334c04f421ad87
|
[
"MIT"
] | null | null | null |
import numpy as np
import matplotlib.pyplot as plt
import scipy.optimize as op
import pandas as pd
"""
------------------------------------------------------------------------------------------------------------------------
Clase que aplica los algoritmos de Logistic Regression, es decir, algoritmos de clasificacion.
-- X: matriz de terminos independientes.
-- y: matriz fila de termino dependiente.
-- n: numero de features, numero de filas.
-- m: numero de ejemplos, numero de columnas.
-- reg: boolean indica si se aplica regularizacion.
-- theta: matriz fila de biases.
------------------------------------------------------------------------------------------------------------------------
"""
class LogisticRegression():
"""
------------------------------------------------------------------------------------------------------------------------
Funcion de iniciacion de la clase, en la cual se inicializan las variables propias de la clase.
-- axis: si 0 -> features en filas y ejemplos en columnas, si no viceversa.
------------------------------------------------------------------------------------------------------------------------
"""
    def __init__(self, X, y, reg=False, axis=0, reg_par=None):
        """Initialize the model from data matrix X and labels y.

        axis=0 means features are rows and examples are columns; any
        other value means X and y arrive transposed.  With reg=True the
        data is polynomial-feature-mapped and reg_par is stored as the
        regularization parameter; otherwise a bias row of ones is
        prepended.
        """
        if axis == 0:
            self.X = X  # store X as given
            self.y = y  # store y as given
        else:  # X and y are not in the expected orientation
            self.X = X.T  # transpose so features are rows
            self.y = y.T  # transpose y to match
        self.n, self.m = self.X.shape  # n features, m examples
        if reg:  # regularization requested
            self.X = self._map_feature(self.X)
            self.reg_par = reg_par  # keep the regularization parameter
        else:
            self.X = np.concatenate((np.matrix(np.ones(self.m)), self.X))  # prepend the bias row of ones
        self.reg = reg  # remember whether regularization is used
        self.n, self.m = self.X.shape  # dimensions after mapping / bias row
        self.theta = np.matrix(np.zeros(self.n))  # initialize weights to zero
"""
------------------------------------------------------------------------------------------------------------------------
Funcion que crea un vector de 28 elementos a partir de un vector de 2 elementos.
------------------------------------------------------------------------------------------------------------------------
"""
    def _map_feature(self, X):
        """Map a 2-feature matrix to a 28-term degree-6 polynomial basis.

        Returns the mapped matrix (one term per row), or prints an error
        and returns None when X does not have exactly two feature rows.
        """
        n, m = X.shape  # dimensions of the data matrix
        if (n == 2):  # mapping is only defined for exactly two features
            degree = 6;  # degree of the polynomial
            mapeado = np.ones(m)  # row of ones: the constant term
            for i in range(1, degree + 1):
                for j in range(0, i + 1):
                    multiplicacion = np.ravel(np.power(X[0, :], (i - j))) * np.ravel(np.power(X[1, :], (j)))  # term x1^(i-j) * x2^j
                    mapeado = np.vstack((mapeado, multiplicacion))  # append the term as a new row
            return mapeado  # return the mapped matrix
        else:
            print("Solo es un mapeado valido para dos features")  # error: only valid for two features
"""
------------------------------------------------------------------------------------------------------------------------
Funcion que evalua segun la funcion sigmoid los valores independientes de la matriz X.
------------------------------------------------------------------------------------------------------------------------
"""
    def _sigmoid(self):
        """Evaluate the sigmoid hypothesis 1 / (1 + e^(-theta*X)) on self.X."""
        self.theta = self.theta.reshape((1, self.n));  # make theta a 1 x n row vector
        z = self.theta.dot(self.X)  # sigmoid input: theta*X
        z = 1 / (1 + np.exp(-z))  # sigmoid: 1 / (1 + e^(-sum(theta*X)))
        return z
"""
------------------------------------------------------------------------------------------------------------------------
Funcion que evalua segun la funcion sigmoid los valores independientes de la matriz X, para la funcion de
minimización, que requiere la introducción de argumentos.
------------------------------------------------------------------------------------------------------------------------
"""
    def _sigmoid_min(self, X, theta):
        """Sigmoid hypothesis variant for the minimizer, which must pass
        X and theta explicitly as arguments."""
        theta = theta.reshape((1, self.n));  # make theta a 1 x n row vector
        z = theta.dot(X)  # sigmoid input: theta*X
        z = 1 / (1 + np.exp(-z))  # sigmoid: 1 / (1 + e^(-sum(theta*X)))
        return z
"""
------------------------------------------------------------------------------------------------------------------------
Computes the cost of the current biases over the independent-variable matrix X.
------------------------------------------------------------------------------------------------------------------------
"""
def calculo_coste(self):
    """Return the (optionally regularised) logistic cost for the current theta."""
    # Hypothesis via the sigmoid, transposed so it can be multiplied with y.
    h = self._sigmoid().T
    # Simplified cross-entropy summed over the examples through the dot products:
    # y*log(h) + (1-y)*log(1-h). (`total` avoids shadowing the builtin `sum`.)
    total = self.y.dot(np.log(h)) + (1 - self.y).dot(np.log(1 - h))
    coste = -np.ravel(total)[0] / self.m  # Average negative log-likelihood
    if self.reg:  # If regularisation was requested
        # L2 penalty over every parameter except the bias theta0, to fight overfitting.
        coste += self.reg_par * np.sum(np.power(self.theta[0, 1:], 2)) / (2 * self.m)
    return coste
"""
------------------------------------------------------------------------------------------------------------------------
Cost function with the explicit-argument signature required by the minimisation routine.
------------------------------------------------------------------------------------------------------------------------
"""
def _calculo_coste_min(self, theta, X, y, reg_par=None):
    """Return the logistic cost of `theta` over (X, y); used as scipy's objective."""
    theta = theta.reshape((1, self.n))  # The optimiser passes theta flat; force a 1 x n row vector
    h = self._sigmoid_min(X, theta).T  # Hypothesis, transposed to combine with y
    # FIX: use dot products exactly like calculo_coste. The original used `*`,
    # which is a matrix product only for np.matrix operands; with plain ndarrays
    # it broadcasts (1, m) * (m, 1) into an (m, m) matrix and np.ravel(...)[0]
    # then kept only the first example's cost.
    total = y.dot(np.log(h)) + (1 - y).dot(np.log(1 - h))
    coste = -np.ravel(total)[0] / self.m  # Average negative log-likelihood
    if self.reg:  # If regularisation was requested
        coste += reg_par * np.sum(np.power(theta[0, 1:], 2)) / (2 * self.m)  # L2 penalty, bias excluded
    return coste
"""
------------------------------------------------------------------------------------------------------------------------
Computes the gradient of the cost with respect to theta, used by gradient descent.
------------------------------------------------------------------------------------------------------------------------
"""
def _gradiente(self):
    h = self._sigmoid()  # Hypothesis (also reshapes self.theta to 1 x n as a side effect)
    error = h - self.y  # Prediction error per example
    # The product with X.T performs the summation over examples; each column is one
    # parameter's partial derivative. NOTE(review): `*` is a matrix product only if
    # these are np.matrix objects — confirm the types set in __init__.
    gradiente = (error * self.X.T) / self.m
    if self.reg:  # If regularisation was requested
        regularizacion = (self.reg_par / self.m) * self.theta  # L2 term for every parameter
        gradiente[0, 1:] = gradiente[0, 1:] + regularizacion[0, 1:]  # Skip theta0: the bias is never regularised
    return np.ravel(gradiente)  # Flatten to a 1-D vector of length n
"""
------------------------------------------------------------------------------------------------------------------------
Gradient of the cost with the explicit-argument signature required by the minimisation routine.
------------------------------------------------------------------------------------------------------------------------
"""
def _gradiente_min(self, theta, X, y, reg_par=None):
    h = self._sigmoid_min(X, theta)  # Hypothesis: sigmoid of theta*X
    error = h - y  # Prediction error per example
    # Summation over examples via the product with X.T; each column is one parameter's
    # partial derivative. NOTE(review): assumes np.matrix `*` semantics — confirm.
    gradiente = (error * X.T) / self.m
    if self.reg:  # If regularisation was requested
        regularizacion = (reg_par / self.m) * theta  # L2 term for every parameter
        gradiente = gradiente.reshape((1, self.n));  # Force a 1 x n row vector
        regularizacion = regularizacion.reshape((1, self.n));  # Same shape for the penalty
        gradiente[0, 1:] = gradiente[0, 1:] + regularizacion[0, 1:]  # Bias theta0 is never regularised
    return np.ravel(gradiente)  # Flatten to a 1-D vector of length n
"""
------------------------------------------------------------------------------------------------------------------------
Applies the gradient-descent algorithm to compute the most optimal bias vector.
-- lr: learning rate.
-- iter: maximum number of iterations.
------------------------------------------------------------------------------------------------------------------------
"""
def gradient_descent(self, lr, iter):
    """Run at most `iter` descent steps of size `lr`, stopping if the cost rises."""
    coste_anterior = self.calculo_coste()  # Cost before taking any step
    for _ in range(iter):
        theta_anterior = self.theta  # Keep the current theta so a bad step can be rolled back
        self.theta = self.theta - lr * self._gradiente()  # Take one descent step
        # BUG FIX: the original evaluated the cost BEFORE assigning the new theta,
        # so the divergence check always compared the stale cost against itself
        # and, on break, the worse theta was kept. Update first, then evaluate.
        coste_actual = self.calculo_coste()
        if coste_actual > coste_anterior:  # The step increased the cost: diverging
            self.theta = theta_anterior  # Roll back the bad step and stop
            break
        coste_anterior = coste_actual  # Baseline for the next iteration
"""
------------------------------------------------------------------------------------------------------------------------
Creates a scatter plot of the data, one series per category.
------------------------------------------------------------------------------------------------------------------------
"""
def plot_datos(self, titulo, xlabel, ylabel, markers, color_label):
    """Scatter-plot feature rows 1 and 2 of X, one series per marker value.

    -- titulo/xlabel/ylabel: title and axis labels.
    -- markers: per-example marker symbol, also used as the category key.
    -- color_label: dict mapping each marker to its 'color' and 'label'.
    Plots rows 1 and 2 of X (row 0 is presumably the bias row of ones) —
    assumes exactly two features; TODO confirm against __init__.
    """
    fig, ax = plt.subplots()
    for marker in np.unique(markers):  # One scatter call per category
        ax.scatter(np.ravel(self.X[1, :])[markers == marker], np.ravel(self.X[2, :])[markers == marker],
                   marker=marker,
                   color=color_label[marker]['color'], label=color_label[marker]['label'])
    plt.title(titulo)  # Plot title
    plt.xlabel(xlabel)  # X-axis label
    plt.ylabel(ylabel)  # Y-axis label
    plt.legend()  # Show the legend
    plt.show()  # Display the figure
"""
------------------------------------------------------------------------------------------------------------------------
Plots the data together with the fitted decision boundary.
------------------------------------------------------------------------------------------------------------------------
"""
def plot_resultados(self, titulo, xlabel, ylabel, markers, color_label):
    """Scatter the data and overlay the decision boundary (contour if regularised, line otherwise)."""
    self.theta = self.theta.reshape((1, self.n));  # Force theta into a 1 x n row vector
    fig, ax = plt.subplots()
    for marker in np.unique(markers):
        ax.scatter(np.ravel(self.X[1, :])[markers == marker], np.ravel(self.X[2, :])[markers == marker],  # One scatter per category
                   marker=marker,
                   color=color_label[marker]['color'], label=color_label[marker]['label'])
    plt.title(titulo)  # Plot title
    plt.xlabel(xlabel)  # X-axis label
    plt.ylabel(ylabel)  # Y-axis label
    if self.reg:  # Regularised model: the boundary is non-linear, draw it as a contour
        u = np.linspace(-1, 1.5, 50)  # 50-point grid for the first feature
        v = np.linspace(-1, 1.5, 50)  # 50-point grid for the second feature
        z = np.zeros((len(u), len(v)))  # Model response over the grid
        for i in range(len(u)):
            for j in range(len(v)):
                tmp = np.array([u[i:i + 1], v[j:j + 1]])
                tmp = self._map_feature(tmp)  # Expand the grid point with the polynomial feature map
                z[i, j] = np.ravel(self.theta.dot(tmp))[0]  # Evaluate theta on the mapped point
        z = z.T  # Transpose so axes match meshgrid's convention
        u, v = np.meshgrid(u, v)
        cs = ax.contour(u, v, z, levels=[0])  # The boundary is the level set theta*x = 0
        cs.collections[0].set_label("Decision boundary")  # Legend entry for the contour
    else:  # Unregularised model: the boundary is a straight line
        X_plot = np.array([np.min(self.X[1, :]) - 2, np.max(self.X[1, :]) + 2])  # x-range with a margin
        y_plot = (-1 / self.theta[0, 2]) * (self.theta[0, 1] * X_plot + self.theta[0, 0])  # Solve theta0 + theta1*x + theta2*y = 0 for y
        plt.plot(X_plot, y_plot, label="Decision boundary")  # Draw the boundary line
    plt.legend()
    plt.show()  # Display the figure
"""
------------------------------------------------------------------------------------------------------------------------
Minimises the cost with scipy, computing the most optimal bias vector.
------------------------------------------------------------------------------------------------------------------------
"""
def minimize(self):
    """Fit theta with scipy.optimize.minimize (TNC), with or without regularisation."""
    initial_theta = np.zeros(self.n)  # Start the search from all-zero biases
    # The two original branches only differed in the argument tuple, so build it
    # once: the regularisation parameter is forwarded only when regularising.
    extra_args = (self.X, self.y, self.reg_par) if self.reg else (self.X, self.y)
    resultado = op.minimize(fun=self._calculo_coste_min,  # Objective to minimise
                            x0=initial_theta,  # Starting point
                            args=extra_args,  # Remaining objective/jacobian arguments
                            method='TNC',
                            jac=self._gradiente_min)  # Analytic gradient
    self.theta = resultado.x  # Keep the optimised biases
"""
------------------------------------------------------------------------------------------------------------------------
Minimises the cost in closed form, computing the most optimal bias vector with the normal equation.
------------------------------------------------------------------------------------------------------------------------
"""
def norm_ecuacion(self):
    """Solve theta = (X^T X [+ lambda*I])^-1 X^T y and store it as a 1 x n row vector."""
    X = self.X.T  # The features must be in columns here instead of rows
    y = self.y.T
    gram = X.T.dot(X)  # X^T X, the matrix to invert
    if self.reg:  # With regularisation, add lambda * I with the bias entry zeroed
        m_reg = np.identity(self.n)  # Identity matrix, one entry per parameter
        m_reg[0, 0] = 0  # The bias theta0 is never regularised
        gram = gram + self.reg_par * m_reg
    # Both branches shared the reshape; solve once and normalise the shape once.
    self.theta = np.linalg.inv(gram).dot(X.T).dot(y)
    self.theta = self.theta.reshape((1, self.n))  # Row vector 1 x n
"""
------------------------------------------------------------------------------------------------------------------------
Class that applies Logistic Regression (classification) supporting several categories,
with one model (one theta row) per category.
-- X: matrix of independent terms (features), with a bias row of ones prepended.
-- y: one-hot target matrix, one row per category.
-- y_prec: the original target vector, kept for accuracy checks.
-- n: number of features (rows of X).
-- m: number of examples (columns of X).
-- c: number of categories.
-- reg: boolean, whether regularisation is applied.
-- reg_par: regularisation parameter.
-- theta: c x n matrix of biases.
------------------------------------------------------------------------------------------------------------------------
"""
class MultiLogisticRegression():
    """
    ------------------------------------------------------------------------------------------------------------------------
    Initialises the instance attributes.
    -- axis: if 0 -> features in rows and examples in columns, otherwise the transpose is taken.
    ------------------------------------------------------------------------------------------------------------------------
    """
    def __init__(self, X, y, reg=False, axis=0, reg_par=None, categorias=None):
        if axis == 0:
            self.X = X  # Data already has features in rows
            self.y_prec = y  # Keep the original targets
        else:  # Input comes transposed
            self.X = X.T
            self.y_prec = y.T
        self.n, self.m = self.X.shape  # Store the dimensions
        self.y = np.reshape(y, (1, self.m))  # NOTE(review): dead assignment, overwritten on the next line
        self.y = np.array(pd.get_dummies(np.ravel(y))).T  # One-hot encode the targets: one row per category
        self.c, self.m = self.y.shape
        if reg:  # If regularisation is requested
            self.reg_par = reg_par  # NOTE(review): reg_par stays undefined when reg is False
        self.X = np.concatenate((np.matrix(np.ones(self.m)), self.X))  # Prepend the bias row of ones
        self.categorias = categorias  # Optional category names
        self.reg = reg  # Remember whether to regularise
        self.n, self.m = self.X.shape  # Dimensions after adding the bias row
        self.theta = np.matrix(np.zeros((self.c, self.n)))  # One row of biases per category
    """
    ------------------------------------------------------------------------------------------------------------------------
    Evaluates the sigmoid function over the independent-variable matrix X using the current biases.
    ------------------------------------------------------------------------------------------------------------------------
    """
    def _sigmoid(self):
        z = self.theta.dot(self.X)  # Linear combination theta*X, one row per category
        z = 1 / (1 + np.exp(-z))  # Sigmoid: 1 / (1 + e^(-z))
        return z
    """
    ------------------------------------------------------------------------------------------------------------------------
    Evaluates the sigmoid function over X for the minimisation routine, which passes theta
    explicitly. NOTE(review): the argument order (theta, X) is the reverse of the
    single-class version of this method.
    ------------------------------------------------------------------------------------------------------------------------
    """
    def _sigmoid_min(self, theta, X):
        z = theta.dot(X)  # Linear combination theta*X
        z = 1 / (1 + np.exp(-z))  # Sigmoid
        return z
    """
    ------------------------------------------------------------------------------------------------------------------------
    Computes the cost of the current biases over the independent-variable matrix.
    ------------------------------------------------------------------------------------------------------------------------
    """
    def calculo_coste(self, indice_coste=None):
        z = self._sigmoid().T  # Hypothesis, transposed so it can be multiplied with y
        sum = self.y.dot(np.log(z)) + (1 - self.y).dot(np.log(1 - z))  # Cross-entropy; off-diagonal entries mix categories
        sum = np.diagonal(sum)  # Keep only the diagonal: one cost per category
        if self.reg:  # If regularisation was requested
            # NOTE(review): only row 0 of theta is penalised and the same scalar is added
            # to every category's cost; per-category rows (theta[:, 1:]) were probably intended.
            sum_reg = self.reg_par * np.sum(np.power(self.theta[0, 1:], 2)) / (2 * self.m)
            sum = -sum / self.m + sum_reg  # Average cost plus the penalty
        else:
            sum = -sum/self.m
        sum = np.reshape(sum, (1, self.c))  # One cost entry per category
        if not indice_coste:  # NOTE(review): index 0 is falsy, so asking for category 0 returns the whole vector
            return sum  # Return the full cost vector
        else:
            return sum[0, indice_coste]  # Return a single category's cost
    """
    ------------------------------------------------------------------------------------------------------------------------
    Cost function with the explicit-argument signature required by the minimisation routine.
    ------------------------------------------------------------------------------------------------------------------------
    """
    def _calculo_coste_min(self, theta, X, y, reg_par=None):
        theta = np.reshape(theta, (self.c, self.n))  # The optimiser hands theta in flat; restore c x n
        z = self._sigmoid_min(theta, X).T  # Hypothesis, transposed to multiply with y
        sum = y.dot(np.log(z)) + (1 - y).dot(np.log(1 - z))  # Cross-entropy matrix
        sum = np.diagonal(sum)  # Keep only the diagonal: one cost per category
        if self.reg:  # If regularisation was requested
            # NOTE(review): same issue as calculo_coste — only theta row 0 is penalised.
            sum_reg = reg_par * np.sum(np.power(theta[0, 1:], 2)) / (2 * self.m)
            sum = -sum / self.m + sum_reg  # Average cost plus the penalty
        else:
            sum = -sum/self.m
        sum = np.reshape(sum, (1, self.c))
        return np.sum(sum)  # Scalar objective: total cost over all categories
    """
    ------------------------------------------------------------------------------------------------------------------------
    Computes the gradient of the cost with respect to theta, used by gradient descent.
    ------------------------------------------------------------------------------------------------------------------------
    """
    def _gradiente(self):
        h = self._sigmoid()  # Hypothesis
        error = h - self.y  # Error per category (rows) and example (columns)
        gradiente = (error * self.X.T) / self.m  # np.matrix product sums over examples; one gradient row per category
        if self.reg:  # If regularisation was requested
            regularizacion = (self.reg_par / self.m) * self.theta  # L2 term for every parameter
            gradiente[:, 1:] = gradiente[:, 1:] + regularizacion[:, 1:]  # The bias column is never regularised
        return gradiente
    """
    ------------------------------------------------------------------------------------------------------------------------
    Gradient of the cost with the explicit-argument signature required by the minimisation routine.
    ------------------------------------------------------------------------------------------------------------------------
    """
    def _gradiente_min(self, theta, X, y, reg_par=None):
        theta = np.reshape(theta, (self.c, self.n))  # Restore the c x n shape
        h = self._sigmoid_min(theta, X)  # Hypothesis
        error = h - y  # Error per category and example
        gradiente = (error * X.T) / self.m  # Summation over examples via the product with X.T
        if self.reg:  # If regularisation was requested
            # NOTE(review): uses self.reg_par and ignores the reg_par argument.
            regularizacion = (self.reg_par / self.m) * theta
            gradiente[:, 1:] = gradiente[:, 1:] + regularizacion[:, 1:]  # The bias column is never regularised
        return gradiente
    """
    ------------------------------------------------------------------------------------------------------------------------
    Applies the gradient-descent algorithm to compute the most optimal bias matrix.
    -- lr: learning rate.
    -- iter: number of iterations.
    ------------------------------------------------------------------------------------------------------------------------
    """
    def gradient_descent(self, lr, iter):
        for i in range(iter):
            self.theta = self.theta - lr * self._gradiente()  # One descent step per iteration, no early stopping
    """
    ------------------------------------------------------------------------------------------------------------------------
    Minimises the cost with scipy, computing the most optimal bias matrix.
    ------------------------------------------------------------------------------------------------------------------------
    """
    def minimize(self):
        initial_theta = np.matrix(np.zeros((self.c, self.n)));  # Start the search from all-zero biases
        if not self.reg:  # Without regularisation
            Result = op.minimize(fun=self._calculo_coste_min,  # Objective to minimise
                                 x0=initial_theta,  # Starting point
                                 args=(self.X, self.y),  # Remaining objective/jacobian arguments
                                 method='TNC',
                                 jac=self._gradiente_min);  # Analytic gradient
        else:  # With regularisation
            Result = op.minimize(fun=self._calculo_coste_min,
                                 x0=initial_theta,
                                 args=(self.X, self.y, self.reg_par),  # Also forward the regularisation parameter
                                 method='TNC',
                                 jac=self._gradiente_min);
        self.theta = Result.x;  # Optimised biases, returned flat
        self.theta = np.reshape(self.theta, (self.c, self.n))  # Restore the c x n shape
    """
    ------------------------------------------------------------------------------------------------------------------------
    Minimises the cost in closed form, computing the most optimal bias matrix with the normal equation.
    ------------------------------------------------------------------------------------------------------------------------
    """
    def norm_ecuacion(self):
        X = self.X.T  # The features must be in columns here instead of rows
        y = self.y.T
        det = np.linalg.det(X.T.dot(X))  # Determinant of the matrix to invert
        if det > 0:  # NOTE(review): a negative determinant is also invertible; `det != 0` was probably intended
            if self.reg:  # With regularisation, add lambda * I with the bias entry zeroed
                m_reg = np.identity(self.n)  # Identity matrix, one entry per parameter
                m_reg[0, 0] = 0  # The bias theta0 is never regularised
                self.theta = np.linalg.inv(X.T.dot(X) + self.reg_par*m_reg).dot(X.T).dot(y)  # Regularised normal equation
            else:
                self.theta = np.linalg.inv(X.T.dot(X)).dot(X.T).dot(y)  # Plain normal equation
        else:
            print("Matriz no inversible")
    """
    ------------------------------------------------------------------------------------------------------------------------
    Returns the category a given example belongs to.
    ------------------------------------------------------------------------------------------------------------------------
    """
    def prediccion(self, X_test):
        predicciones = self._sigmoid_min(self.theta, X_test)  # Probability of belonging to each category
        indice = np.argmax(predicciones)  # Index of the most probable category
        if self.categorias:  # If category names were supplied
            return self.categorias[indice]  # Return the matching name
        else:
            return indice  # Otherwise return the raw index
    """
    ------------------------------------------------------------------------------------------------------------------------
    Computes the accuracy of the model over the training data.
    ------------------------------------------------------------------------------------------------------------------------
    """
    def precision(self):
        predicciones = self._sigmoid_min(self.theta, self.X)  # Predictions from every per-category model
        # Winning category per example; the +1 assumes the original labels are
        # 1..c in pd.get_dummies' sorted order — TODO confirm against the data.
        indices = np.argmax(predicciones, axis=0).T + 1
        igual = np.sum(indices == self.y_prec)  # Count matches against the original targets
        return igual / self.m  # accuracy = correct / total
| 60.439815
| 201
| 0.365199
| 3,214
| 39,165
| 4.403236
| 0.111699
| 0.024802
| 0.018089
| 0.010599
| 0.777134
| 0.761094
| 0.746962
| 0.716577
| 0.711207
| 0.711207
| 0
| 0.006935
| 0.414579
| 39,165
| 647
| 202
| 60.53323
| 0.610302
| 0.20457
| 0
| 0.664179
| 0
| 0
| 0.006309
| 0
| 0
| 0
| 0
| 0.001546
| 0
| 1
| 0.093284
| false
| 0
| 0.014925
| 0
| 0.186567
| 0.007463
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
785b5853eb45b39d9e708aa64a45259b10dd1316
| 602
|
py
|
Python
|
tests/data/example_10.py
|
kataev/flake8-rst
|
53ee9906661b001a6aecc06ce09cf093ce6e82df
|
[
"MIT"
] | 18
|
2018-08-27T11:39:14.000Z
|
2021-12-10T08:48:29.000Z
|
tests/data/example_10.py
|
kataev/flake8-rst
|
53ee9906661b001a6aecc06ce09cf093ce6e82df
|
[
"MIT"
] | 18
|
2018-10-26T12:32:16.000Z
|
2021-11-17T06:01:34.000Z
|
tests/data/example_10.py
|
kataev/flake8-rst
|
53ee9906661b001a6aecc06ce09cf093ce6e82df
|
[
"MIT"
] | 7
|
2018-10-19T10:28:05.000Z
|
2021-04-09T15:44:16.000Z
|
"""
Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia
deserunt mollit anim id est laborum.
>>> # extract 100 LDA topics, using default parameters
>>> lda = LdaModel(corpus=mm, id2word=id2word,
... num_topics=100, distributed=distribution_required)
Intermediate output
.. code-block::
>>> # extract 100 LDA topics, using default parameters
>>> ldb = LdbModel(corpus=mm, id2word=id2word, num_topics=100, distributed=True)
Final output
Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia
deserunt mollit anim id est laborum.
"""
| 33.444444
| 84
| 0.73588
| 77
| 602
| 5.714286
| 0.519481
| 0.059091
| 0.095455
| 0.136364
| 0.809091
| 0.809091
| 0.809091
| 0.622727
| 0.418182
| 0.418182
| 0
| 0.032193
| 0.174419
| 602
| 17
| 85
| 35.411765
| 0.853119
| 0.98505
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
785d61fedd8d5971f981ed81986e67f8c2e4e867
| 191
|
py
|
Python
|
wagtailmenus/conf/settings.py
|
cazgp/wagtailmenus
|
b0a6acb281227c93b3b4f11265366da0dada4248
|
[
"MIT"
] | 329
|
2016-01-28T16:20:16.000Z
|
2022-01-31T03:43:54.000Z
|
wagtailmenus/conf/settings.py
|
cazgp/wagtailmenus
|
b0a6acb281227c93b3b4f11265366da0dada4248
|
[
"MIT"
] | 337
|
2016-04-15T11:09:44.000Z
|
2022-01-31T10:01:32.000Z
|
wagtailmenus/conf/settings.py
|
cazgp/wagtailmenus
|
b0a6acb281227c93b3b4f11265366da0dada4248
|
[
"MIT"
] | 105
|
2016-06-17T15:45:07.000Z
|
2022-01-21T21:23:56.000Z
|
import sys
from cogwheels import BaseAppSettingsHelper
class WagtailmenusSettingsHelper(BaseAppSettingsHelper):
deprecations = ()
sys.modules[__name__] = WagtailmenusSettingsHelper()
| 19.1
| 56
| 0.82199
| 14
| 191
| 10.928571
| 0.714286
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.115183
| 191
| 9
| 57
| 21.222222
| 0.905325
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.4
| 0
| 0.8
| 0
| 1
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
788c2a743cdbdb5f6a07a4bd43d0b9627eef031e
| 58
|
py
|
Python
|
cartomancy/games/magnum_opus/__init__.py
|
joedaws/card-player
|
6e44bcc7c3e416fbd002c1d0216cf75e213a74c1
|
[
"MIT"
] | null | null | null |
cartomancy/games/magnum_opus/__init__.py
|
joedaws/card-player
|
6e44bcc7c3e416fbd002c1d0216cf75e213a74c1
|
[
"MIT"
] | null | null | null |
cartomancy/games/magnum_opus/__init__.py
|
joedaws/card-player
|
6e44bcc7c3e416fbd002c1d0216cf75e213a74c1
|
[
"MIT"
] | null | null | null |
from cartomancy.games.magnum_opus.alchemist_card import *
| 29
| 57
| 0.862069
| 8
| 58
| 6
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.068966
| 58
| 1
| 58
| 58
| 0.888889
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
78ce5fba823d4687da5d8342f54668bd3bd3c2db
| 77,058
|
py
|
Python
|
modules/chempy/champ/amber99.py
|
hryknkgw/pymolwin
|
4a1335e90497dbcbfa789f1285a7c1ad84a051f8
|
[
"CNRI-Python"
] | 2
|
2019-05-23T22:17:29.000Z
|
2020-07-03T14:36:22.000Z
|
modules/chempy/champ/amber99.py
|
hryknkgw/pymolwin
|
4a1335e90497dbcbfa789f1285a7c1ad84a051f8
|
[
"CNRI-Python"
] | null | null | null |
modules/chempy/champ/amber99.py
|
hryknkgw/pymolwin
|
4a1335e90497dbcbfa789f1285a7c1ad84a051f8
|
[
"CNRI-Python"
] | null | null | null |
amber99_dict = {
'NHE': [
(
'N<0>([H]<1>)([H]<2>)',
{
0: ('N' , 'N' , -0.4630, 1.8240),
1: ('HN1' , 'H' , 0.2315, 0.6000),
2: ('HN2' , 'H' , 0.2315, 0.6000),
},
),
],
'NME': [
(
'N<0>([H]<1>)[C@]<2>([H]<3>)([H]<4>)[H]<5>',
{
0: ('N' , 'N' , -0.4157, 1.8240),
1: ('H' , 'H' , 0.2719, 0.6000),
2: ('CH3' , 'CT' , -0.1490, 1.9080),
3: ('HH31', 'HC' , 0.0976, 1.3870),
4: ('HH32', 'HC' , 0.0976, 1.3870),
5: ('HH33', 'HC' , 0.0976, 1.3870),
},
),
],
'ACE': [
(
'C<0>(=O<1>)[C@]<2>([H]<3>)([H]<4>)[H]<5>',
{
0: ('C' , 'C' , 0.5972, 1.9080),
1: ('O' , 'O' , -0.5679, 1.6612),
2: ('CH3' , 'CT' , -0.3662, 1.9080),
3: ('HH31', 'HC' , 0.1123, 1.4870),
4: ('HH32', 'HC' , 0.1123, 1.4870),
5: ('HH33', 'HC' , 0.1123, 1.4870),
},
),
],
'ALA': [
(
'N<0>([H]<1>)[C@@]<2>([H]<3>)([C@]<4>([H]<5>)([H]<6>)[H]<7>)C<8>=O<9>',
{
0: ('N' , 'N' , -0.4157, 1.8240),
1: ('H' , 'H' , 0.2719, 0.6000),
2: ('CA' , 'CT' , 0.0337, 1.9080),
3: ('HA' , 'H1' , 0.0823, 1.3870),
4: ('CB' , 'CT' , -0.1825, 1.9080),
5: ('HB3' , 'HC' , 0.0603, 1.4870),
6: ('HB2' , 'HC' , 0.0603, 1.4870),
7: ('HB1' , 'HC' , 0.0603, 1.4870),
8: ('C' , 'C' , 0.5973, 1.9080),
9: ('O' , 'O' , -0.5679, 1.6612),
},
),
(
'C<0>(=O<1>)[C@]<2>([H]<3>)([C@]<4>([H]<5>)([H]<6>)[H]<7>)[N@+]<8>([H]<9>)([H]<10>)[H]<11>',
{
0: ('C' , 'C' , 0.6163, 1.9080),
1: ('O' , 'O' , -0.5722, 1.6612),
2: ('CA' , 'CT' , 0.0962, 1.9080),
3: ('HA' , 'HP' , 0.0889, 1.1000),
4: ('CB' , 'CT' , -0.0597, 1.9080),
5: ('HB3' , 'HC' , 0.0300, 1.4870),
6: ('HB2' , 'HC' , 0.0300, 1.4870),
7: ('HB1' , 'HC' , 0.0300, 1.4870),
8: ('N' , 'N3' , 0.1414, 1.8240),
9: ('H3' , 'H' , 0.1997, 0.6000),
10: ('H2' , 'H' , 0.1997, 0.6000),
11: ('H1' , 'H' , 0.1997, 0.6000),
},
),
(
'N<0>([H]<1>)[C@@]<2>([H]<3>)([C@]<4>([H]<5>)([H]<6>)[H]<7>)C<8>([O-]<9>)=O<10>',
{
0: ('N' , 'N' , -0.3821, 1.8240),
1: ('H' , 'H' , 0.2681, 0.6000),
2: ('CA' , 'CT' , -0.1747, 1.9080),
3: ('HA' , 'H1' , 0.1067, 1.3870),
4: ('CB' , 'CT' , -0.2093, 1.9080),
5: ('HB3' , 'HC' , 0.0764, 1.4870),
6: ('HB2' , 'HC' , 0.0764, 1.4870),
7: ('HB1' , 'HC' , 0.0764, 1.4870),
8: ('C' , 'C' , 0.7731, 1.9080),
9: ('OXT' , 'O2' , -0.8055, 1.6612),
10: ('O' , 'O2' , -0.8055, 1.6612),
},
),
],
'ARG': [
(
'N<0>([H]<1>)[C@@]<2>([H]<3>)([C@@]<4>([H]<5>)([H]<6>)[C@@]<7>([H]<8>)([H]<9>)[C@@]<10>([H]<11>)([H]<12>)N<13>([H]<14>)C<15>(N<16>([H]<17>)[H]<18>)=[N+]<19>([H]<20>)[H]<21>)C<22>=O<23>',
{
0: ('N' , 'N' , -0.3479, 1.8240),
1: ('H' , 'H' , 0.2747, 0.6000),
2: ('CA' , 'CT' , -0.2637, 1.9080),
3: ('HA' , 'H1' , 0.1560, 1.3870),
4: ('CB' , 'CT' , -0.0007, 1.9080),
5: ('HB3' , 'HC' , 0.0327, 1.4870),
6: ('HB2' , 'HC' , 0.0327, 1.4870),
7: ('CG' , 'CT' , 0.0390, 1.9080),
8: ('HG3' , 'HC' , 0.0285, 1.4870),
9: ('HG2' , 'HC' , 0.0285, 1.4870),
10: ('CD' , 'CT' , 0.0486, 1.9080),
11: ('HD3' , 'H1' , 0.0687, 1.3870),
12: ('HD2' , 'H1' , 0.0687, 1.3870),
13: ('NE' , 'N2' , -0.5295, 1.8240),
14: ('HE' , 'H' , 0.3456, 0.6000),
15: ('CZ' , 'CA' , 0.8076, 1.9080),
16: ('NH2' , 'N2' , -0.8627, 1.8240),
17: ('HH22', 'H' , 0.4478, 0.6000),
18: ('HH21', 'H' , 0.4478, 0.6000),
19: ('NH1' , 'N2' , -0.8627, 1.8240),
20: ('HH12', 'H' , 0.4478, 0.6000),
21: ('HH11', 'H' , 0.4478, 0.6000),
22: ('C' , 'C' , 0.7341, 1.9080),
23: ('O' , 'O' , -0.5894, 1.6612),
},
),
(
'C<0>(=O<1>)[C@]<2>([H]<3>)([C@@]<4>([H]<5>)([H]<6>)[C@@]<7>([H]<8>)([H]<9>)[C@@]<10>([H]<11>)([H]<12>)N<13>([H]<14>)C<15>(N<16>([H]<17>)[H]<18>)=[N+]<19>([H]<20>)[H]<21>)[N@+]<22>([H]<23>)([H]<24>)[H]<25>',
{
0: ('C' , 'C' , 0.7214, 1.9080),
1: ('O' , 'O' , -0.6013, 1.6612),
2: ('CA' , 'CT' , -0.0223, 1.9080),
3: ('HA' , 'HP' , 0.1242, 1.1000),
4: ('CB' , 'CT' , 0.0118, 1.9080),
5: ('HB3' , 'HC' , 0.0226, 1.4870),
6: ('HB2' , 'HC' , 0.0226, 1.4870),
7: ('CG' , 'CT' , 0.0236, 1.9080),
8: ('HG3' , 'HC' , 0.0309, 1.4870),
9: ('HG2' , 'HC' , 0.0309, 1.4870),
10: ('CD' , 'CT' , 0.0935, 1.9080),
11: ('HD3' , 'H1' , 0.0527, 1.3870),
12: ('HD2' , 'H1' , 0.0527, 1.3870),
13: ('NE' , 'N2' , -0.5650, 1.8240),
14: ('HE' , 'H' , 0.3592, 0.6000),
15: ('CZ' , 'CA' , 0.8281, 1.9080),
16: ('NH2' , 'N2' , -0.8693, 1.8240),
17: ('HH22', 'H' , 0.4494, 0.6000),
18: ('HH21', 'H' , 0.4494, 0.6000),
19: ('NH1' , 'N2' , -0.8693, 1.8240),
20: ('HH12', 'H' , 0.4494, 0.6000),
21: ('HH11', 'H' , 0.4494, 0.6000),
22: ('N' , 'N3' , 0.1305, 1.8240),
23: ('H3' , 'H' , 0.2083, 0.6000),
24: ('H2' , 'H' , 0.2083, 0.6000),
25: ('H1' , 'H' , 0.2083, 0.6000),
},
),
(
'N<0>([H]<1>)[C@@]<2>([H]<3>)([C@@]<4>([H]<5>)([H]<6>)[C@@]<7>([H]<8>)([H]<9>)[C@@]<10>([H]<11>)([H]<12>)N<13>([H]<14>)C<15>(N<16>([H]<17>)[H]<18>)=[N+]<19>([H]<20>)[H]<21>)C<22>([O-]<23>)=O<24>',
{
0: ('N' , 'N' , -0.3481, 1.8240),
1: ('H' , 'H' , 0.2764, 0.6000),
2: ('CA' , 'CT' , -0.3068, 1.9080),
3: ('HA' , 'H1' , 0.1447, 1.3870),
4: ('CB' , 'CT' , -0.0374, 1.9080),
5: ('HB3' , 'HC' , 0.0371, 1.4870),
6: ('HB2' , 'HC' , 0.0371, 1.4870),
7: ('CG' , 'CT' , 0.0744, 1.9080),
8: ('HG3' , 'HC' , 0.0185, 1.4870),
9: ('HG2' , 'HC' , 0.0185, 1.4870),
10: ('CD' , 'CT' , 0.1114, 1.9080),
11: ('HD3' , 'H1' , 0.0468, 1.3870),
12: ('HD2' , 'H1' , 0.0468, 1.3870),
13: ('NE' , 'N2' , -0.5564, 1.8240),
14: ('HE' , 'H' , 0.3479, 0.6000),
15: ('CZ' , 'CA' , 0.8368, 1.9080),
16: ('NH2' , 'N2' , -0.8737, 1.8240),
17: ('HH22', 'H' , 0.4493, 0.6000),
18: ('HH21', 'H' , 0.4493, 0.6000),
19: ('NH1' , 'N2' , -0.8737, 1.8240),
20: ('HH12', 'H' , 0.4493, 0.6000),
21: ('HH11', 'H' , 0.4493, 0.6000),
22: ('C' , 'C' , 0.8557, 1.9080),
23: ('OXT' , 'O2' , -0.8266, 1.6612),
24: ('O' , 'O2' , -0.8266, 1.6612),
},
),
],
'ASP': [
(
'N<0>([H]<1>)[C@@]<2>([H]<3>)([C@@]<4>([H]<5>)([H]<6>)C<7>([O-]<8>)=O<9>)C<10>=O<11>',
{
0: ('N' , 'N' , -0.5163, 1.8240),
1: ('H' , 'H' , 0.2936, 0.6000),
2: ('CA' , 'CT' , 0.0381, 1.9080),
3: ('HA' , 'H1' , 0.0880, 1.3870),
4: ('CB' , 'CT' , -0.0303, 1.9080),
5: ('HB3' , 'HC' , -0.0122, 1.4870),
6: ('HB2' , 'HC' , -0.0122, 1.4870),
7: ('CG' , 'C' , 0.7994, 1.9080),
8: ('OD2' , 'O2' , -0.8014, 1.6612),
9: ('OD1' , 'O2' , -0.8014, 1.6612),
10: ('C' , 'C' , 0.5366, 1.9080),
11: ('O' , 'O' , -0.5819, 1.6612),
},
),
(
'C<0>(=O<1>)[C@]<2>([H]<3>)([C@@]<4>([H]<5>)([H]<6>)C<7>([O-]<8>)=O<9>)[N@+]<10>([H]<11>)([H]<12>)[H]<13>',
{
0: ('C' , 'C' , 0.5621, 1.9080),
1: ('O' , 'O' , -0.5889, 1.6612),
2: ('CA' , 'CT' , 0.0292, 1.9080),
3: ('HA' , 'HP' , 0.1141, 1.1000),
4: ('CB' , 'CT' , -0.0235, 1.9080),
5: ('HB3' , 'HC' , -0.0169, 1.4870),
6: ('HB2' , 'HC' , -0.0169, 1.4870),
7: ('CG' , 'C' , 0.8194, 1.9080),
8: ('OD2' , 'O2' , -0.8084, 1.6612),
9: ('OD1' , 'O2' , -0.8084, 1.6612),
10: ('N' , 'N3' , 0.0782, 1.8240),
11: ('H3' , 'H' , 0.2200, 0.6000),
12: ('H2' , 'H' , 0.2200, 0.6000),
13: ('H1' , 'H' , 0.2200, 0.6000),
},
),
(
'N<0>([H]<1>)[C@@]<2>([H]<3>)([C@@]<4>([H]<5>)([H]<6>)C<7>([O-]<8>)=O<9>)C<10>([O-]<11>)=O<12>',
{
0: ('N' , 'N' , -0.5192, 1.8240),
1: ('H' , 'H' , 0.3055, 0.6000),
2: ('CA' , 'CT' , -0.1817, 1.9080),
3: ('HA' , 'H1' , 0.1046, 1.3870),
4: ('CB' , 'CT' , -0.0677, 1.9080),
5: ('HB3' , 'HC' , -0.0212, 1.4870),
6: ('HB2' , 'HC' , -0.0212, 1.4870),
7: ('CG' , 'C' , 0.8851, 1.9080),
8: ('OD2' , 'O2' , -0.8162, 1.6612),
9: ('OD1' , 'O2' , -0.8162, 1.6612),
10: ('C' , 'C' , 0.7256, 1.9080),
11: ('OXT' , 'O2' , -0.7887, 1.6612),
12: ('O' , 'O2' , -0.7887, 1.6612),
},
),
],
'ASN': [
(
'N<0>([H]<1>)[C@@]<2>([H]<3>)([C@@]<4>([H]<5>)([H]<6>)C<7>(=O<8>)N<9>([H]<10>)[H]<11>)C<12>=O<13>',
{
0: ('N' , 'N' , -0.4157, 1.8240),
1: ('H' , 'H' , 0.2719, 0.6000),
2: ('CA' , 'CT' , 0.0143, 1.9080),
3: ('HA' , 'H1' , 0.1048, 1.3870),
4: ('CB' , 'CT' , -0.2041, 1.9080),
5: ('HB3' , 'HC' , 0.0797, 1.4870),
6: ('HB2' , 'HC' , 0.0797, 1.4870),
7: ('CG' , 'C' , 0.7130, 1.9080),
8: ('OD1' , 'O' , -0.5931, 1.6612),
9: ('ND2' , 'N' , -0.9191, 1.8240),
10: ('HD22', 'H' , 0.4196, 0.6000),
11: ('HD21', 'H' , 0.4196, 0.6000),
12: ('C' , 'C' , 0.5973, 1.9080),
13: ('O' , 'O' , -0.5679, 1.6612),
},
),
(
'C<0>(=O<1>)[C@]<2>([H]<3>)([C@@]<4>([H]<5>)([H]<6>)C<7>(=O<8>)N<9>([H]<10>)[H]<11>)[N@+]<12>([H]<13>)([H]<14>)[H]<15>',
{
0: ('C' , 'C' , 0.6163, 1.9080),
1: ('O' , 'O' , -0.5722, 1.6612),
2: ('CA' , 'CT' , 0.0368, 1.9080),
3: ('HA' , 'HP' , 0.1231, 1.1000),
4: ('CB' , 'CT' , -0.0283, 1.9080),
5: ('HB3' , 'HC' , 0.0515, 1.4870),
6: ('HB2' , 'HC' , 0.0515, 1.4870),
7: ('CG' , 'C' , 0.5833, 1.9080),
8: ('OD1' , 'O' , -0.5744, 1.6612),
9: ('ND2' , 'N' , -0.8634, 1.8240),
10: ('HD22', 'H' , 0.4097, 0.6000),
11: ('HD21', 'H' , 0.4097, 0.6000),
12: ('N' , 'N3' , 0.1801, 1.8240),
13: ('H3' , 'H' , 0.1921, 0.6000),
14: ('H2' , 'H' , 0.1921, 0.6000),
15: ('H1' , 'H' , 0.1921, 0.6000),
},
),
(
'N<0>([H]<1>)[C@@]<2>([H]<3>)([C@@]<4>([H]<5>)([H]<6>)C<7>(=O<8>)N<9>([H]<10>)[H]<11>)C<12>([O-]<13>)=O<14>',
{
0: ('N' , 'N' , -0.3821, 1.8240),
1: ('H' , 'H' , 0.2681, 0.6000),
2: ('CA' , 'CT' , -0.2080, 1.9080),
3: ('HA' , 'H1' , 0.1358, 1.3870),
4: ('CB' , 'CT' , -0.2299, 1.9080),
5: ('HB3' , 'HC' , 0.1023, 1.4870),
6: ('HB2' , 'HC' , 0.1023, 1.4870),
7: ('CG' , 'C' , 0.7153, 1.9080),
8: ('OD1' , 'O' , -0.6010, 1.6612),
9: ('ND2' , 'N' , -0.9084, 1.8240),
10: ('HD22', 'H' , 0.4150, 0.6000),
11: ('HD21', 'H' , 0.4150, 0.6000),
12: ('C' , 'C' , 0.8050, 1.9080),
13: ('OXT' , 'O2' , -0.8147, 1.6612),
14: ('O' , 'O2' , -0.8147, 1.6612),
},
),
],
'CYS': [
(
'N<0>([H]<1>)[C@@]<2>([H]<3>)([C@@]<4>([H]<5>)([H]<6>)S<7>[H]<8>)C<9>=O<10>',
{
0: ('N' , 'N' , -0.4157, 1.8240),
1: ('H' , 'H' , 0.2719, 0.6000),
2: ('CA' , 'CT' , 0.0213, 1.9080),
3: ('HA' , 'H1' , 0.1124, 1.3870),
4: ('CB' , 'CT' , -0.1231, 1.9080),
5: ('HB3' , 'H1' , 0.1112, 1.3870),
6: ('HB2' , 'H1' , 0.1112, 1.3870),
7: ('SG' , 'SH' , -0.3119, 2.0000),
8: ('HG' , 'HS' , 0.1933, 0.6000),
9: ('C' , 'C' , 0.5973, 1.9080),
10: ('O' , 'O' , -0.5679, 1.6612),
},
),
(
'C<0>(=O<1>)[C@]<2>([H]<3>)([C@@]<4>([H]<5>)([H]<6>)S<7>[H]<8>)[N@+]<9>([H]<10>)([H]<11>)[H]<12>',
{
0: ('C' , 'C' , 0.6123, 1.9080),
1: ('O' , 'O' , -0.5713, 1.6612),
2: ('CA' , 'CT' , 0.0927, 1.9080),
3: ('HA' , 'HP' , 0.1411, 1.1000),
4: ('CB' , 'CT' , -0.1195, 1.9080),
5: ('HB3' , 'H1' , 0.1188, 1.3870),
6: ('HB2' , 'H1' , 0.1188, 1.3870),
7: ('SG' , 'SH' , -0.3298, 2.0000),
8: ('HG' , 'HS' , 0.1975, 0.6000),
9: ('N' , 'N3' , 0.1325, 1.8240),
10: ('H3' , 'H' , 0.2023, 0.6000),
11: ('H2' , 'H' , 0.2023, 0.6000),
12: ('H1' , 'H' , 0.2023, 0.6000),
},
),
(
'N<0>([H]<1>)[C@@]<2>([H]<3>)([C@@]<4>([H]<5>)([H]<6>)S<7>[H]<8>)C<9>([O-]<10>)=O<11>',
{
0: ('N' , 'N' , -0.3821, 1.8240),
1: ('H' , 'H' , 0.2681, 0.6000),
2: ('CA' , 'CT' , -0.1635, 1.9080),
3: ('HA' , 'H1' , 0.1396, 1.3870),
4: ('CB' , 'CT' , -0.1996, 1.9080),
5: ('HB3' , 'H1' , 0.1437, 1.3870),
6: ('HB2' , 'H1' , 0.1437, 1.3870),
7: ('SG' , 'SH' , -0.3102, 2.0000),
8: ('HG' , 'HS' , 0.2068, 0.6000),
9: ('C' , 'C' , 0.7497, 1.9080),
10: ('OXT' , 'O2' , -0.7981, 1.6612),
11: ('O' , 'O2' , -0.7981, 1.6612),
},
),
( # disulfide bonded
'N<0>([H]<1>)[C@@]<2>([H]<3>)([C@@]<4>([H]<5>)([H]<6>)S<7>S<7>)C<9>=O<10>',
{
0: ('N' , 'N' , -0.4157, 1.8240),
1: ('H' , 'H' , 0.2719, 0.6000),
2: ('CA' , 'CT' , 0.0429, 1.9080),
3: ('HA' , 'H1' , 0.0766, 1.3870),
4: ('CB' , 'CT' , -0.0790, 1.9080),
5: ('HB3' , 'H1' , 0.0910, 1.3870),
6: ('HB2' , 'H1' , 0.0910, 1.3870),
7: ('SG' , 'S' , -0.1081, 2.0000),
9: ('C' , 'C' , 0.5973, 1.9080),
10: ('O' , 'O' , -0.5679, 1.6612),
},
),
(
'C<0>(=O<1>)[C@]<2>([H]<3>)([C@@]<4>([H]<5>)([H]<6>)S<7>S<7>)[N@+]<9>([H]<10>)([H]<11>)[H]<12>',
{
0: ('C' , 'C' , 0.6123, 1.9080),
1: ('O' , 'O' , -0.5713, 1.6612),
2: ('CA' , 'CT' , 0.1055, 1.9080),
3: ('HA' , 'HP' , 0.0922, 1.1000),
4: ('CB' , 'CT' , -0.0277, 1.9080),
5: ('HB3' , 'H1' , 0.0680, 1.3870),
6: ('HB2' , 'H1' , 0.0680, 1.3870),
7: ('SG' , 'S' , -0.0984, 2.0000),
9: ('N' , 'N3' , 0.2069, 1.8240),
10: ('H3' , 'H' , 0.1815, 0.6000),
11: ('H2' , 'H' , 0.1815, 0.6000),
12: ('H1' , 'H' , 0.1815, 0.6000),
},
),
(
'N<0>([H]<1>)[C@@]<2>([H]<3>)([C@@]<4>([H]<5>)([H]<6>)S<7>S<7>)C<9>([O-]<10>)=O<11>',
{
0: ('N' , 'N' , -0.3821, 1.8240),
1: ('H' , 'H' , 0.2681, 0.6000),
2: ('CA' , 'CT' , -0.1318, 1.9080),
3: ('HA' , 'H1' , 0.0938, 1.3870),
4: ('CB' , 'CT' , -0.1934, 1.9080),
5: ('HB3' , 'H1' , 0.1228, 1.3870),
6: ('HB2' , 'H1' , 0.1228, 1.3870),
7: ('SG' , 'S' , -0.0529, 2.0000),
9: ('C' , 'C' , 0.7618, 1.9080),
10: ('OXT' , 'O2' , -0.8041, 1.6612),
11: ('O' , 'O2' , -0.8041, 1.6612),
},
),
],
'CYX': [
( # disulfide bonded
'N<0>([H]<1>)[C@@]<2>([H]<3>)([C@@]<4>([H]<5>)([H]<6>)S<7>S<7>)C<9>=O<10>',
{
0: ('N' , 'N' , -0.4157, 1.8240),
1: ('H' , 'H' , 0.2719, 0.6000),
2: ('CA' , 'CT' , 0.0429, 1.9080),
3: ('HA' , 'H1' , 0.0766, 1.3870),
4: ('CB' , 'CT' , -0.0790, 1.9080),
5: ('HB3' , 'H1' , 0.0910, 1.3870),
6: ('HB2' , 'H1' , 0.0910, 1.3870),
7: ('SG' , 'S' , -0.1081, 2.0000),
9: ('C' , 'C' , 0.5973, 1.9080),
10: ('O' , 'O' , -0.5679, 1.6612),
},
),
(
'C<0>(=O<1>)[C@]<2>([H]<3>)([C@@]<4>([H]<5>)([H]<6>)S<7>S<7>)[N@+]<9>([H]<10>)([H]<11>)[H]<12>',
{
0: ('C' , 'C' , 0.6123, 1.9080),
1: ('O' , 'O' , -0.5713, 1.6612),
2: ('CA' , 'CT' , 0.1055, 1.9080),
3: ('HA' , 'HP' , 0.0922, 1.1000),
4: ('CB' , 'CT' , -0.0277, 1.9080),
5: ('HB3' , 'H1' , 0.0680, 1.3870),
6: ('HB2' , 'H1' , 0.0680, 1.3870),
7: ('SG' , 'S' , -0.0984, 2.0000),
9: ('N' , 'N3' , 0.2069, 1.8240),
10: ('H3' , 'H' , 0.1815, 0.6000),
11: ('H2' , 'H' , 0.1815, 0.6000),
12: ('H1' , 'H' , 0.1815, 0.6000),
},
),
(
'N<0>([H]<1>)[C@@]<2>([H]<3>)([C@@]<4>([H]<5>)([H]<6>)S<7>S<7>)C<9>([O-]<10>)=O<11>',
{
0: ('N' , 'N' , -0.3821, 1.8240),
1: ('H' , 'H' , 0.2681, 0.6000),
2: ('CA' , 'CT' , -0.1318, 1.9080),
3: ('HA' , 'H1' , 0.0938, 1.3870),
4: ('CB' , 'CT' , -0.1934, 1.9080),
5: ('HB3' , 'H1' , 0.1228, 1.3870),
6: ('HB2' , 'H1' , 0.1228, 1.3870),
7: ('SG' , 'S' , -0.0529, 2.0000),
9: ('C' , 'C' , 0.7618, 1.9080),
10: ('OXT' , 'O2' , -0.8041, 1.6612),
11: ('O' , 'O2' , -0.8041, 1.6612),
},
),
],
'GLN': [
(
'N<0>([H]<1>)[C@@]<2>([H]<3>)([C@@]<4>([H]<5>)([H]<6>)[C@@]<7>([H]<8>)([H]<9>)C<10>(=O<11>)N<12>([H]<13>)[H]<14>)C<15>=O<16>',
{
0: ('N' , 'N' , -0.4157, 1.8240),
1: ('H' , 'H' , 0.2719, 0.6000),
2: ('CA' , 'CT' , -0.0031, 1.9080),
3: ('HA' , 'H1' , 0.0850, 1.3870),
4: ('CB' , 'CT' , -0.0036, 1.9080),
5: ('HB3' , 'HC' , 0.0171, 1.4870),
6: ('HB2' , 'HC' , 0.0171, 1.4870),
7: ('CG' , 'CT' , -0.0645, 1.9080),
8: ('HG3' , 'HC' , 0.0352, 1.4870),
9: ('HG2' , 'HC' , 0.0352, 1.4870),
10: ('CD' , 'C' , 0.6951, 1.9080),
11: ('OE1' , 'O' , -0.6086, 1.6612),
12: ('NE2' , 'N' , -0.9407, 1.8240),
13: ('HE22', 'H' , 0.4251, 0.6000),
14: ('HE21', 'H' , 0.4251, 0.6000),
15: ('C' , 'C' , 0.5973, 1.9080),
16: ('O' , 'O' , -0.5679, 1.6612),
},
),
(
'C<0>(=O<1>)[C@]<2>([H]<3>)([C@@]<4>([H]<5>)([H]<6>)[C@@]<7>([H]<8>)([H]<9>)C<10>(=O<11>)N<12>([H]<13>)[H]<14>)[N@+]<15>([H]<16>)([H]<17>)[H]<18>',
{
0: ('C' , 'C' , 0.6123, 1.9080),
1: ('O' , 'O' , -0.5713, 1.6612),
2: ('CA' , 'CT' , 0.0536, 1.9080),
3: ('HA' , 'HP' , 0.1015, 1.1000),
4: ('CB' , 'CT' , 0.0651, 1.9080),
5: ('HB3' , 'HC' , 0.0050, 1.4870),
6: ('HB2' , 'HC' , 0.0050, 1.4870),
7: ('CG' , 'CT' , -0.0903, 1.9080),
8: ('HG3' , 'HC' , 0.0331, 1.4870),
9: ('HG2' , 'HC' , 0.0331, 1.4870),
10: ('CD' , 'C' , 0.7354, 1.9080),
11: ('OE1' , 'O' , -0.6133, 1.6612),
12: ('NE2' , 'N' , -1.0031, 1.8240),
13: ('HE22', 'H' , 0.4429, 0.6000),
14: ('HE21', 'H' , 0.4429, 0.6000),
15: ('N' , 'N3' , 0.1493, 1.8240),
16: ('H3' , 'H' , 0.1996, 0.6000),
17: ('H2' , 'H' , 0.1996, 0.6000),
18: ('H1' , 'H' , 0.1996, 0.6000),
},
),
(
'N<0>([H]<1>)[C@@]<2>([H]<3>)([C@@]<4>([H]<5>)([H]<6>)[C@@]<7>([H]<8>)([H]<9>)C<10>(=O<11>)N<12>([H]<13>)[H]<14>)C<15>([O-]<16>)=O<17>',
{
0: ('N' , 'N' , -0.3821, 1.8240),
1: ('H' , 'H' , 0.2681, 0.6000),
2: ('CA' , 'CT' , -0.2248, 1.9080),
3: ('HA' , 'H1' , 0.1232, 1.3870),
4: ('CB' , 'CT' , -0.0664, 1.9080),
5: ('HB3' , 'HC' , 0.0452, 1.4870),
6: ('HB2' , 'HC' , 0.0452, 1.4870),
7: ('CG' , 'CT' , -0.0210, 1.9080),
8: ('HG3' , 'HC' , 0.0203, 1.4870),
9: ('HG2' , 'HC' , 0.0203, 1.4870),
10: ('CD' , 'C' , 0.7093, 1.9080),
11: ('OE1' , 'O' , -0.6098, 1.6612),
12: ('NE2' , 'N' , -0.9574, 1.8240),
13: ('HE22', 'H' , 0.4304, 0.6000),
14: ('HE21', 'H' , 0.4304, 0.6000),
15: ('C' , 'C' , 0.7775, 1.9080),
16: ('OXT' , 'O2' , -0.8042, 1.6612),
17: ('O' , 'O2' , -0.8042, 1.6612),
},
),
],
'GLU': [
(
'N<0>([H]<1>)[C@@]<2>([H]<3>)([C@@]<4>([H]<5>)([H]<6>)[C@@]<7>([H]<8>)([H]<9>)C<10>([O-]<11>)=O<12>)C<13>=O<14>',
{
0: ('N' , 'N' , -0.5163, 1.8240),
1: ('H' , 'H' , 0.2936, 0.6000),
2: ('CA' , 'CT' , 0.0397, 1.9080),
3: ('HA' , 'H1' , 0.1105, 1.3870),
4: ('CB' , 'CT' , 0.0560, 1.9080),
5: ('HB3' , 'HC' , -0.0173, 1.4870),
6: ('HB2' , 'HC' , -0.0173, 1.4870),
7: ('CG' , 'CT' , 0.0136, 1.9080),
8: ('HG3' , 'HC' , -0.0425, 1.4870),
9: ('HG2' , 'HC' , -0.0425, 1.4870),
10: ('CD' , 'C' , 0.8054, 1.9080),
11: ('OE2' , 'O2' , -0.8188, 1.6612),
12: ('OE1' , 'O2' , -0.8188, 1.6612),
13: ('C' , 'C' , 0.5366, 1.9080),
14: ('O' , 'O' , -0.5819, 1.6612),
},
),
(
'C<0>(=O<1>)[C@]<2>([H]<3>)([C@@]<4>([H]<5>)([H]<6>)[C@@]<7>([H]<8>)([H]<9>)C<10>([O-]<11>)=O<12>)[N@+]<13>([H]<14>)([H]<15>)[H]<16>',
{
0: ('C' , 'C' , 0.5621, 1.9080),
1: ('O' , 'O' , -0.5889, 1.6612),
2: ('CA' , 'CT' , 0.0588, 1.9080),
3: ('HA' , 'HP' , 0.1202, 1.1000),
4: ('CB' , 'CT' , 0.0909, 1.9080),
5: ('HB3' , 'HC' , -0.0232, 1.4870),
6: ('HB2' , 'HC' , -0.0232, 1.4870),
7: ('CG' , 'CT' , -0.0236, 1.9080),
8: ('HG3' , 'HC' , -0.0315, 1.4870),
9: ('HG2' , 'HC' , -0.0315, 1.4870),
10: ('CD' , 'C' , 0.8087, 1.9080),
11: ('OE2' , 'O2' , -0.8189, 1.6612),
12: ('OE1' , 'O2' , -0.8189, 1.6612),
13: ('N' , 'N3' , 0.0017, 1.8240),
14: ('H3' , 'H' , 0.2391, 0.6000),
15: ('H2' , 'H' , 0.2391, 0.6000),
16: ('H1' , 'H' , 0.2391, 0.6000),
},
),
(
'N<0>([H]<1>)[C@@]<2>([H]<3>)([C@@]<4>([H]<5>)([H]<6>)[C@@]<7>([H]<8>)([H]<9>)C<10>([O-]<11>)=O<12>)C<13>([O-]<14>)=O<15>',
{
0: ('N' , 'N' , -0.5192, 1.8240),
1: ('H' , 'H' , 0.3055, 0.6000),
2: ('CA' , 'CT' , -0.2059, 1.9080),
3: ('HA' , 'H1' , 0.1399, 1.3870),
4: ('CB' , 'CT' , 0.0071, 1.9080),
5: ('HB3' , 'HC' , -0.0078, 1.4870),
6: ('HB2' , 'HC' , -0.0078, 1.4870),
7: ('CG' , 'CT' , 0.0675, 1.9080),
8: ('HG3' , 'HC' , -0.0548, 1.4870),
9: ('HG2' , 'HC' , -0.0548, 1.4870),
10: ('CD' , 'C' , 0.8183, 1.9080),
11: ('OE2' , 'O2' , -0.8220, 1.6612),
12: ('OE1' , 'O2' , -0.8220, 1.6612),
13: ('C' , 'C' , 0.7420, 1.9080),
14: ('OXT' , 'O2' , -0.7930, 1.6612),
15: ('O' , 'O2' , -0.7930, 1.6612),
},
),
],
'GLY': [
(
'N<0>([H]<1>)[C@@]<2>([H]<3>)([H]<4>)C<5>=O<6>',
{
0: ('N' , 'N' , -0.4157, 1.8240),
1: ('H' , 'H' , 0.2719, 0.6000),
2: ('CA' , 'CT' , -0.0252, 1.9080),
3: ('HA' , 'H1' , 0.0698, 1.3870),
4: ('HA3' , 'H1' , 0.0698, 1.3870),
5: ('C' , 'C' , 0.5973, 1.9080),
6: ('O' , 'O' , -0.5679, 1.6612),
},
),
(
'C<0>(=O<1>)[C@]<2>([H]<3>)([H]<4>)[N@+]<5>([H]<6>)([H]<7>)[H]<8>',
{
0: ('C' , 'C' , 0.6163, 1.9080),
1: ('O' , 'O' , -0.5722, 1.6612),
2: ('CA' , 'CT' , -0.0100, 1.9080),
3: ('HA' , 'H1' , 0.0895, 1.1000),
4: ('HA3' , 'H1' , 0.0895, 1.1000),
5: ('N' , 'N' , 0.2943, 1.8240),
6: ('H2' , 'H' , 0.1642, 0.6000),
7: ('H1' , 'H' , 0.1642, 0.6000),
8: ('H3' , 'H' , 0.1642, 0.6000),
},
),
(
'N<0>([H]<1>)[C@@]<2>([H]<3>)([H]<4>)C<5>([O-]<6>)=O<7>',
{
0: ('N' , 'N' , -0.3821, 1.8240),
1: ('H' , 'H' , 0.2681, 0.6000),
2: ('CA' , 'CT' , -0.2493, 1.9080),
3: ('HA' , 'H1' , 0.1056, 1.3870),
4: ('HA3' , 'H1' , 0.1056, 1.3870),
5: ('C' , 'C' , 0.7231, 1.9080),
6: ('OXT' , 'O2' , -0.7855, 1.6612),
7: ('O' , 'O' , -0.7855, 1.6612),
},
),
],
'HIS': [
(
'N<0>([H]<1>)[C@@]<2>([H]<3>)([C@@]<4>([H]<5>)([H]<6>)C<7>=1N<8>=C<9>([H]<10>)N<11>([H]<12>)C<13>=1[H]<14>)C<15>=O<16>',
{
0: ('N' , 'N' , -0.4157, 1.8240),
1: ('H' , 'H' , 0.2719, 0.6000),
2: ('CA' , 'CT' , -0.0581, 1.9080),
3: ('HA' , 'H1' , 0.1360, 1.3870),
4: ('CB' , 'CT' , -0.0074, 1.9080),
5: ('HB3' , 'HC' , 0.0367, 1.4870),
6: ('HB2' , 'HC' , 0.0367, 1.4870),
7: ('CG' , 'CC' , 0.1868, 1.9080),
8: ('ND1' , 'NB' , -0.5432, 1.8240),
9: ('CE1' , 'CR' , 0.1635, 1.9080),
10: ('HE1' , 'H5' , 0.1435, 1.3590),
11: ('NE2' , 'NA' , -0.2795, 1.8240),
12: ('HE2' , 'H' , 0.3339, 0.6000),
13: ('CD2' , 'CW' , -0.2207, 1.9080),
14: ('HD2' , 'H4' , 0.1862, 1.4090),
15: ('C' , 'C' , 0.5973, 1.9080),
16: ('O' , 'O' , -0.5679, 1.6612),
},
),
(
'C<0>(=O<1>)[C@]<2>([H]<3>)([C@@]<4>([H]<5>)([H]<6>)C<7>=1N<8>=C<9>([H]<10>)N<11>([H]<12>)C<13>=1[H]<14>)[N@+]<15>([H]<16>)([H]<17>)[H]<18>',
{
0: ('C' , 'C' , 0.6123, 1.9080),
1: ('O' , 'O' , -0.5713, 1.6612),
2: ('CA' , 'CT' , 0.0236, 1.9080),
3: ('HA' , 'HP' , 0.1380, 1.1000),
4: ('CB' , 'CT' , 0.0489, 1.9080),
5: ('HB3' , 'HC' , 0.0223, 1.4870),
6: ('HB2' , 'HC' , 0.0223, 1.4870),
7: ('CG' , 'CC' , 0.1740, 1.9080),
8: ('ND1' , 'NB' , -0.5579, 1.8240),
9: ('CE1' , 'CR' , 0.1804, 1.9080),
10: ('HE1' , 'H5' , 0.1397, 1.3590),
11: ('NE2' , 'NA' , -0.2781, 1.8240),
12: ('HE2' , 'H' , 0.3324, 0.6000),
13: ('CD2' , 'CW' , -0.2349, 1.9080),
14: ('HD2' , 'H4' , 0.1963, 1.4090),
15: ('N' , 'N3' , 0.1472, 1.8240),
16: ('H3' , 'H' , 0.2016, 0.6000),
17: ('H2' , 'H' , 0.2016, 0.6000),
18: ('H1' , 'H' , 0.2016, 0.6000),
},
),
(
'N<0>([H]<1>)[C@@]<2>([H]<3>)([C@@]<4>([H]<5>)([H]<6>)C<7>=1N<8>=C<9>([H]<10>)N<11>([H]<12>)C<13>=1[H]<14>)C<15>([O-]<16>)=O<17>',
{
0: ('N' , 'N' , -0.3821, 1.8240),
1: ('H' , 'H' , 0.2681, 0.6000),
2: ('CA' , 'CT' , -0.2699, 1.9080),
3: ('HA' , 'H1' , 0.1650, 1.3870),
4: ('CB' , 'CT' , -0.1068, 1.9080),
5: ('HB3' , 'HC' , 0.0620, 1.4870),
6: ('HB2' , 'HC' , 0.0620, 1.4870),
7: ('CG' , 'CC' , 0.2724, 1.9080),
8: ('ND1' , 'NB' , -0.5517, 1.8240),
9: ('CE1' , 'CR' , 0.1558, 1.9080),
10: ('HE1' , 'H5' , 0.1448, 1.3590),
11: ('NE2' , 'NA' , -0.2670, 1.8240),
12: ('HE2' , 'H' , 0.3319, 0.6000),
13: ('CD2' , 'CW' , -0.2588, 1.9080),
14: ('HD2' , 'H4' , 0.1957, 1.4090),
15: ('C' , 'C' , 0.7916, 1.9080),
16: ('OXT' , 'O2' , -0.8065, 1.6612),
17: ('O' , 'O2' , -0.8065, 1.6612),
},
),
],
'HIP': [
(
'N<0>([H]<1>)[C@@]<2>([H]<3>)([C@@]<4>([H]<5>)([H]<6>)C<7>=1[N+]<8>([H]<9>)=C<10>([H]<11>)N<12>([H]<13>)C<14>=1[H]<15>)C<16>=O<17>',
{
0: ('N' , 'N' , -0.3479, 1.8240),
1: ('H' , 'H' , 0.2747, 0.6000),
2: ('CA' , 'CT' , -0.1354, 1.9080),
3: ('HA' , 'H1' , 0.1212, 1.3870),
4: ('CB' , 'CT' , -0.0414, 1.9080),
5: ('HB3' , 'HC' , 0.0810, 1.4870),
6: ('HB2' , 'HC' , 0.0810, 1.4870),
7: ('CG' , 'CC' , -0.0012, 1.9080),
8: ('ND1' , 'NA' , -0.1513, 1.8240),
9: ('HD1' , 'H' , 0.3866, 0.6000),
10: ('CE1' , 'CR' , -0.0170, 1.9080),
11: ('HE1' , 'H5' , 0.2681, 1.3590),
12: ('NE2' , 'NA' , -0.1718, 1.8240),
13: ('HE2' , 'H' , 0.3911, 0.6000),
14: ('CD2' , 'CW' , -0.1141, 1.9080),
15: ('HD2' , 'H4' , 0.2317, 1.4090),
16: ('C' , 'C' , 0.7341, 1.9080),
17: ('O' , 'O' , -0.5894, 1.6612),
},
),
(
'C<0>(=O<1>)[C@]<2>([H]<3>)([C@@]<4>([H]<5>)([H]<6>)C<7>=1[N+]<8>([H]<9>)=C<10>([H]<11>)N<12>([H]<13>)C<14>=1[H]<15>)[N@+]<16>([H]<17>)([H]<18>)[H]<19>',
{
0: ('C' , 'C' , 0.7214, 1.9080),
1: ('O' , 'O' , -0.6013, 1.6612),
2: ('CA' , 'CT' , 0.0581, 1.9080),
3: ('HA' , 'HP' , 0.1047, 1.1000),
4: ('CB' , 'CT' , 0.0484, 1.9080),
5: ('HB3' , 'HC' , 0.0531, 1.4870),
6: ('HB2' , 'HC' , 0.0531, 1.4870),
7: ('CG' , 'CC' , -0.0236, 1.9080),
8: ('ND1' , 'NA' , -0.1510, 1.8240),
9: ('HD1' , 'H' , 0.3821, 0.6000),
10: ('CE1' , 'CR' , -0.0011, 1.9080),
11: ('HE1' , 'H5' , 0.2645, 1.3590),
12: ('NE2' , 'NA' , -0.1739, 1.8240),
13: ('HE2' , 'H' , 0.3921, 0.6000),
14: ('CD2' , 'CW' , -0.1433, 1.9080),
15: ('HD2' , 'H4' , 0.2495, 1.4090),
16: ('N' , 'N3' , 0.2560, 1.8240),
17: ('H3' , 'H' , 0.1704, 0.6000),
18: ('H2' , 'H' , 0.1704, 0.6000),
19: ('H1' , 'H' , 0.1704, 0.6000),
},
),
(
'N<0>([H]<1>)[C@@]<2>([H]<3>)([C@@]<4>([H]<5>)([H]<6>)C<7>=1[N+]<8>([H]<9>)=C<10>([H]<11>)N<12>([H]<13>)C<14>=1[H]<15>)C<16>([O-]<17>)=O<18>',
{
0: ('N' , 'N' , -0.3481, 1.8240),
1: ('H' , 'H' , 0.2764, 0.6000),
2: ('CA' , 'CT' , -0.1445, 1.9080),
3: ('HA' , 'H1' , 0.1115, 1.3870),
4: ('CB' , 'CT' , -0.0800, 1.9080),
5: ('HB3' , 'HC' , 0.0868, 1.4870),
6: ('HB2' , 'HC' , 0.0868, 1.4870),
7: ('CG' , 'CC' , 0.0298, 1.9080),
8: ('ND1' , 'NA' , -0.1501, 1.8240),
9: ('HD1' , 'H' , 0.3883, 0.6000),
10: ('CE1' , 'CR' , -0.0251, 1.9080),
11: ('HE1' , 'H5' , 0.2694, 1.3590),
12: ('NE2' , 'NA' , -0.1683, 1.8240),
13: ('HE2' , 'H' , 0.3913, 0.6000),
14: ('CD2' , 'CW' , -0.1256, 1.9080),
15: ('HD2' , 'H4' , 0.2336, 1.4090),
16: ('C' , 'C' , 0.8032, 1.9080),
17: ('OXT' , 'O2' , -0.8177, 1.6612),
18: ('O' , 'O2' , -0.8177, 1.6612),
},
),
],
'HID': [
(
'N<0>([H]<1>)[C@@]<2>([H]<3>)([C@@]<4>([H]<5>)([H]<6>)C<7>=1N<8>([H]<9>)C<10>([H]<11>)=N<12>C<13>=1[H]<14>)C<15>=O<16>',
{
0: ('N' , 'N' , -0.4157, 1.8240),
1: ('H' , 'H' , 0.2719, 0.6000),
2: ('CA' , 'CT' , 0.0188, 1.9080),
3: ('HA' , 'H1' , 0.0881, 1.3870),
4: ('CB' , 'CT' , -0.0462, 1.9080),
5: ('HB3' , 'HC' , 0.0402, 1.4870),
6: ('HB2' , 'HC' , 0.0402, 1.4870),
7: ('CG' , 'CC' , -0.0266, 1.9080),
8: ('ND1' , 'NA' , -0.3811, 1.8240),
9: ('HD1' , 'H' , 0.3649, 0.6000),
10: ('CE1' , 'CR' , 0.2057, 1.9080),
11: ('HE1' , 'H5' , 0.1392, 1.3590),
12: ('NE2' , 'NB' , -0.5727, 1.8240),
13: ('CD2' , 'CV' , 0.1292, 1.9080),
14: ('HD2' , 'H4' , 0.1147, 1.4090),
15: ('C' , 'C' , 0.5973, 1.9080),
16: ('O' , 'O' , -0.5679, 1.6612),
},
),
(
'C<0>(=O<1>)[C@]<2>([H]<3>)([C@@]<4>([H]<5>)([H]<6>)C<7>=1N<8>([H]<9>)C<10>([H]<11>)=N<12>C<13>=1[H]<14>)[N@+]<15>([H]<16>)([H]<17>)[H]<18>',
{
0: ('C' , 'C' , 0.6123, 1.9080),
1: ('O' , 'O' , -0.5713, 1.6612),
2: ('CA' , 'CT' , 0.0964, 1.9080),
3: ('HA' , 'HP' , 0.0958, 1.1000),
4: ('CB' , 'CT' , 0.0259, 1.9080),
5: ('HB3' , 'HC' , 0.0209, 1.4870),
6: ('HB2' , 'HC' , 0.0209, 1.4870),
7: ('CG' , 'CC' , -0.0399, 1.9080),
8: ('ND1' , 'NA' , -0.3819, 1.8240),
9: ('HD1' , 'H' , 0.3632, 0.6000),
10: ('CE1' , 'CR' , 0.2127, 1.9080),
11: ('HE1' , 'H5' , 0.1385, 1.3590),
12: ('NE2' , 'NB' , -0.5711, 1.8240),
13: ('CD2' , 'CV' , 0.1046, 1.9080),
14: ('HD2' , 'H4' , 0.1299, 1.4090),
15: ('N' , 'N3' , 0.1542, 1.8240),
16: ('H3' , 'H' , 0.1963, 0.6000),
17: ('H2' , 'H' , 0.1963, 0.6000),
18: ('H1' , 'H' , 0.1963, 0.6000),
},
),
(
'N<0>([H]<1>)[C@@]<2>([H]<3>)([C@@]<4>([H]<5>)([H]<6>)C<7>=1N<8>([H]<9>)C<10>([H]<11>)=N<12>C<13>=1[H]<14>)C<15>([O-]<16>)=O<17>',
{
0: ('N' , 'N' , -0.3821, 1.8240),
1: ('H' , 'H' , 0.2681, 0.6000),
2: ('CA' , 'CT' , -0.1739, 1.9080),
3: ('HA' , 'H1' , 0.1100, 1.3870),
4: ('CB' , 'CT' , -0.1046, 1.9080),
5: ('HB3' , 'HC' , 0.0565, 1.4870),
6: ('HB2' , 'HC' , 0.0565, 1.4870),
7: ('CG' , 'CC' , 0.0293, 1.9080),
8: ('ND1' , 'NA' , -0.3892, 1.8240),
9: ('HD1' , 'H' , 0.3755, 0.6000),
10: ('CE1' , 'CR' , 0.1925, 1.9080),
11: ('HE1' , 'H5' , 0.1418, 1.3590),
12: ('NE2' , 'NB' , -0.5629, 1.8240),
13: ('CD2' , 'CV' , 0.1001, 1.9080),
14: ('HD2' , 'H4' , 0.1241, 1.4090),
15: ('C' , 'C' , 0.7615, 1.9080),
16: ('OXT' , 'O2' , -0.8016, 1.6612),
17: ('O' , 'O2' , -0.8016, 1.6612),
},
),
],
'ILE': [
(
'N<0>([H]<1>)[C@@]<2>([H]<3>)([C@@]<4>([H]<5>)([C@]<6>([H]<7>)([H]<8>)[H]<9>)[C@@]<10>([H]<11>)([H]<12>)[C@]<13>([H]<14>)([H]<15>)[H]<16>)C<17>=O<18>',
{
0: ('N' , 'N' , -0.4157, 1.8240),
1: ('H' , 'H' , 0.2719, 0.6000),
2: ('CA' , 'CT' , -0.0597, 1.9080),
3: ('HA' , 'H1' , 0.0869, 1.3870),
4: ('CB' , 'CT' , 0.1303, 1.9080),
5: ('HB' , 'HC' , 0.0187, 1.4870),
6: ('CG2' , 'CT' , -0.3204, 1.9080),
7: ('HG23', 'HC' , 0.0882, 1.4870),
8: ('HG22', 'HC' , 0.0882, 1.4870),
9: ('HG21', 'HC' , 0.0882, 1.4870),
10: ('CG1' , 'CT' , -0.0430, 1.9080),
11: ('HG13', 'HC' , 0.0236, 1.4870),
12: ('HG12', 'HC' , 0.0236, 1.4870),
13: ('CD1' , 'CT' , -0.0660, 1.9080),
14: ('HD13', 'HC' , 0.0186, 1.4870),
15: ('HD12', 'HC' , 0.0186, 1.4870),
16: ('HD11', 'HC' , 0.0186, 1.4870),
17: ('C' , 'C' , 0.5973, 1.9080),
18: ('O' , 'O' , -0.5679, 1.6612),
},
),
(
'C<0>(=O<1>)[C@]<2>([H]<3>)([C@@]<4>([H]<5>)([C@]<6>([H]<7>)([H]<8>)[H]<9>)[C@@]<10>([H]<11>)([H]<12>)[C@]<13>([H]<14>)([H]<15>)[H]<16>)[N@+]<17>([H]<18>)([H]<19>)[H]<20>',
{
0: ('C' , 'C' , 0.6123, 1.9080),
1: ('O' , 'O' , -0.5713, 1.6612),
2: ('CA' , 'CT' , 0.0257, 1.9080),
3: ('HA' , 'HP' , 0.1031, 1.1000),
4: ('CB' , 'CT' , 0.1885, 1.9080),
5: ('HB' , 'HC' , 0.0213, 1.4870),
6: ('CG2' , 'CT' , -0.3720, 1.9080),
7: ('HG23', 'HC' , 0.0947, 1.4870),
8: ('HG22', 'HC' , 0.0947, 1.4870),
9: ('HG21', 'HC' , 0.0947, 1.4870),
10: ('CG1' , 'CT' , -0.0387, 1.9080),
11: ('HG13', 'HC' , 0.0201, 1.4870),
12: ('HG12', 'HC' , 0.0201, 1.4870),
13: ('CD1' , 'CT' , -0.0908, 1.9080),
14: ('HD13', 'HC' , 0.0226, 1.4870),
15: ('HD12', 'HC' , 0.0226, 1.4870),
16: ('HD11', 'HC' , 0.0226, 1.4870),
17: ('N' , 'N3' , 0.0311, 1.8240),
18: ('H3' , 'H' , 0.2329, 0.6000),
19: ('H2' , 'H' , 0.2329, 0.6000),
20: ('H1' , 'H' , 0.2329, 0.6000),
},
),
(
'N<0>([H]<1>)[C@@]<2>([H]<3>)([C@@]<4>([H]<5>)([C@]<6>([H]<7>)([H]<8>)[H]<9>)[C@@]<10>([H]<11>)([H]<12>)[C@]<13>([H]<14>)([H]<15>)[H]<16>)C<17>([O-]<18>)=O<19>',
{
0: ('N' , 'N' , -0.3821, 1.8240),
1: ('H' , 'H' , 0.2681, 0.6000),
2: ('CA' , 'CT' , -0.3100, 1.9080),
3: ('HA' , 'H1' , 0.1375, 1.3870),
4: ('CB' , 'CT' , 0.0363, 1.9080),
5: ('HB' , 'HC' , 0.0766, 1.4870),
6: ('CG2' , 'CT' , -0.3498, 1.9080),
7: ('HG23', 'HC' , 0.1021, 1.4870),
8: ('HG22', 'HC' , 0.1021, 1.4870),
9: ('HG21', 'HC' , 0.1021, 1.4870),
10: ('CG1' , 'CT' , -0.0323, 1.9080),
11: ('HG13', 'HC' , 0.0321, 1.4870),
12: ('HG12', 'HC' , 0.0321, 1.4870),
13: ('CD1' , 'CT' , -0.0699, 1.9080),
14: ('HD13', 'HC' , 0.0196, 1.4870),
15: ('HD12', 'HC' , 0.0196, 1.4870),
16: ('HD11', 'HC' , 0.0196, 1.4870),
17: ('C' , 'C' , 0.8343, 1.9080),
18: ('OXT' , 'O2' , -0.8190, 1.6612),
19: ('O' , 'O2' , -0.8190, 1.6612),
},
),
],
'LEU': [
(
'N<0>([H]<1>)[C@@]<2>([H]<3>)([C@@]<4>([H]<5>)([H]<6>)[C@]<7>([H]<8>)([C@]<9>([H]<10>)([H]<11>)[H]<12>)[C@]<13>([H]<14>)([H]<15>)[H]<16>)C<17>=O<18>',
{
0: ('N' , 'N' , -0.4157, 1.8240),
1: ('H' , 'H' , 0.2719, 0.6000),
2: ('CA' , 'CT' , -0.0518, 1.9080),
3: ('HA' , 'H1' , 0.0922, 1.3870),
4: ('CB' , 'CT' , -0.1102, 1.9080),
5: ('HB3' , 'HC' , 0.0457, 1.4870),
6: ('HB2' , 'HC' , 0.0457, 1.4870),
7: ('CG' , 'CT' , 0.3531, 1.9080),
8: ('HG' , 'HC' , -0.0361, 1.4870),
9: ('CD2' , 'CT' , -0.4121, 1.9080),
10: ('HD23', 'HC' , 0.1000, 1.4870),
11: ('HD22', 'HC' , 0.1000, 1.4870),
12: ('HD21', 'HC' , 0.1000, 1.4870),
13: ('CD1' , 'CT' , -0.4121, 1.9080),
14: ('HD13', 'HC' , 0.1000, 1.4870),
15: ('HD12', 'HC' , 0.1000, 1.4870),
16: ('HD11', 'HC' , 0.1000, 1.4870),
17: ('C' , 'C' , 0.5973, 1.9080),
18: ('O' , 'O' , -0.5679, 1.6612),
},
),
(
'C<0>(=O<1>)[C@]<2>([H]<3>)([C@@]<4>([H]<5>)([H]<6>)[C@]<7>([H]<8>)([C@]<9>([H]<10>)([H]<11>)[H]<12>)[C@]<13>([H]<14>)([H]<15>)[H]<16>)[N@+]<17>([H]<18>)([H]<19>)[H]<20>',
{
0: ('C' , 'C' , 0.6123, 1.9080),
1: ('O' , 'O' , -0.5713, 1.6612),
2: ('CA' , 'CT' , 0.0104, 1.9080),
3: ('HA' , 'HP' , 0.1053, 1.1000),
4: ('CB' , 'CT' , -0.0244, 1.9080),
5: ('HB3' , 'HC' , 0.0256, 1.4870),
6: ('HB2' , 'HC' , 0.0256, 1.4870),
7: ('CG' , 'CT' , 0.3421, 1.9080),
8: ('HG' , 'HC' , -0.0380, 1.4870),
9: ('CD2' , 'CT' , -0.4104, 1.9080),
10: ('HD23', 'HC' , 0.0980, 1.4870),
11: ('HD22', 'HC' , 0.0980, 1.4870),
12: ('HD21', 'HC' , 0.0980, 1.4870),
13: ('CD1' , 'CT' , -0.4106, 1.9080),
14: ('HD13', 'HC' , 0.0980, 1.4870),
15: ('HD12', 'HC' , 0.0980, 1.4870),
16: ('HD11', 'HC' , 0.0980, 1.4870),
17: ('N' , 'N3' , 0.1010, 1.8240),
18: ('H3' , 'H' , 0.2148, 0.6000),
19: ('H2' , 'H' , 0.2148, 0.6000),
20: ('H1' , 'H' , 0.2148, 0.6000),
},
),
(
'N<0>([H]<1>)[C@@]<2>([H]<3>)([C@@]<4>([H]<5>)([H]<6>)[C@]<7>([H]<8>)([C@]<9>([H]<10>)([H]<11>)[H]<12>)[C@]<13>([H]<14>)([H]<15>)[H]<16>)C<17>([O-]<18>)=O<19>',
{
0: ('N' , 'N' , -0.3821, 1.8240),
1: ('H' , 'H' , 0.2681, 0.6000),
2: ('CA' , 'CT' , -0.2847, 1.9080),
3: ('HA' , 'H1' , 0.1346, 1.3870),
4: ('CB' , 'CT' , -0.2469, 1.9080),
5: ('HB3' , 'HC' , 0.0974, 1.4870),
6: ('HB2' , 'HC' , 0.0974, 1.4870),
7: ('CG' , 'CT' , 0.3706, 1.9080),
8: ('HG' , 'HC' , -0.0374, 1.4870),
9: ('CD2' , 'CT' , -0.4163, 1.9080),
10: ('HD23', 'HC' , 0.1038, 1.4870),
11: ('HD22', 'HC' , 0.1038, 1.4870),
12: ('HD21', 'HC' , 0.1038, 1.4870),
13: ('CD1' , 'CT' , -0.4163, 1.9080),
14: ('HD13', 'HC' , 0.1038, 1.4870),
15: ('HD12', 'HC' , 0.1038, 1.4870),
16: ('HD11', 'HC' , 0.1038, 1.4870),
17: ('C' , 'C' , 0.8326, 1.9080),
18: ('OXT' , 'O2' , -0.8199, 1.6612),
19: ('O' , 'O2' , -0.8199, 1.6612),
},
),
],
'LYS': [
(
'N<0>([H]<1>)[C@@]<2>([H]<3>)([C@@]<4>([H]<5>)([H]<6>)[C@@]<7>([H]<8>)([H]<9>)[C@@]<10>([H]<11>)([H]<12>)[C@@]<13>([H]<14>)([H]<15>)[N@+]<16>([H]<17>)([H]<18>)[H]<19>)C<20>=O<21>',
{
0: ('N' , 'N' , -0.3479, 1.8240),
1: ('H' , 'H' , 0.2747, 0.6000),
2: ('CA' , 'CT' , -0.2400, 1.9080),
3: ('HA' , 'H1' , 0.1426, 1.3870),
4: ('CB' , 'CT' , -0.0094, 1.9080),
5: ('HB3' , 'HC' , 0.0362, 1.4870),
6: ('HB2' , 'HC' , 0.0362, 1.4870),
7: ('CG' , 'CT' , 0.0187, 1.9080),
8: ('HG3' , 'HC' , 0.0103, 1.4870),
9: ('HG2' , 'HC' , 0.0103, 1.4870),
10: ('CD' , 'CT' , -0.0479, 1.9080),
11: ('HD3' , 'HC' , 0.0621, 1.4870),
12: ('HD2' , 'HC' , 0.0621, 1.4870),
13: ('CE' , 'CT' , -0.0143, 1.9080),
14: ('HE3' , 'HP' , 0.1135, 1.1000),
15: ('HE2' , 'HP' , 0.1135, 1.1000),
16: ('NZ' , 'N3' , -0.3854, 1.8240),
17: ('HZ2' , 'H' , 0.3400, 0.6000),
18: ('HZ1' , 'H' , 0.3400, 0.6000),
19: ('HZ3' , 'H' , 0.3400, 0.6000),
20: ('C' , 'C' , 0.7341, 1.9080),
21: ('O' , 'O' , -0.5894, 1.6612),
},
),
(
'C<0>(=O<1>)[C@]<2>([H]<3>)([C@@]<4>([H]<5>)([H]<6>)[C@@]<7>([H]<8>)([H]<9>)[C@@]<10>([H]<11>)([H]<12>)[C@@]<13>([H]<14>)([H]<15>)[N@+]<16>([H]<17>)([H]<18>)[H]<19>)[N@+]<20>([H]<21>)([H]<22>)[H]<23>',
{
0: ('C' , 'C' , 0.7214, 1.9080),
1: ('O' , 'O' , -0.6013, 1.6612),
2: ('CA' , 'CT' , -0.0015, 1.9080),
3: ('HA' , 'HP' , 0.1180, 1.1000),
4: ('CB' , 'CT' , 0.0212, 1.9080),
5: ('HB3' , 'HC' , 0.0283, 1.4870),
6: ('HB2' , 'HC' , 0.0283, 1.4870),
7: ('CG' , 'CT' , -0.0048, 1.9080),
8: ('HG3' , 'HC' , 0.0121, 1.4870),
9: ('HG2' , 'HC' , 0.0121, 1.4870),
10: ('CD' , 'CT' , -0.0608, 1.9080),
11: ('HD3' , 'HC' , 0.0633, 1.4870),
12: ('HD2' , 'HC' , 0.0633, 1.4870),
13: ('CE' , 'CT' , -0.0181, 1.9080),
14: ('HE3' , 'HP' , 0.1171, 1.1000),
15: ('HE2' , 'HP' , 0.1171, 1.1000),
16: ('NZ' , 'N3' , -0.3764, 1.8240),
17: ('HZ2' , 'H' , 0.3382, 0.6000),
18: ('HZ1' , 'H' , 0.3382, 0.6000),
19: ('HZ3' , 'H' , 0.3382, 0.6000),
20: ('N' , 'N3' , 0.0966, 1.8240),
21: ('H3' , 'H' , 0.2165, 0.6000),
22: ('H2' , 'H' , 0.2165, 0.6000),
23: ('H1' , 'H' , 0.2165, 0.6000),
},
),
(
'N<0>([H]<1>)[C@@]<2>([H]<3>)([C@@]<4>([H]<5>)([H]<6>)[C@@]<7>([H]<8>)([H]<9>)[C@@]<10>([H]<11>)([H]<12>)[C@@]<13>([H]<14>)([H]<15>)[N@+]<16>([H]<17>)([H]<18>)[H]<19>)C<20>([O-]<21>)=O<22>',
{
0: ('N' , 'N' , -0.3481, 1.8240),
1: ('H' , 'H' , 0.2764, 0.6000),
2: ('CA' , 'CT' , -0.2903, 1.9080),
3: ('HA' , 'H1' , 0.1438, 1.3870),
4: ('CB' , 'CT' , -0.0538, 1.9080),
5: ('HB3' , 'HC' , 0.0482, 1.4870),
6: ('HB2' , 'HC' , 0.0482, 1.4870),
7: ('CG' , 'CT' , 0.0227, 1.9080),
8: ('HG3' , 'HC' , 0.0134, 1.4870),
9: ('HG2' , 'HC' , 0.0134, 1.4870),
10: ('CD' , 'CT' , -0.0392, 1.9080),
11: ('HD3' , 'HC' , 0.0611, 1.4870),
12: ('HD2' , 'HC' , 0.0611, 1.4870),
13: ('CE' , 'CT' , -0.0176, 1.9080),
14: ('HE3' , 'HP' , 0.1121, 1.1000),
15: ('HE2' , 'HP' , 0.1121, 1.1000),
16: ('NZ' , 'N3' , -0.3741, 1.8240),
17: ('HZ2' , 'H' , 0.3374, 0.6000),
18: ('HZ1' , 'H' , 0.3374, 0.6000),
19: ('HZ3' , 'H' , 0.3374, 0.6000),
20: ('C' , 'C' , 0.8488, 1.9080),
21: ('OXT' , 'O2' , -0.8252, 1.6612),
22: ('O' , 'O2' , -0.8252, 1.6612),
},
),
],
'MET': [
(
'N<0>([H]<1>)[C@@]<2>([H]<3>)([C@@]<4>([H]<5>)([H]<6>)[C@@]<7>([H]<8>)([H]<9>)S<10>[C@]<11>([H]<12>)([H]<13>)[H]<14>)C<15>=O<16>',
{
0: ('N' , 'N' , -0.4157, 1.8240),
1: ('H' , 'H' , 0.2719, 0.6000),
2: ('CA' , 'CT' , -0.0237, 1.9080),
3: ('HA' , 'H1' , 0.0880, 1.3870),
4: ('CB' , 'CT' , 0.0342, 1.9080),
5: ('HB3' , 'HC' , 0.0241, 1.4870),
6: ('HB2' , 'HC' , 0.0241, 1.4870),
7: ('CG' , 'CT' , 0.0018, 1.9080),
8: ('HG3' , 'H1' , 0.0440, 1.3870),
9: ('HG2' , 'H1' , 0.0440, 1.3870),
10: ('SD' , 'S' , -0.2737, 2.0000),
11: ('CE' , 'CT' , -0.0536, 1.9080),
12: ('HE3' , 'H1' , 0.0684, 1.3870),
13: ('HE2' , 'H1' , 0.0684, 1.3870),
14: ('HE1' , 'H1' , 0.0684, 1.3870),
15: ('C' , 'C' , 0.5973, 1.9080),
16: ('O' , 'O' , -0.5679, 1.6612),
},
),
(
'C<0>(=O<1>)[C@]<2>([H]<3>)([C@@]<4>([H]<5>)([H]<6>)[C@@]<7>([H]<8>)([H]<9>)S<10>[C@]<11>([H]<12>)([H]<13>)[H]<14>)[N@+]<15>([H]<16>)([H]<17>)[H]<18>',
{
0: ('C' , 'C' , 0.6123, 1.9080),
1: ('O' , 'O' , -0.5713, 1.6612),
2: ('CA' , 'CT' , 0.0221, 1.9080),
3: ('HA' , 'HP' , 0.1116, 1.1000),
4: ('CB' , 'CT' , 0.0865, 1.9080),
5: ('HB3' , 'HC' , 0.0125, 1.4870),
6: ('HB2' , 'HC' , 0.0125, 1.4870),
7: ('CG' , 'CT' , 0.0334, 1.9080),
8: ('HG3' , 'H1' , 0.0292, 1.3870),
9: ('HG2' , 'H1' , 0.0292, 1.3870),
10: ('SD' , 'S' , -0.2774, 2.0000),
11: ('CE' , 'CT' , -0.0341, 1.9080),
12: ('HE3' , 'H1' , 0.0597, 1.3870),
13: ('HE2' , 'H1' , 0.0597, 1.3870),
14: ('HE1' , 'H1' , 0.0597, 1.3870),
15: ('N' , 'N3' , 0.1592, 1.8240),
16: ('H3' , 'H' , 0.1984, 0.6000),
17: ('H2' , 'H' , 0.1984, 0.6000),
18: ('H1' , 'H' , 0.1984, 0.6000),
},
),
(
'N<0>([H]<1>)[C@@]<2>([H]<3>)([C@@]<4>([H]<5>)([H]<6>)[C@@]<7>([H]<8>)([H]<9>)S<10>[C@]<11>([H]<12>)([H]<13>)[H]<14>)C<15>([O-]<16>)=O<17>',
{
0: ('N' , 'N' , -0.3821, 1.8240),
1: ('H' , 'H' , 0.2681, 0.6000),
2: ('CA' , 'CT' , -0.2597, 1.9080),
3: ('HA' , 'H1' , 0.1277, 1.3870),
4: ('CB' , 'CT' , -0.0236, 1.9080),
5: ('HB3' , 'HC' , 0.0480, 1.4870),
6: ('HB2' , 'HC' , 0.0480, 1.4870),
7: ('CG' , 'CT' , 0.0492, 1.9080),
8: ('HG3' , 'H1' , 0.0317, 1.3870),
9: ('HG2' , 'H1' , 0.0317, 1.3870),
10: ('SD' , 'S' , -0.2692, 2.0000),
11: ('CE' , 'CT' , -0.0376, 1.9080),
12: ('HE3' , 'H1' , 0.0625, 1.3870),
13: ('HE2' , 'H1' , 0.0625, 1.3870),
14: ('HE1' , 'H1' , 0.0625, 1.3870),
15: ('C' , 'C' , 0.8013, 1.9080),
16: ('OXT' , 'O2' , -0.8105, 1.6612),
17: ('O' , 'O2' , -0.8105, 1.6612),
},
),
],
'PHE': [
(
'N<0>([H]<1>)[C@@]<2>([H]<3>)([C@@]<4>([H]<5>)([H]<6>)C<7>=1C<8>([H]<9>)=C<10>([H]<11>)C<12>([H]<13>)=C<14>([H]<15>)C<16>=1[H]<17>)C<18>=O<19>',
{
0: ('N' , 'N' , -0.4157, 1.8240),
1: ('H' , 'H' , 0.2719, 0.6000),
2: ('CA' , 'CT' , -0.0024, 1.9080),
3: ('HA' , 'H1' , 0.0978, 1.3870),
4: ('CB' , 'CT' , -0.0343, 1.9080),
5: ('HB3' , 'HC' , 0.0295, 1.4870),
6: ('HB2' , 'HC' , 0.0295, 1.4870),
7: ('CG' , 'CA' , 0.0118, 1.9080),
8: ('CD2' , 'CA' , -0.1256, 1.9080),
9: ('HD2' , 'HA' , 0.1330, 1.4590),
10: ('CE2' , 'CA' , -0.1704, 1.9080),
11: ('HE2' , 'HA' , 0.1430, 1.4590),
12: ('CZ' , 'CA' , -0.1072, 1.9080),
13: ('HZ' , 'HA' , 0.1297, 1.4590),
14: ('CE1' , 'CA' , -0.1704, 1.9080),
15: ('HE1' , 'HA' , 0.1430, 1.4590),
16: ('CD1' , 'CA' , -0.1256, 1.9080),
17: ('HD1' , 'HA' , 0.1330, 1.4590),
18: ('C' , 'C' , 0.5973, 1.9080),
19: ('O' , 'O' , -0.5679, 1.6612),
},
),
(
'C<0>(=O<1>)[C@]<2>([H]<3>)([C@@]<4>([H]<5>)([H]<6>)C<7>=1C<8>([H]<9>)=C<10>([H]<11>)C<12>([H]<13>)=C<14>([H]<15>)C<16>=1[H]<17>)[N@+]<18>([H]<19>)([H]<20>)[H]<21>',
{
0: ('C' , 'C' , 0.6123, 1.9080),
1: ('O' , 'O' , -0.5713, 1.6612),
2: ('CA' , 'CT' , 0.0733, 1.9080),
3: ('HA' , 'HP' , 0.1041, 1.1000),
4: ('CB' , 'CT' , 0.0330, 1.9080),
5: ('HB3' , 'HC' , 0.0104, 1.4870),
6: ('HB2' , 'HC' , 0.0104, 1.4870),
7: ('CG' , 'CA' , 0.0031, 1.9080),
8: ('CD2' , 'CA' , -0.1391, 1.9080),
9: ('HD2' , 'HA' , 0.1374, 1.4590),
10: ('CE2' , 'CA' , -0.1603, 1.9080),
11: ('HE2' , 'HA' , 0.1433, 1.4590),
12: ('CZ' , 'CA' , -0.1208, 1.9080),
13: ('HZ' , 'HA' , 0.1329, 1.4590),
14: ('CE1' , 'CA' , -0.1602, 1.9080),
15: ('HE1' , 'HA' , 0.1433, 1.4590),
16: ('CD1' , 'CA' , -0.1392, 1.9080),
17: ('HD1' , 'HA' , 0.1374, 1.4590),
18: ('N' , 'N3' , 0.1737, 1.8240),
19: ('H3' , 'H' , 0.1921, 0.6000),
20: ('H2' , 'H' , 0.1921, 0.6000),
21: ('H1' , 'H' , 0.1921, 0.6000),
},
),
(
'N<0>([H]<1>)[C@@]<2>([H]<3>)([C@@]<4>([H]<5>)([H]<6>)C<7>=1C<8>([H]<9>)=C<10>([H]<11>)C<12>([H]<13>)=C<14>([H]<15>)C<16>=1[H]<17>)C<18>([O-]<19>)=O<20>',
{
0: ('N' , 'N' , -0.3821, 1.8240),
1: ('H' , 'H' , 0.2681, 0.6000),
2: ('CA' , 'CT' , -0.1825, 1.9080),
3: ('HA' , 'H1' , 0.1098, 1.3870),
4: ('CB' , 'CT' , -0.0959, 1.9080),
5: ('HB3' , 'HC' , 0.0443, 1.4870),
6: ('HB2' , 'HC' , 0.0443, 1.4870),
7: ('CG' , 'CA' , 0.0552, 1.9080),
8: ('CD2' , 'CA' , -0.1300, 1.9080),
9: ('HD2' , 'HA' , 0.1408, 1.4590),
10: ('CE2' , 'CA' , -0.1847, 1.9080),
11: ('HE2' , 'HA' , 0.1461, 1.4590),
12: ('CZ' , 'CA' , -0.0944, 1.9080),
13: ('HZ' , 'HA' , 0.1280, 1.4590),
14: ('CE1' , 'CA' , -0.1847, 1.9080),
15: ('HE1' , 'HA' , 0.1461, 1.4590),
16: ('CD1' , 'CA' , -0.1300, 1.9080),
17: ('HD1' , 'HA' , 0.1408, 1.4590),
18: ('C' , 'C' , 0.7660, 1.9080),
19: ('OXT' , 'O2' , -0.8026, 1.6612),
20: ('O' , 'O2' , -0.8026, 1.6612),
},
),
],
'PRO': [
(
'N<0>1[C@]<1>([H]<2>)([H]<3>)[C@]<4>([H]<5>)([H]<6>)[C@]<7>([H]<8>)([H]<9>)[C@@]<10>1([H]<11>)C<12>=O<13>',
{
0: ('N' , 'N' , -0.2548, 1.8240),
1: ('CD' , 'CT' , 0.0192, 1.9080),
2: ('HD3' , 'H1' , 0.0391, 1.3870),
3: ('HD2' , 'H1' , 0.0391, 1.3870),
4: ('CG' , 'CT' , 0.0189, 1.9080),
5: ('HG3' , 'HC' , 0.0213, 1.4870),
6: ('HG2' , 'HC' , 0.0213, 1.4870),
7: ('CB' , 'CT' , -0.0070, 1.9080),
8: ('HB3' , 'HC' , 0.0253, 1.4870),
9: ('HB2' , 'HC' , 0.0253, 1.4870),
10: ('CA' , 'CT' , -0.0266, 1.9080),
11: ('HA' , 'H1' , 0.0641, 1.3870),
12: ('C' , 'C' , 0.5896, 1.9080),
13: ('O' , 'O' , -0.5748, 1.6612),
},
),
(
'C<0>(=O<1>)[C@]<2>1([H]<3>)[C@@]<4>([H]<5>)([H]<6>)[C@@]<7>([H]<8>)([H]<9>)[C@@]<10>([H]<11>)([H]<12>)[N@@+]<13>1([H]<14>)[H]<15>',
{
0: ('C' , 'C' , 0.5260, 1.9080),
1: ('O' , 'O' , -0.5000, 1.6612),
2: ('CA' , 'CT' , 0.1000, 1.9080),
3: ('HA' , 'HP' , 0.1000, 1.1000),
4: ('CB' , 'CT' , -0.1150, 1.9080),
5: ('HB3' , 'HC' , 0.1000, 1.4870),
6: ('HB2' , 'HC' , 0.1000, 1.4870),
7: ('CG' , 'CT' , -0.1210, 1.9080),
8: ('HG3' , 'HC' , 0.1000, 1.4870),
9: ('HG2' , 'HC' , 0.1000, 1.4870),
10: ('CD' , 'CT' , -0.0120, 1.9080),
11: ('HD3' , 'H1' , 0.1000, 1.1000),
12: ('HD2' , 'H1' , 0.1000, 1.1000),
13: ('N' , 'N3' , -0.2020, 1.8240),
14: ('H3' , 'H' , 0.3120, 0.6000),
15: ('H2' , 'H' , 0.3120, 0.6000),
},
),
(
'N<0>1[C@]<1>([H]<2>)([H]<3>)[C@]<4>([H]<5>)([H]<6>)[C@]<7>([H]<8>)([H]<9>)[C@@]<10>1([H]<11>)C<12>([O-]<13>)=O<14>',
{
0: ('N' , 'N' , -0.2802, 1.8240),
1: ('CD' , 'CT' , 0.0434, 1.9080),
2: ('HD3' , 'H1' , 0.0331, 1.3870),
3: ('HD2' , 'H1' , 0.0331, 1.3870),
4: ('CG' , 'CT' , 0.0466, 1.9080),
5: ('HG3' , 'HC' , 0.0172, 1.4870),
6: ('HG2' , 'HC' , 0.0172, 1.4870),
7: ('CB' , 'CT' , -0.0543, 1.9080),
8: ('HB3' , 'HC' , 0.0381, 1.4870),
9: ('HB2' , 'HC' , 0.0381, 1.4870),
10: ('CA' , 'CT' , -0.1336, 1.9080),
11: ('HA' , 'H1' , 0.0776, 1.3870),
12: ('C' , 'C' , 0.6631, 1.9080),
13: ('OXT' , 'O2' , -0.7697, 1.6612),
14: ('O' , 'O2' , -0.7697, 1.6612),
},
),
],
'SER': [
(
'N<0>([H]<1>)[C@@]<2>([H]<3>)([C@@]<4>([H]<5>)([H]<6>)O<7>[H]<8>)C<9>=O<10>',
{
0: ('N' , 'N' , -0.4157, 1.8240),
1: ('H' , 'H' , 0.2719, 0.6000),
2: ('CA' , 'CT' , -0.0249, 1.9080),
3: ('HA' , 'H1' , 0.0843, 1.3870),
4: ('CB' , 'CT' , 0.2117, 1.9080),
5: ('HB3' , 'H1' , 0.0352, 1.3870),
6: ('HB2' , 'H1' , 0.0352, 1.3870),
7: ('OG' , 'OH' , -0.6546, 1.7210),
8: ('HG' , 'HO' , 0.4275, 0.0000),
9: ('C' , 'C' , 0.5973, 1.9080),
10: ('O' , 'O' , -0.5679, 1.6612),
},
),
(
'C<0>(=O<1>)[C@]<2>([H]<3>)([C@@]<4>([H]<5>)([H]<6>)O<7>[H]<8>)[N@+]<9>([H]<10>)([H]<11>)[H]<12>',
{
0: ('C' , 'C' , 0.6163, 1.9080),
1: ('O' , 'O' , -0.5722, 1.6612),
2: ('CA' , 'CT' , 0.0567, 1.9080),
3: ('HA' , 'HP' , 0.0782, 1.1000),
4: ('CB' , 'CT' , 0.2596, 1.9080),
5: ('HB3' , 'H1' , 0.0273, 1.3870),
6: ('HB2' , 'H1' , 0.0273, 1.3870),
7: ('OG' , 'OH' , -0.6714, 1.7210),
8: ('HG' , 'HO' , 0.4239, 0.0000),
9: ('N' , 'N3' , 0.1849, 1.8240),
10: ('H3' , 'H' , 0.1898, 0.6000),
11: ('H2' , 'H' , 0.1898, 0.6000),
12: ('H1' , 'H' , 0.1898, 0.6000),
},
),
(
'N<0>([H]<1>)[C@@]<2>([H]<3>)([C@@]<4>([H]<5>)([H]<6>)O<7>[H]<8>)C<9>([O-]<10>)=O<11>',
{
0: ('N' , 'N' , -0.3821, 1.8240),
1: ('H' , 'H' , 0.2681, 0.6000),
2: ('CA' , 'CT' , -0.2722, 1.9080),
3: ('HA' , 'H1' , 0.1304, 1.3870),
4: ('CB' , 'CT' , 0.1123, 1.9080),
5: ('HB3' , 'H1' , 0.0813, 1.3870),
6: ('HB2' , 'H1' , 0.0813, 1.3870),
7: ('OG' , 'OH' , -0.6514, 1.7210),
8: ('HG' , 'HO' , 0.4474, 0.0000),
9: ('C' , 'C' , 0.8113, 1.9080),
10: ('OXT' , 'O2' , -0.8132, 1.6612),
11: ('O' , 'O2' , -0.8132, 1.6612),
},
),
],
'THR': [
(
'N<0>([H]<1>)[C@@]<2>([H]<3>)([C@]<4>([H]<5>)(O<6>[H]<7>)[C@]<8>([H]<9>)([H]<10>)[H]<11>)C<12>=O<13>',
{
0: ('N' , 'N' , -0.4157, 1.8240),
1: ('H' , 'H' , 0.2719, 0.6000),
2: ('CA' , 'CT' , -0.0389, 1.9080),
3: ('HA' , 'H1' , 0.1007, 1.3870),
4: ('CB' , 'CT' , 0.3654, 1.9080),
5: ('HB' , 'H1' , 0.0043, 1.3870),
6: ('OG1' , 'OH' , -0.6761, 1.7210),
7: ('HG1' , 'HO' , 0.4102, 0.0000),
8: ('CG2' , 'CT' , -0.2438, 1.9080),
9: ('HG23', 'HC' , 0.0642, 1.4870),
10: ('HG22', 'HC' , 0.0642, 1.4870),
11: ('HG21', 'HC' , 0.0642, 1.4870),
12: ('C' , 'C' , 0.5973, 1.9080),
13: ('O' , 'O' , -0.5679, 1.6612),
},
),
(
'C<0>(=O<1>)[C@]<2>([H]<3>)([C@]<4>([H]<5>)(O<6>[H]<7>)[C@]<8>([H]<9>)([H]<10>)[H]<11>)[N@+]<12>([H]<13>)([H]<14>)[H]<15>',
{
0: ('C' , 'C' , 0.6163, 1.9080),
1: ('O' , 'O' , -0.5722, 1.6612),
2: ('CA' , 'CT' , 0.0034, 1.9080),
3: ('HA' , 'HP' , 0.1087, 1.1000),
4: ('CB' , 'CT' , 0.4514, 1.9080),
5: ('HB' , 'H1' , -0.0323, 1.3870),
6: ('OG1' , 'OH' , -0.6764, 1.7210),
7: ('HG1' , 'HO' , 0.4070, 0.0000),
8: ('CG2' , 'CT' , -0.2554, 1.9080),
9: ('HG23', 'HC' , 0.0627, 1.4870),
10: ('HG22', 'HC' , 0.0627, 1.4870),
11: ('HG21', 'HC' , 0.0627, 1.4870),
12: ('N' , 'N3' , 0.1812, 1.8240),
13: ('H3' , 'H' , 0.1934, 0.6000),
14: ('H2' , 'H' , 0.1934, 0.6000),
15: ('H1' , 'H' , 0.1934, 0.6000),
},
),
(
'N<0>([H]<1>)[C@@]<2>([H]<3>)([C@]<4>([H]<5>)(O<6>[H]<7>)[C@]<8>([H]<9>)([H]<10>)[H]<11>)C<12>([O-]<13>)=O<14>',
{
0: ('N' , 'N' , -0.3821, 1.8240),
1: ('H' , 'H' , 0.2681, 0.6000),
2: ('CA' , 'CT' , -0.2420, 1.9080),
3: ('HA' , 'H1' , 0.1207, 1.3870),
4: ('CB' , 'CT' , 0.3025, 1.9080),
5: ('HB' , 'H1' , 0.0078, 1.3870),
6: ('OG1' , 'OH' , -0.6496, 1.7210),
7: ('HG1' , 'HO' , 0.4119, 0.0000),
8: ('CG2' , 'CT' , -0.1853, 1.9080),
9: ('HG23', 'HC' , 0.0586, 1.4870),
10: ('HG22', 'HC' , 0.0586, 1.4870),
11: ('HG21', 'HC' , 0.0586, 1.4870),
12: ('C' , 'C' , 0.7810, 1.9080),
13: ('OXT' , 'O2' , -0.8044, 1.6612),
14: ('O' , 'O2' , -0.8044, 1.6612),
},
),
],
'TRP': [
(
'N<0>([H]<1>)[C@@]<2>([H]<3>)([C@@]<4>([H]<5>)([H]<6>)C<7>1=C<8>([H]<9>)N<10>([H]<11>)C<12>=2C<13>([H]<14>)=C<15>([H]<16>)C<17>([H]<18>)=C<19>([H]<20>)C<21>1=2)C<22>=O<23>',
{
0: ('N' , 'N' , -0.4157, 1.8240),
1: ('H' , 'H' , 0.2719, 0.6000),
2: ('CA' , 'CT' , -0.0275, 1.9080),
3: ('HA' , 'H1' , 0.1123, 1.3870),
4: ('CB' , 'CT' , -0.0050, 1.9080),
5: ('HB3' , 'HC' , 0.0339, 1.4870),
6: ('HB2' , 'HC' , 0.0339, 1.4870),
7: ('CG' , 'C*' , -0.1415, 1.9080),
8: ('CD1' , 'CW' , -0.1638, 1.9080),
9: ('HD1' , 'H4' , 0.2062, 1.4090),
10: ('NE1' , 'NA' , -0.3418, 1.8240),
11: ('HE1' , 'H' , 0.3412, 0.6000),
12: ('CE2' , 'CN' , 0.1380, 1.9080),
13: ('CZ2' , 'CA' , -0.2601, 1.9080),
14: ('HZ2' , 'HA' , 0.1572, 1.4590),
15: ('CH2' , 'CA' , -0.1134, 1.9080),
16: ('HH2' , 'HA' , 0.1417, 1.4590),
17: ('CZ3' , 'CA' , -0.1972, 1.9080),
18: ('HZ3' , 'HA' , 0.1447, 1.4590),
19: ('CE3' , 'CA' , -0.2387, 1.9080),
20: ('HE3' , 'HA' , 0.1700, 1.4590),
21: ('CD2' , 'CB' , 0.1243, 1.9080),
22: ('C' , 'C' , 0.5973, 1.9080),
23: ('O' , 'O' , -0.5679, 1.6612),
},
),
(
'C<0>(=O<1>)[C@]<2>([H]<3>)([C@@]<4>([H]<5>)([H]<6>)C<7>1=C<8>([H]<9>)N<10>([H]<11>)C<12>=2C<13>([H]<14>)=C<15>([H]<16>)C<17>([H]<18>)=C<19>([H]<20>)C<21>1=2)[N@+]<22>([H]<23>)([H]<24>)[H]<25>',
{
0: ('C' , 'C' , 0.6123, 1.9080),
1: ('O' , 'O' , -0.5713, 1.6612),
2: ('CA' , 'CT' , 0.0421, 1.9080),
3: ('HA' , 'HP' , 0.1162, 1.1000),
4: ('CB' , 'CT' , 0.0543, 1.9080),
5: ('HB3' , 'HC' , 0.0222, 1.4870),
6: ('HB2' , 'HC' , 0.0222, 1.4870),
7: ('CG' , 'C*' , -0.1654, 1.9080),
8: ('CD1' , 'CW' , -0.1788, 1.9080),
9: ('HD1' , 'H4' , 0.2195, 1.4090),
10: ('NE1' , 'NA' , -0.3444, 1.8240),
11: ('HE1' , 'H' , 0.3412, 0.6000),
12: ('CE2' , 'CN' , 0.1575, 1.9080),
13: ('CZ2' , 'CA' , -0.2710, 1.9080),
14: ('HZ2' , 'HA' , 0.1589, 1.4590),
15: ('CH2' , 'CA' , -0.1080, 1.9080),
16: ('HH2' , 'HA' , 0.1411, 1.4590),
17: ('CZ3' , 'CA' , -0.2034, 1.9080),
18: ('HZ3' , 'HA' , 0.1458, 1.4590),
19: ('CE3' , 'CA' , -0.2265, 1.9080),
20: ('HE3' , 'HA' , 0.1646, 1.4590),
21: ('CD2' , 'CB' , 0.1132, 1.9080),
22: ('N' , 'N3' , 0.1913, 1.8240),
23: ('H3' , 'H' , 0.1888, 0.6000),
24: ('H2' , 'H' , 0.1888, 0.6000),
25: ('H1' , 'H' , 0.1888, 0.6000),
},
),
(
'N<0>([H]<1>)[C@@]<2>([H]<3>)([C@@]<4>([H]<5>)([H]<6>)C<7>1=C<8>([H]<9>)N<10>([H]<11>)C<12>=2C<13>([H]<14>)=C<15>([H]<16>)C<17>([H]<18>)=C<19>([H]<20>)C<21>1=2)C<22>([O-]<23>)=O<24>',
{
0: ('N' , 'N' , -0.3821, 1.8240),
1: ('H' , 'H' , 0.2681, 0.6000),
2: ('CA' , 'CT' , -0.2084, 1.9080),
3: ('HA' , 'H1' , 0.1272, 1.3870),
4: ('CB' , 'CT' , -0.0742, 1.9080),
5: ('HB3' , 'HC' , 0.0497, 1.4870),
6: ('HB2' , 'HC' , 0.0497, 1.4870),
7: ('CG' , 'C*' , -0.0796, 1.9080),
8: ('CD1' , 'CW' , -0.1808, 1.9080),
9: ('HD1' , 'H4' , 0.2043, 1.4090),
10: ('NE1' , 'NA' , -0.3316, 1.8240),
11: ('HE1' , 'H' , 0.3413, 0.6000),
12: ('CE2' , 'CN' , 0.1222, 1.9080),
13: ('CZ2' , 'CA' , -0.2594, 1.9080),
14: ('HZ2' , 'HA' , 0.1567, 1.4590),
15: ('CH2' , 'CA' , -0.1020, 1.9080),
16: ('HH2' , 'HA' , 0.1401, 1.4590),
17: ('CZ3' , 'CA' , -0.2287, 1.9080),
18: ('HZ3' , 'HA' , 0.1507, 1.4590),
19: ('CE3' , 'CA' , -0.1837, 1.9080),
20: ('HE3' , 'HA' , 0.1491, 1.4590),
21: ('CD2' , 'CB' , 0.1078, 1.9080),
22: ('C' , 'C' , 0.7658, 1.9080),
23: ('OXT' , 'O2' , -0.8011, 1.6612),
24: ('O' , 'O2' , -0.8011, 1.6612),
},
),
],
'TYR': [
(
'N<0>([H]<1>)[C@@]<2>([H]<3>)([C@@]<4>([H]<5>)([H]<6>)C<7>=1C<8>([H]<9>)=C<10>([H]<11>)C<12>(O<13>[H]<14>)=C<15>([H]<16>)C<17>=1[H]<18>)C<19>=O<20>',
{
0: ('N' , 'N' , -0.4157, 1.8240),
1: ('H' , 'H' , 0.2719, 0.6000),
2: ('CA' , 'CT' , -0.0014, 1.9080),
3: ('HA' , 'H1' , 0.0876, 1.3870),
4: ('CB' , 'CT' , -0.0152, 1.9080),
5: ('HB3' , 'HC' , 0.0295, 1.4870),
6: ('HB2' , 'HC' , 0.0295, 1.4870),
7: ('CG' , 'CA' , -0.0011, 1.9080),
8: ('CD2' , 'CA' , -0.1906, 1.9080),
9: ('HD2' , 'HA' , 0.1699, 1.4590),
10: ('CE2' , 'CA' , -0.2341, 1.9080),
11: ('HE2' , 'HA' , 0.1656, 1.4590),
12: ('CZ' , 'CA' , 0.3226, 1.9080),
13: ('OH' , 'OH' , -0.5579, 1.7210),
14: ('HH' , 'HO' , 0.3992, 0.0000),
15: ('CE1' , 'CA' , -0.2341, 1.9080),
16: ('HE1' , 'HA' , 0.1656, 1.4590),
17: ('CD1' , 'CA' , -0.1906, 1.9080),
18: ('HD1' , 'HA' , 0.1699, 1.4590),
19: ('C' , 'C' , 0.5973, 1.9080),
20: ('O' , 'O' , -0.5679, 1.6612),
},
),
(
'C<0>(=O<1>)[C@]<2>([H]<3>)([C@@]<4>([H]<5>)([H]<6>)C<7>=1C<8>([H]<9>)=C<10>([H]<11>)C<12>(O<13>[H]<14>)=C<15>([H]<16>)C<17>=1[H]<18>)[N@+]<19>([H]<20>)([H]<21>)[H]<22>',
{
0: ('C' , 'C' , 0.6123, 1.9080),
1: ('O' , 'O' , -0.5713, 1.6612),
2: ('CA' , 'CT' , 0.0570, 1.9080),
3: ('HA' , 'H1' , 0.0983, 1.1000),
4: ('CB' , 'CT' , 0.0659, 1.9080),
5: ('HB3' , 'HC' , 0.0102, 1.4870),
6: ('HB2' , 'HC' , 0.0102, 1.4870),
7: ('CG' , 'CA' , -0.0205, 1.9080),
8: ('CD2' , 'CA' , -0.2002, 1.9080),
9: ('HD2' , 'HA' , 0.1720, 1.4590),
10: ('CE2' , 'CA' , -0.2239, 1.9080),
11: ('HE2' , 'HA' , 0.1650, 1.4590),
12: ('CZ' , 'CA' , 0.3139, 1.9080),
13: ('OH' , 'OH' , -0.5578, 1.7210),
14: ('HH' , 'HO' , 0.4001, 0.0000),
15: ('CE1' , 'CA' , -0.2239, 1.9080),
16: ('HE1' , 'HA' , 0.1650, 1.4590),
17: ('CD1' , 'CA' , -0.2002, 1.9080),
18: ('HD1' , 'HA' , 0.1720, 1.4590),
19: ('N' , 'N' , 0.1940, 1.8240),
20: ('H3' , 'H' , 0.1873, 0.6000),
21: ('H2' , 'H' , 0.1873, 0.6000),
22: ('H1' , 'H' , 0.1873, 0.6000),
},
),
(
'N<0>([H]<1>)[C@@]<2>([H]<3>)([C@@]<4>([H]<5>)([H]<6>)C<7>=1C<8>([H]<9>)=C<10>([H]<11>)C<12>(O<13>[H]<14>)=C<15>([H]<16>)C<17>=1[H]<18>)C<19>([O-]<20>)=O<21>',
{
0: ('N' , 'N' , -0.3821, 1.8240),
1: ('H' , 'H' , 0.2681, 0.6000),
2: ('CA' , 'CT' , -0.2015, 1.9080),
3: ('HA' , 'H1' , 0.1092, 1.3870),
4: ('CB' , 'CT' , -0.0752, 1.9080),
5: ('HB3' , 'HC' , 0.0490, 1.4870),
6: ('HB2' , 'HC' , 0.0490, 1.4870),
7: ('CG' , 'CA' , 0.0243, 1.9080),
8: ('CD2' , 'CA' , -0.1922, 1.9080),
9: ('HD2' , 'HA' , 0.1780, 1.4590),
10: ('CE2' , 'CA' , -0.2458, 1.9080),
11: ('HE2' , 'HA' , 0.1673, 1.4590),
12: ('CZ' , 'CA' , 0.3395, 1.9080),
13: ('OH' , 'OH' , -0.5643, 1.7210),
14: ('HH' , 'HO' , 0.4017, 0.0000),
15: ('CE1' , 'CA' , -0.2458, 1.9080),
16: ('HE1' , 'HA' , 0.1673, 1.4590),
17: ('CD1' , 'CA' , -0.1922, 1.9080),
18: ('HD1' , 'HA' , 0.1780, 1.4590),
19: ('C' , 'C' , 0.7817, 1.9080),
20: ('OXT' , 'O2' , -0.8070, 1.6612),
21: ('O' , 'O' , -0.8070, 1.6612),
},
),
],
'VAL': [
(
'N<0>([H]<1>)[C@@]<2>([H]<3>)([C@]<4>([H]<5>)([C@]<6>([H]<7>)([H]<8>)[H]<9>)[C@]<10>([H]<11>)([H]<12>)[H]<13>)C<14>=O<15>',
{
0: ('N' , 'N' , -0.4157, 1.8240),
1: ('H' , 'H' , 0.2719, 0.6000),
2: ('CA' , 'CT' , -0.0875, 1.9080),
3: ('HA' , 'H1' , 0.0969, 1.3870),
4: ('CB' , 'CT' , 0.2985, 1.9080),
5: ('HB' , 'HC' , -0.0297, 1.4870),
6: ('CG2' , 'CT' , -0.3192, 1.9080),
7: ('HG23', 'HC' , 0.0791, 1.4870),
8: ('HG22', 'HC' , 0.0791, 1.4870),
9: ('HG21', 'HC' , 0.0791, 1.4870),
10: ('CG1' , 'CT' , -0.3192, 1.9080),
11: ('HG13', 'HC' , 0.0791, 1.4870),
12: ('HG12', 'HC' , 0.0791, 1.4870),
13: ('HG11', 'HC' , 0.0791, 1.4870),
14: ('C' , 'C' , 0.5973, 1.9080),
15: ('O' , 'O' , -0.5679, 1.6612),
},
),
(
'C<0>(=O<1>)[C@]<2>([H]<3>)([C@]<4>([H]<5>)([C@]<6>([H]<7>)([H]<8>)[H]<9>)[C@]<10>([H]<11>)([H]<12>)[H]<13>)[N@+]<14>([H]<15>)([H]<16>)[H]<17>',
{
0: ('C' , 'C' , 0.6163, 1.9080),
1: ('O' , 'O' , -0.5722, 1.6612),
2: ('CA' , 'CT' , -0.0054, 1.9080),
3: ('HA' , 'HP' , 0.1093, 1.1000),
4: ('CB' , 'CT' , 0.3196, 1.9080),
5: ('HB' , 'HC' , -0.0221, 1.4870),
6: ('CG2' , 'CT' , -0.3129, 1.9080),
7: ('HG23', 'HC' , 0.0735, 1.4870),
8: ('HG22', 'HC' , 0.0735, 1.4870),
9: ('HG21', 'HC' , 0.0735, 1.4870),
10: ('CG1' , 'CT' , -0.3129, 1.9080),
11: ('HG13', 'HC' , 0.0735, 1.4870),
12: ('HG12', 'HC' , 0.0735, 1.4870),
13: ('HG11', 'HC' , 0.0735, 1.4870),
14: ('N' , 'N3' , 0.0577, 1.8240),
15: ('H3' , 'H' , 0.2272, 0.6000),
16: ('H2' , 'H' , 0.2272, 0.6000),
17: ('H1' , 'H' , 0.2272, 0.6000),
},
),
(
'N<0>([H]<1>)[C@@]<2>([H]<3>)([C@]<4>([H]<5>)([C@]<6>([H]<7>)([H]<8>)[H]<9>)[C@]<10>([H]<11>)([H]<12>)[H]<13>)C<14>([O-]<15>)=O<16>',
{
0: ('N' , 'N' , -0.3821, 1.8240),
1: ('H' , 'H' , 0.2681, 0.6000),
2: ('CA' , 'CT' , -0.3438, 1.9080),
3: ('HA' , 'H1' , 0.1438, 1.3870),
4: ('CB' , 'CT' , 0.1940, 1.9080),
5: ('HB' , 'HC' , 0.0308, 1.4870),
6: ('CG2' , 'CT' , -0.3064, 1.9080),
7: ('HG23', 'HC' , 0.0836, 1.4870),
8: ('HG22', 'HC' , 0.0836, 1.4870),
9: ('HG21', 'HC' , 0.0836, 1.4870),
10: ('CG1' , 'CT' , -0.3064, 1.9080),
11: ('HG13', 'HC' , 0.0836, 1.4870),
12: ('HG12', 'HC' , 0.0836, 1.4870),
13: ('HG11', 'HC' , 0.0836, 1.4870),
14: ('C' , 'C' , 0.8350, 1.9080),
15: ('OXT' , 'O2' , -0.8173, 1.6612),
16: ('O' , 'O2' , -0.8173, 1.6612),
},
),
],
'WAT': [
(
'O<0>([H]<1>)[H]<2>',
{
0: ('O' , 'OW' , -0.8340, 1.6612),
1: ('H1' , 'HW' , 0.4170, 0.0000),
2: ('H2' , 'HW' , 0.4170, 0.0000),
},
),
],
'HOH': [
(
'O<0>([H]<1>)[H]<2>',
{
0: ('O' , 'OW' , -0.8340, 1.6612),
1: ('H1' , 'HW' , 0.4170, 0.0000),
2: ('H2' , 'HW' , 0.4170, 0.0000),
},
),
],
'TIP': [
(
'O<0>([H]<1>)[H]<2>',
{
0: ('O' , 'OW' , -0.8340, 1.6612),
1: ('H1' , 'HW' , 0.4170, 0.0000),
2: ('H2' , 'HW' , 0.4170, 0.0000),
},
),
],
}
# also want commong residues like PTyr, PSer,
# missing neutrals GLUH/GLUN,GLH, ASPH/ASH/ASPN, LYSN, ARGN
for alias in (
( 'HIE', 'HIS'), # default HIS is HISE
( 'HISE', 'HIS'),
( 'HISD', 'HID'),
( 'HISP', 'HIP'),
( 'GLUM', 'GLU'), # default -1
( 'ASPM', 'ASP'), # default -1
( 'LYSP', 'LYS'), # default +1
( 'ARGP', 'ARG'), # default +1
):
amber99_dict[alias[0]] = amber99_dict[alias[1]]
| 45.274971
| 213
| 0.290353
| 11,233
| 77,058
| 1.991543
| 0.08021
| 0.084484
| 0.009789
| 0.012695
| 0.876358
| 0.76921
| 0.314336
| 0.312011
| 0.31067
| 0.309597
| 0
| 0.365452
| 0.404033
| 77,058
| 1,701
| 214
| 45.301587
| 0.121679
| 0.002595
| 0
| 0.206021
| 0
| 0.043684
| 0.184728
| 0.117791
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| null | 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
1567a1c422d1524ad5c100bb11b22a7bc71cc90c
| 85
|
py
|
Python
|
lona/default_routes.py
|
korantu/lona
|
5039fa59f37cc32b9c789753af2ed8a8670ab611
|
[
"MIT"
] | 230
|
2021-08-15T20:46:24.000Z
|
2022-03-30T10:17:43.000Z
|
lona/default_routes.py
|
korantu/lona
|
5039fa59f37cc32b9c789753af2ed8a8670ab611
|
[
"MIT"
] | 176
|
2021-08-18T08:19:37.000Z
|
2022-03-29T16:45:06.000Z
|
lona/default_routes.py
|
korantu/lona
|
5039fa59f37cc32b9c789753af2ed8a8670ab611
|
[
"MIT"
] | 13
|
2021-08-20T10:35:04.000Z
|
2022-01-17T15:49:40.000Z
|
from __future__ import annotations
from lona import Route
routes: list[Route] = []
| 14.166667
| 34
| 0.764706
| 11
| 85
| 5.545455
| 0.727273
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.164706
| 85
| 5
| 35
| 17
| 0.859155
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.666667
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
1588bc48cce6f2ee36190c2b62869f83808ebcac
| 60
|
py
|
Python
|
hello.py
|
franTarkenton/rfc-git-demo
|
16d85d474fc26cf06fe0dbb8e957c05a795713bc
|
[
"Apache-2.0"
] | null | null | null |
hello.py
|
franTarkenton/rfc-git-demo
|
16d85d474fc26cf06fe0dbb8e957c05a795713bc
|
[
"Apache-2.0"
] | null | null | null |
hello.py
|
franTarkenton/rfc-git-demo
|
16d85d474fc26cf06fe0dbb8e957c05a795713bc
|
[
"Apache-2.0"
] | null | null | null |
import os
print(os.environ['SOURCE_URL'])
print("hello")
| 8.571429
| 31
| 0.7
| 9
| 60
| 4.555556
| 0.777778
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.116667
| 60
| 6
| 32
| 10
| 0.773585
| 0
| 0
| 0
| 0
| 0
| 0.258621
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.333333
| 0
| 0.333333
| 0.666667
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 1
|
0
| 5
|
159d650b5df63b461ffc991e6eadad6b15b94394
| 181
|
py
|
Python
|
functions.py
|
HamzaBamohammed/hambam-unconstraint-optimization
|
f710f31883ec60d231ec6e8bf168805f7d455a98
|
[
"MIT"
] | 4
|
2022-02-19T03:54:23.000Z
|
2022-02-25T00:03:14.000Z
|
functions.py
|
HamzaBamohammed/hambam-unconstraint-optimization
|
f710f31883ec60d231ec6e8bf168805f7d455a98
|
[
"MIT"
] | null | null | null |
functions.py
|
HamzaBamohammed/hambam-unconstraint-optimization
|
f710f31883ec60d231ec6e8bf168805f7d455a98
|
[
"MIT"
] | null | null | null |
def g(x):
return x**2
def gp(x):
return 2*x
def gpp(x):
return 2
def h(x):
return 0.5*(x[0]**2 + x[1]**2)
def H(X,Y):
return 0.5*(X**2 + Y**2)
| 12.066667
| 35
| 0.430939
| 40
| 181
| 1.95
| 0.3
| 0.358974
| 0.205128
| 0.153846
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.109244
| 0.342541
| 181
| 14
| 36
| 12.928571
| 0.546218
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| false
| 0
| 0
| 0.5
| 1
| 0
| 0
| 0
| 1
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
|
0
| 5
|
ec6818e98047f397f8496b01b2835aa825925404
| 194
|
py
|
Python
|
DeepAlignmentNetwork/menpofit/math/__init__.py
|
chiawei-liu/DeepAlignmentNetwork
|
52621cd2f697abe372b88c9ea0ee08f0d93b43d8
|
[
"MIT"
] | 220
|
2019-09-01T01:52:04.000Z
|
2022-03-28T12:52:07.000Z
|
DeepAlignmentNetwork/menpofit/math/__init__.py
|
chiawei-liu/DeepAlignmentNetwork
|
52621cd2f697abe372b88c9ea0ee08f0d93b43d8
|
[
"MIT"
] | 80
|
2015-01-05T16:17:39.000Z
|
2020-11-22T13:42:00.000Z
|
DeepAlignmentNetwork/menpofit/math/__init__.py
|
chiawei-liu/DeepAlignmentNetwork
|
52621cd2f697abe372b88c9ea0ee08f0d93b43d8
|
[
"MIT"
] | 64
|
2015-02-02T15:11:38.000Z
|
2022-02-28T06:19:31.000Z
|
from .regression import (IRLRegression, IIRLRegression, PCRRegression,
OptimalLinearRegression, OPPRegression)
from .correlationfilter import mccf, imccf, mosse, imosse
| 48.5
| 70
| 0.737113
| 15
| 194
| 9.533333
| 0.866667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.206186
| 194
| 3
| 71
| 64.666667
| 0.928571
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.666667
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
ec70af5d460d4c7090cd2308c7cf3f9f600dc196
| 42
|
py
|
Python
|
dyno.py
|
Nouhelgod/python_JOBA_telegram
|
c8cfb9cb1fd69aa306cef1287075566c78e62c0d
|
[
"MIT"
] | 2
|
2021-04-07T15:12:32.000Z
|
2021-04-09T20:47:17.000Z
|
dyno.py
|
Nouhelgod/python_JOBA_telegram
|
c8cfb9cb1fd69aa306cef1287075566c78e62c0d
|
[
"MIT"
] | null | null | null |
dyno.py
|
Nouhelgod/python_JOBA_telegram
|
c8cfb9cb1fd69aa306cef1287075566c78e62c0d
|
[
"MIT"
] | 1
|
2021-04-07T15:12:16.000Z
|
2021-04-07T15:12:16.000Z
|
import os
os.system('python src/main.py')
| 14
| 31
| 0.738095
| 8
| 42
| 3.875
| 0.875
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.095238
| 42
| 3
| 31
| 14
| 0.815789
| 0
| 0
| 0
| 0
| 0
| 0.418605
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.