hexsha
string | size
int64 | ext
string | lang
string | max_stars_repo_path
string | max_stars_repo_name
string | max_stars_repo_head_hexsha
string | max_stars_repo_licenses
list | max_stars_count
int64 | max_stars_repo_stars_event_min_datetime
string | max_stars_repo_stars_event_max_datetime
string | max_issues_repo_path
string | max_issues_repo_name
string | max_issues_repo_head_hexsha
string | max_issues_repo_licenses
list | max_issues_count
int64 | max_issues_repo_issues_event_min_datetime
string | max_issues_repo_issues_event_max_datetime
string | max_forks_repo_path
string | max_forks_repo_name
string | max_forks_repo_head_hexsha
string | max_forks_repo_licenses
list | max_forks_count
int64 | max_forks_repo_forks_event_min_datetime
string | max_forks_repo_forks_event_max_datetime
string | content
string | avg_line_length
float64 | max_line_length
int64 | alphanum_fraction
float64 | qsc_code_num_words_quality_signal
int64 | qsc_code_num_chars_quality_signal
float64 | qsc_code_mean_word_length_quality_signal
float64 | qsc_code_frac_words_unique_quality_signal
float64 | qsc_code_frac_chars_top_2grams_quality_signal
float64 | qsc_code_frac_chars_top_3grams_quality_signal
float64 | qsc_code_frac_chars_top_4grams_quality_signal
float64 | qsc_code_frac_chars_dupe_5grams_quality_signal
float64 | qsc_code_frac_chars_dupe_6grams_quality_signal
float64 | qsc_code_frac_chars_dupe_7grams_quality_signal
float64 | qsc_code_frac_chars_dupe_8grams_quality_signal
float64 | qsc_code_frac_chars_dupe_9grams_quality_signal
float64 | qsc_code_frac_chars_dupe_10grams_quality_signal
float64 | qsc_code_frac_chars_replacement_symbols_quality_signal
float64 | qsc_code_frac_chars_digital_quality_signal
float64 | qsc_code_frac_chars_whitespace_quality_signal
float64 | qsc_code_size_file_byte_quality_signal
float64 | qsc_code_num_lines_quality_signal
float64 | qsc_code_num_chars_line_max_quality_signal
float64 | qsc_code_num_chars_line_mean_quality_signal
float64 | qsc_code_frac_chars_alphabet_quality_signal
float64 | qsc_code_frac_chars_comments_quality_signal
float64 | qsc_code_cate_xml_start_quality_signal
float64 | qsc_code_frac_lines_dupe_lines_quality_signal
float64 | qsc_code_cate_autogen_quality_signal
float64 | qsc_code_frac_lines_long_string_quality_signal
float64 | qsc_code_frac_chars_string_length_quality_signal
float64 | qsc_code_frac_chars_long_word_length_quality_signal
float64 | qsc_code_frac_lines_string_concat_quality_signal
float64 | qsc_code_cate_encoded_data_quality_signal
float64 | qsc_code_frac_chars_hex_words_quality_signal
float64 | qsc_code_frac_lines_prompt_comments_quality_signal
float64 | qsc_code_frac_lines_assert_quality_signal
float64 | qsc_codepython_cate_ast_quality_signal
float64 | qsc_codepython_frac_lines_func_ratio_quality_signal
float64 | qsc_codepython_cate_var_zero_quality_signal
bool | qsc_codepython_frac_lines_pass_quality_signal
float64 | qsc_codepython_frac_lines_import_quality_signal
float64 | qsc_codepython_frac_lines_simplefunc_quality_signal
float64 | qsc_codepython_score_lines_no_logic_quality_signal
float64 | qsc_codepython_frac_lines_print_quality_signal
float64 | qsc_code_num_words
int64 | qsc_code_num_chars
int64 | qsc_code_mean_word_length
int64 | qsc_code_frac_words_unique
null | qsc_code_frac_chars_top_2grams
int64 | qsc_code_frac_chars_top_3grams
int64 | qsc_code_frac_chars_top_4grams
int64 | qsc_code_frac_chars_dupe_5grams
int64 | qsc_code_frac_chars_dupe_6grams
int64 | qsc_code_frac_chars_dupe_7grams
int64 | qsc_code_frac_chars_dupe_8grams
int64 | qsc_code_frac_chars_dupe_9grams
int64 | qsc_code_frac_chars_dupe_10grams
int64 | qsc_code_frac_chars_replacement_symbols
int64 | qsc_code_frac_chars_digital
int64 | qsc_code_frac_chars_whitespace
int64 | qsc_code_size_file_byte
int64 | qsc_code_num_lines
int64 | qsc_code_num_chars_line_max
int64 | qsc_code_num_chars_line_mean
int64 | qsc_code_frac_chars_alphabet
int64 | qsc_code_frac_chars_comments
int64 | qsc_code_cate_xml_start
int64 | qsc_code_frac_lines_dupe_lines
int64 | qsc_code_cate_autogen
int64 | qsc_code_frac_lines_long_string
int64 | qsc_code_frac_chars_string_length
int64 | qsc_code_frac_chars_long_word_length
int64 | qsc_code_frac_lines_string_concat
null | qsc_code_cate_encoded_data
int64 | qsc_code_frac_chars_hex_words
int64 | qsc_code_frac_lines_prompt_comments
int64 | qsc_code_frac_lines_assert
int64 | qsc_codepython_cate_ast
int64 | qsc_codepython_frac_lines_func_ratio
int64 | qsc_codepython_cate_var_zero
int64 | qsc_codepython_frac_lines_pass
int64 | qsc_codepython_frac_lines_import
int64 | qsc_codepython_frac_lines_simplefunc
int64 | qsc_codepython_score_lines_no_logic
int64 | qsc_codepython_frac_lines_print
int64 | effective
string | hits
int64 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
381aff7075f7d0c6469db5d793171d22d50b86cd
| 242
|
py
|
Python
|
src/rating/admin.py
|
krismwas/pata-app
|
2c7e29b7801f17047ccfa0c824a77b0415a8f029
|
[
"MIT"
] | null | null | null |
src/rating/admin.py
|
krismwas/pata-app
|
2c7e29b7801f17047ccfa0c824a77b0415a8f029
|
[
"MIT"
] | null | null | null |
src/rating/admin.py
|
krismwas/pata-app
|
2c7e29b7801f17047ccfa0c824a77b0415a8f029
|
[
"MIT"
] | null | null | null |
from django.contrib import admin

from .models import Rating, DoctorReviewOpinionCount, ReportedAbuse

# Expose the rating models in the Django admin UI. admin.site.register
# accepts an iterable of models, which is equivalent to one register()
# call per model, in the same order.
admin.site.register((Rating, DoctorReviewOpinionCount, ReportedAbuse))
| 24.2
| 67
| 0.838843
| 27
| 242
| 7.518519
| 0.481481
| 0.133005
| 0.251232
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.086777
| 242
| 10
| 68
| 24.2
| 0.918552
| 0.107438
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.4
| 0
| 0.4
| 0
| 1
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 4
|
3839f674333d5f6f3a94c13c1171991ba7da1212
| 14,492
|
py
|
Python
|
2019/2019_23b.py
|
davidxiao93/Advent-of-Code
|
29503100ae4eb46b048fc3ab68ff0181c6f00ee5
|
[
"MIT"
] | null | null | null |
2019/2019_23b.py
|
davidxiao93/Advent-of-Code
|
29503100ae4eb46b048fc3ab68ff0181c6f00ee5
|
[
"MIT"
] | null | null | null |
2019/2019_23b.py
|
davidxiao93/Advent-of-Code
|
29503100ae4eb46b048fc3ab68ff0181c6f00ee5
|
[
"MIT"
] | null | null | null |
from typing import List, Tuple, Dict
from collections import namedtuple
input = """3,62,1001,62,11,10,109,2229,105,1,0,1392,1198,1169,736,2060,1136,767,1041,928,1072,1996,1662,1567,994,800,1363,571,2091,1928,1493,2192,705,1695,1800,1596,1464,868,965,1328,1631,1965,835,1262,2029,1101,1433,1829,639,899,1726,2161,1227,672,2122,1899,1526,1763,1295,604,1868,0,0,0,0,0,0,0,0,0,0,0,0,3,64,1008,64,-1,62,1006,62,88,1006,61,170,1106,0,73,3,65,20101,0,64,1,20102,1,66,2,21102,1,105,0,1106,0,436,1201,1,-1,64,1007,64,0,62,1005,62,73,7,64,67,62,1006,62,73,1002,64,2,133,1,133,68,133,102,1,0,62,1001,133,1,140,8,0,65,63,2,63,62,62,1005,62,73,1002,64,2,161,1,161,68,161,1101,1,0,0,1001,161,1,169,102,1,65,0,1102,1,1,61,1102,1,0,63,7,63,67,62,1006,62,203,1002,63,2,194,1,68,194,194,1006,0,73,1001,63,1,63,1105,1,178,21101,0,210,0,105,1,69,2101,0,1,70,1101,0,0,63,7,63,71,62,1006,62,250,1002,63,2,234,1,72,234,234,4,0,101,1,234,240,4,0,4,70,1001,63,1,63,1105,1,218,1105,1,73,109,4,21102,0,1,-3,21101,0,0,-2,20207,-2,67,-1,1206,-1,293,1202,-2,2,283,101,1,283,283,1,68,283,283,22001,0,-3,-3,21201,-2,1,-2,1106,0,263,21202,-3,1,-3,109,-4,2105,1,0,109,4,21102,1,1,-3,21102,0,1,-2,20207,-2,67,-1,1206,-1,342,1202,-2,2,332,101,1,332,332,1,68,332,332,22002,0,-3,-3,21201,-2,1,-2,1106,0,312,22102,1,-3,-3,109,-4,2106,0,0,109,1,101,1,68,358,21002,0,1,1,101,3,68,367,20101,0,0,2,21101,376,0,0,1105,1,436,21201,1,0,0,109,-1,2106,0,0,1,2,4,8,16,32,64,128,256,512,1024,2048,4096,8192,16384,32768,65536,131072,262144,524288,1048576,2097152,4194304,8388608,16777216,33554432,67108864,134217728,268435456,536870912,1073741824,2147483648,4294967296,8589934592,17179869184,34359738368,68719476736,137438953472,274877906944,549755813888,1099511627776,2199023255552,4398046511104,8796093022208,17592186044416,35184372088832,70368744177664,140737488355328,281474976710656,562949953421312,1125899906842624,109,8,21202,-6,10,-5,22207,-7,-5,-5,1205,-5,521,21102,1,0,-4,21102,0,1,-3,21101,51,0,-2,21201,-2,-1,-2,1201,-2,385,470,21001,0,0,-1,21202,-3,2,-3,22207,-7,-1,-5,1205,-5,496,21201,-3,1,-3,22102,-1,-1,-5,
22201,-7,-5,-7,22207,-3,-6,-5,1205,-5,515,22102,-1,-6,-5,22201,-3,-5,-3,22201,-1,-4,-4,1205,-2,461,1106,0,547,21102,-1,1,-4,21202,-6,-1,-6,21207,-7,0,-5,1205,-5,547,22201,-7,-6,-7,21201,-4,1,-4,1106,0,529,22102,1,-4,-7,109,-8,2106,0,0,109,1,101,1,68,563,21001,0,0,0,109,-1,2105,1,0,1101,100267,0,66,1101,0,2,67,1102,598,1,68,1101,0,302,69,1101,0,1,71,1102,602,1,72,1105,1,73,0,0,0,0,32,91138,1101,51977,0,66,1102,3,1,67,1101,0,631,68,1102,302,1,69,1101,0,1,71,1102,637,1,72,1105,1,73,0,0,0,0,0,0,39,141837,1102,1,20297,66,1101,2,0,67,1101,666,0,68,1102,302,1,69,1102,1,1,71,1102,1,670,72,1105,1,73,0,0,0,0,31,202718,1101,0,30181,66,1102,1,1,67,1102,1,699,68,1102,556,1,69,1101,2,0,71,1101,701,0,72,1105,1,73,1,10,46,194762,45,241828,1102,84659,1,66,1101,1,0,67,1102,1,732,68,1102,556,1,69,1101,0,1,71,1101,0,734,72,1106,0,73,1,-24,41,54983,1101,14563,0,66,1101,0,1,67,1102,763,1,68,1101,556,0,69,1101,0,1,71,1101,0,765,72,1106,0,73,1,43,18,305589,1102,1,27259,66,1101,1,0,67,1101,794,0,68,1102,1,556,69,1101,0,2,71,1101,0,796,72,1105,1,73,1,2053,24,189586,41,164949,1102,23279,1,66,1101,3,0,67,1101,827,0,68,1102,1,302,69,1101,1,0,71,1101,0,833,72,1106,0,73,0,0,0,0,0,0,8,35158,1101,0,101359,66,1102,2,1,67,1101,862,0,68,1101,302,0,69,1101,0,1,71,1102,866,1,72,1105,1,73,0,0,0,0,39,189116,1101,81869,0,66,1102,1,1,67,1102,1,895,68,1102,556,1,69,1101,0,1,71,1102,897,1,72,1106,0,73,1,4127,34,291513,1102,73681,1,66,1102,1,1,67,1101,926,0,68,1101,0,556,69,1102,0,1,71,1101,928,0,72,1105,1,73,1,1053,1101,17579,0,66,1101,4,0,67,1101,0,955,68,1102,253,1,69,1102,1,1,71,1101,0,963,72,1106,0,73,0,0,0,0,0,0,0,0,16,100267,1102,102677,1,66,1101,0,1,67,1102,1,992,68,1101,0,556,69,1101,0,0,71,1101,0,994,72,1105,1,73,1,1635,1102,21481,1,66,1102,1,1,67,1101,0,1021,68,1102,556,1,69,1101,9,0,71,1102,1,1023,72,1105,1,73,1,2,32,45569,24,94793,10,83786,19,92671,11,14891,5,8087,37,20297,45,60457,45,302285,1101,88853,0,66,1102,1,1,67,1102,1068,1,68,1102,556,1,69,1102,1,1,71,1102,1,1070,72,1106,0,73,1,59,20,14357
8,1101,57793,0,66,1102,1,1,67,1102,1,1099,68,1102,556,1,69,1101,0,0,71,1102,1101,1,72,1106,0,73,1,1975,1102,97171,1,66,1101,0,3,67,1101,0,1128,68,1101,0,302,69,1101,0,1,71,1101,0,1134,72,1106,0,73,0,0,0,0,0,0,8,70316,1102,8087,1,66,1101,0,2,67,1102,1163,1,68,1101,0,302,69,1101,1,0,71,1102,1,1167,72,1105,1,73,0,0,0,0,37,40594,1102,58271,1,66,1101,1,0,67,1102,1196,1,68,1101,0,556,69,1102,1,0,71,1102,1,1198,72,1105,1,73,1,1468,1102,1,87991,66,1102,1,1,67,1102,1225,1,68,1102,556,1,69,1102,0,1,71,1101,1227,0,72,1106,0,73,1,1066,1101,0,54983,66,1101,3,0,67,1102,1,1254,68,1102,302,1,69,1102,1,1,71,1102,1260,1,72,1106,0,73,0,0,0,0,0,0,48,51977,1101,0,45569,66,1102,1,2,67,1102,1289,1,68,1101,302,0,69,1102,1,1,71,1102,1,1293,72,1105,1,73,0,0,0,0,24,284379,1101,87359,0,66,1102,1,2,67,1102,1,1322,68,1101,351,0,69,1102,1,1,71,1102,1326,1,72,1106,0,73,0,0,0,0,255,17599,1102,1,20399,66,1102,1,3,67,1101,0,1355,68,1101,0,302,69,1102,1,1,71,1102,1361,1,72,1105,1,73,0,0,0,0,0,0,39,47279,1102,1,13177,66,1101,1,0,67,1101,1390,0,68,1101,556,0,69,1102,0,1,71,1101,1392,0,72,1105,1,73,1,1410,1102,17599,1,66,1101,0,1,67,1101,0,1419,68,1102,556,1,69,1102,1,6,71,1101,1421,0,72,1105,1,73,1,23694,31,101359,48,103954,48,155931,28,20399,28,40798,28,61197,1101,0,56369,66,1101,0,1,67,1102,1460,1,68,1102,1,556,69,1101,0,1,71,1102,1462,1,72,1106,0,73,1,47163854,10,41893,1101,0,36263,66,1101,0,1,67,1102,1,1491,68,1102,556,1,69,1102,0,1,71,1101,1493,0,72,1105,1,73,1,1357,1102,1,92671,66,1102,2,1,67,1101,1520,0,68,1102,1,302,69,1102,1,1,71,1102,1524,1,72,1106,0,73,0,0,0,0,11,29782,1102,60457,1,66,1101,0,6,67,1102,1,1553,68,1102,302,1,69,1102,1,1,71,1101,1565,0,72,1105,1,73,0,0,0,0,0,0,0,0,0,0,0,0,47,174718,1102,4957,1,66,1102,1,1,67,1101,0,1594,68,1102,556,1,69,1102,0,1,71,1101,0,1596,72,1105,1,73,1,1623,1101,94793,0,66,1101,0,3,67,1102,1,1623,68,1101,0,302,69,1102,1,1,71,1101,1629,0,72,1106,0,73,0,0,0,0,0,0,39,94558,1101,31387,0,66,1102,1,1,67,1102,1,1658,68,1101,0,556,69,1101,0,1,71,1101,0,1660,72,1105
,1,73,1,192,14,46558,1101,0,14891,66,1101,0,2,67,1101,0,1689,68,1102,1,302,69,1102,1,1,71,1102,1,1693,72,1106,0,73,0,0,0,0,5,16174,1102,1,46457,66,1102,1,1,67,1101,1722,0,68,1102,556,1,69,1102,1,1,71,1102,1,1724,72,1105,1,73,1,2341,14,69837,1102,47279,1,66,1101,0,4,67,1101,1753,0,68,1101,253,0,69,1102,1,1,71,1102,1761,1,72,1105,1,73,0,0,0,0,0,0,0,0,47,87359,1101,0,97381,66,1101,4,0,67,1101,0,1790,68,1101,0,302,69,1102,1,1,71,1101,0,1798,72,1106,0,73,0,0,0,0,0,0,0,0,45,120914,1102,68597,1,66,1102,1,1,67,1102,1827,1,68,1101,556,0,69,1101,0,0,71,1102,1,1829,72,1105,1,73,1,1465,1101,0,48857,66,1102,1,1,67,1102,1,1856,68,1102,556,1,69,1101,0,5,71,1101,1858,0,72,1106,0,73,1,1,20,215367,18,203726,34,97171,14,23279,41,109966,1101,3463,0,66,1102,1,1,67,1102,1,1895,68,1101,0,556,69,1101,1,0,71,1102,1,1897,72,1106,0,73,1,160,45,362742,1102,1,52181,66,1102,1,1,67,1101,0,1926,68,1101,556,0,69,1102,1,0,71,1101,1928,0,72,1105,1,73,1,1521,1101,0,101863,66,1102,4,1,67,1102,1,1955,68,1102,302,1,69,1102,1,1,71,1101,0,1963,72,1106,0,73,0,0,0,0,0,0,0,0,8,17579,1101,14173,0,66,1101,0,1,67,1102,1992,1,68,1102,1,556,69,1102,1,1,71,1101,0,1994,72,1105,1,73,1,-3,34,194342,1101,0,41893,66,1102,1,2,67,1102,2023,1,68,1102,302,1,69,1101,0,1,71,1102,2027,1,72,1106,0,73,0,0,0,0,19,185342,1101,0,57787,66,1102,1,1,67,1101,2056,0,68,1101,0,556,69,1102,1,1,71,1102,1,2058,72,1106,0,73,1,-209,18,101863,1102,1,66359,66,1102,1,1,67,1101,2087,0,68,1101,556,0,69,1101,1,0,71,1101,2089,0,72,1106,0,73,1,273,20,71789,1101,0,44293,66,1102,1,1,67,1102,2118,1,68,1102,1,556,69,1101,0,1,71,1102,1,2120,72,1106,0,73,1,125,46,292143,1101,0,95561,66,1101,1,0,67,1102,1,2149,68,1102,1,556,69,1102,1,5,71,1101,0,2151,72,1105,1,73,1,5,20,287156,18,407452,46,97381,46,389524,45,181371,1102,32869,1,66,1101,0,1,67,1102,2188,1,68,1102,1,556,69,1102,1,1,71,1101,0,2190,72,1105,1,73,1,128,16,200534,1101,0,71789,66,1101,4,0,67,1101,0,2219,68,1101,302,0,69,1101,0,1,71,1101,2227,0,72,1105,1,73,0,0,0,0,0,0,0,0,8,52737"""
# NOTE(review): `input` (the program text defined above) shadows the builtin
# of the same name — confirm nothing below needs the builtin.
int_codes = [int(x) for x in input.split(",")]

# One decoded instruction parameter. Modes (as resolved in Computer._read_param):
# 0 = position, 1 = immediate, 2 = relative to the relative base.
Param = namedtuple("Param", ["mode", "value"])

# opcode number -> (parameter count, mnemonic) used by the Computer below.
opcodes = {
    1: (3, "add"),
    2: (3, "multiply"),
    3: (1, "read-input"),
    4: (1, "write-output"),
    5: (2, "jump-if-true"),
    6: (2, "jump-if-false"),
    7: (3, "less-than"),
    8: (3, "equals"),
    9: (1, "adjust-rel-base"),
    99: (0, "halt")
}
class Computer:
    """One Intcode CPU with sparse memory and FIFO input/output queues.

    `run_step` executes exactly one instruction per call so the caller can
    interleave many computers. A `read-input` with an empty queue stores -1
    (non-blocking read) and sets `waiting_for_input`, which the network loop
    below uses to detect an idle machine.
    """

    def __init__(self, int_codes: List[int]):
        # Sparse memory: address -> value. Addresses beyond the program are
        # created lazily with value 0 in _read_param.
        self.int_codes = {
            i: v
            for i, v in enumerate(int_codes)
        }
        self.current_instruction = 0    # instruction pointer
        self.inputs_queue = []
        self.outputs_queue = []
        self.halted = False
        self.waiting_for_input = False  # True after a read found no input
        self.relative_base = 0          # base address for mode-2 parameters

    def _parse_next_instruction(self) -> Tuple[str, List[Param]]:
        """Decode the instruction at the pointer into (mnemonic, params)."""
        # Low two digits are the opcode number.
        opcode = self.int_codes[self.current_instruction] % 100
        if opcode not in opcodes:
            print("unknown opcode")
            exit(1)
        params = []
        # Remaining digits hold one mode digit per parameter, least
        # significant digit first.
        parameter_mode = self.int_codes[self.current_instruction] // 100
        param_values = [
            self.int_codes[x]
            for x in range(
                self.current_instruction + 1,
                self.current_instruction + 1 + opcodes[opcode][0]
            )
        ]
        for param_value in param_values:
            param_mode = parameter_mode % 10
            params.append(Param(param_mode, param_value))
            parameter_mode = parameter_mode // 10
        return opcodes[opcode][1], params

    def _read_param(self, param: Param):
        """Resolve a parameter to its value.

        Mode 0: value is an address; mode 1: value is immediate;
        mode 2: value is an offset from the relative base.
        """
        if param.mode == 0:
            key = param.value
        elif param.mode == 1:
            return param.value
        elif param.mode == 2:
            key = param.value + self.relative_base
        else:
            print("Unknown param mode for reading")
            exit(1)
            return  # shut up the linter
        if key < 0:
            print("Cannot read from negative address")
            exit(1)
        # Uninitialized memory reads as zero.
        if key not in self.int_codes:
            self.int_codes[key] = 0
        return self.int_codes[key]

    def _write_param(self, param: Param, value: int):
        """Store `value` at the address a parameter resolves to.

        Only modes 0 (position) and 2 (relative) are valid write targets.
        """
        if param.mode == 0:
            key = param.value
        elif param.mode == 1:
            print("Cannot write when param mode is 1")
            exit(1)
            return  # shut up the linter
        elif param.mode == 2:
            key = param.value + self.relative_base
        else:
            print("Unknwon param mode for writing")
            exit(1)
            return  # shut up the linter
        if key < 0:
            print("Cannot write to negative address")
            exit(1)
        self.int_codes[key] = value

    def put_input(self, i):
        """Queue one input value and clear the waiting flag."""
        self.inputs_queue.append(i)
        self.waiting_for_input = False

    def has_output(self):
        """Return True when at least one output value is pending."""
        return len(self.outputs_queue) > 0

    def pop_next_output(self):
        """Pop and return the oldest pending output, or None when empty."""
        if len(self.outputs_queue) > 0:
            return self.outputs_queue.pop(0)
        return None

    def run_step(self):
        """Execute one instruction; a no-op once the machine has halted."""
        if self.halted:
            return
        opcode, params = self._parse_next_instruction()
        jumped = False
        if opcode == "add":
            self._write_param(params[2], self._read_param(params[0]) + self._read_param(params[1]))
        elif opcode == "multiply":
            self._write_param(params[2], self._read_param(params[0]) * self._read_param(params[1]))
        elif opcode == "read-input":
            if len(self.inputs_queue) == 0:
                # Non-blocking read: no value available is reported as -1.
                self._write_param(params[0], -1)
                self.waiting_for_input = True
            else:
                self._write_param(params[0], self.inputs_queue.pop(0))
                self.waiting_for_input = False
        elif opcode == "write-output":
            self.outputs_queue.append(self._read_param(params[0]))
        elif opcode == "jump-if-true":
            if self._read_param(params[0]) != 0:
                self.current_instruction = self._read_param(params[1])
                jumped = True
        elif opcode == "jump-if-false":
            if self._read_param(params[0]) == 0:
                self.current_instruction = self._read_param(params[1])
                jumped = True
        elif opcode == "less-than":
            self._write_param(params[2], 1 if self._read_param(params[0]) < self._read_param(params[1]) else 0)
        elif opcode == "equals":
            self._write_param(params[2], 1 if self._read_param(params[0]) == self._read_param(params[1]) else 0)
        elif opcode == "halt":
            self.halted = True
        elif opcode == "adjust-rel-base":
            self.relative_base += self._read_param(params[0])
        else:
            print("unknown opcode")
            exit(1)
        if not jumped:
            # Advance past the opcode word and its parameters.
            self.current_instruction += 1 + len(params)
# Simulate 50 networked Intcode computers plus a NAT at address 255.
network: Dict[int, Computer] = {}
# Per-computer buffer of raw output words; a packet is 3 words:
# (destination address, x, y). Annotation fixed: was the malformed
# `[int, List[int]]`.
output_buffers: Dict[int, List[int]] = {}
for i in range(50):
    network[i] = Computer(int_codes)
    network[i].put_input(i)  # give computer its network address
    output_buffers[i] = []
# Last packet captured by the NAT (address 255).
nat_x = None
nat_y = None
# History of y values the NAT has delivered; used to detect a repeat.
sent_nat_y = []
time_since_last_activity = 0
while True:
    # run single step for all computers
    for i in range(50):
        network[i].run_step()
        if network[i].has_output():
            output_buffers[i].append(network[i].pop_next_output())
    # send messages if needed
    message_activity = False
    for i in range(50):
        if len(output_buffers[i]) == 3:
            message_activity = True
            address = output_buffers[i][0]
            if address == 255:
                # NAT: remember only the most recent packet.
                nat_x = output_buffers[i][1]
                nat_y = output_buffers[i][2]
            else:
                network[address].put_input(output_buffers[i][1])
                network[address].put_input(output_buffers[i][2])
            output_buffers[i] = []
    if not message_activity:
        # check if entire network is idle
        is_idle = True
        for i in range(50):
            is_idle = is_idle and network[i].waiting_for_input
        if is_idle:
            time_since_last_activity += 1
        else:
            time_since_last_activity = 0
    else:
        time_since_last_activity = 0
    # After 100 consecutive idle steps, the NAT wakes the network by sending
    # its stored packet to address 0; the answer is the first y delivered twice
    # in a row.
    if time_since_last_activity > 100:
        if len(sent_nat_y) > 1 and sent_nat_y[-1] == nat_y:
            print(nat_y)
            exit(0)
        sent_nat_y.append(nat_y)
        network[0].put_input(nat_x)
        network[0].put_input(nat_y)
        time_since_last_activity = 0
| 73.191919
| 7,984
| 0.642423
| 3,071
| 14,492
| 2.975252
| 0.171931
| 0.028675
| 0.030535
| 0.030645
| 0.438218
| 0.371347
| 0.301302
| 0.208602
| 0.182773
| 0.15574
| 0
| 0.466176
| 0.146219
| 14,492
| 197
| 7,985
| 73.563452
| 0.272286
| 0.012421
| 0
| 0.28
| 0
| 0.005714
| 0.584266
| 0.557343
| 0
| 0
| 0
| 0
| 0
| 1
| 0.045714
| false
| 0
| 0.011429
| 0.005714
| 0.12
| 0.045714
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
699eb888d3ab7a330ed11811a7c5919493e3467f
| 23
|
py
|
Python
|
BDMesh/_version.py
|
bond-anton/BDMesh
|
e72f1ec96828c41274b82ba67fd06b44fa8b511d
|
[
"Apache-2.0"
] | 49
|
2021-12-12T04:13:24.000Z
|
2022-03-31T12:58:57.000Z
|
enex2notion/version.py
|
vzhd1701/enex2notion
|
d9e0811af6f2c779caf1328c6daa0d6f81290fb3
|
[
"MIT"
] | 11
|
2021-12-03T10:49:54.000Z
|
2022-03-29T20:00:30.000Z
|
BDMesh/_version.py
|
bond-anton/BDMesh
|
e72f1ec96828c41274b82ba67fd06b44fa8b511d
|
[
"Apache-2.0"
] | 3
|
2022-02-04T13:25:21.000Z
|
2022-03-07T17:54:36.000Z
|
# Package version string — TODO(review): confirm it tracks the release tag.
__version__ = "0.2.16"
| 11.5
| 22
| 0.652174
| 4
| 23
| 2.75
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.2
| 0.130435
| 23
| 1
| 23
| 23
| 0.35
| 0
| 0
| 0
| 0
| 0
| 0.26087
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
69f64aaea0d268a893bf1170af178c227098845b
| 454
|
py
|
Python
|
algorithms_selection.py
|
chasingbob/Python_For_Kids
|
807509858bec8b5b28cf9680360487488bc033a8
|
[
"MIT"
] | 1
|
2017-03-24T06:30:52.000Z
|
2017-03-24T06:30:52.000Z
|
algorithms_selection.py
|
chasingbob/Python_For_Kids
|
807509858bec8b5b28cf9680360487488bc033a8
|
[
"MIT"
] | null | null | null |
algorithms_selection.py
|
chasingbob/Python_For_Kids
|
807509858bec8b5b28cf9680360487488bc033a8
|
[
"MIT"
] | null | null | null |
def print_appropriate_message(age, name):
    """Print which school stage *name* is in, based on *age*.

    age < 10        -> primary school
    10 <= age < 20  -> most likely high school
    otherwise       -> too old for school
    """
    if age < 10:
        print('{} is in primary school'.format(name))
    # Chained comparison replaces `age >= 10 and age < 20`; redundant
    # parentheses around conditions dropped.
    elif 10 <= age < 20:
        print('{} is most likely in high school'.format(name))
    else:
        print('{} is too old for school'.format(name))
# Demonstration calls: module-level example values plus two literal cases.
age = 17
name = "John"
for example_age, example_name in ((age, name), (8, 'Angie'), (88, 'Grandpa Jack')):
    print_appropriate_message(example_age, example_name)
| 21.619048
| 63
| 0.61674
| 60
| 454
| 4.533333
| 0.5
| 0.235294
| 0.338235
| 0.191176
| 0.220588
| 0
| 0
| 0
| 0
| 0
| 0
| 0.032448
| 0.253304
| 454
| 20
| 64
| 22.7
| 0.769912
| 0
| 0
| 0
| 0
| 0
| 0.231481
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.083333
| false
| 0
| 0
| 0
| 0.083333
| 0.583333
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
|
0
| 4
|
0e1078575a686c18dc3eb28d77bba9b293a8c442
| 335
|
py
|
Python
|
PyStacks/test/test_iot.py
|
0xack13/PyStacks
|
13136c43089c241680beb216a233d1846119dd7c
|
[
"MIT"
] | 11
|
2018-02-15T04:27:05.000Z
|
2020-10-02T11:20:08.000Z
|
PyStacks/test/test_iot.py
|
0xack13/PyStacks
|
13136c43089c241680beb216a233d1846119dd7c
|
[
"MIT"
] | 3
|
2018-02-15T05:46:54.000Z
|
2018-03-05T04:46:51.000Z
|
PyStacks/test/test_iot.py
|
0xack13/PyStacks
|
13136c43089c241680beb216a233d1846119dd7c
|
[
"MIT"
] | 8
|
2018-03-05T04:40:41.000Z
|
2021-02-22T08:07:58.000Z
|
import unittest
from mock import MagicMock
from PyStacks.PyStacks.iot import IoTAPI
class TestIot(unittest.TestCase):
    """Placeholder test suite for the IoTAPI wrapper.

    Every case is an unimplemented stub; MagicMock is imported at module
    level but not yet used — presumably intended for mocking the AWS client.
    """

    def test_create_thing(self):
        # TODO: exercise IoTAPI thing creation.
        pass

    def test_create_keys_and_certificate(self):
        # TODO: exercise key/certificate creation.
        pass

    def test_attach_thing_principal(self):
        # TODO: exercise attaching a principal to a thing.
        pass

    def test_attach_policy(self):
        # TODO: exercise policy attachment.
        pass
| 16.75
| 47
| 0.710448
| 43
| 335
| 5.27907
| 0.534884
| 0.123348
| 0.145374
| 0.198238
| 0.185022
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.235821
| 335
| 19
| 48
| 17.631579
| 0.886719
| 0
| 0
| 0.333333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0.333333
| 0.25
| 0
| 0.666667
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 1
| 0
|
0
| 4
|
0e1432b0932c3c8dbd9df5da5cfda6edda0dbee6
| 337
|
py
|
Python
|
commish_paths_nersc.py
|
dstndstn/desi-commish
|
71d95c0e20a1a730dbd75bdd1731c9baace6a0ed
|
[
"MIT"
] | null | null | null |
commish_paths_nersc.py
|
dstndstn/desi-commish
|
71d95c0e20a1a730dbd75bdd1731c9baace6a0ed
|
[
"MIT"
] | null | null | null |
commish_paths_nersc.py
|
dstndstn/desi-commish
|
71d95c0e20a1a730dbd75bdd1731c9baace6a0ed
|
[
"MIT"
] | null | null | null |
# NERSC filesystem locations used by the DESI commissioning scripts.
# NOTE(review): these hard-code one user's /global layout — confirm current.
# Astrometry.net index configuration file.
an_config_filename = '/global/project/projectdirs/cosmo/work/users/dstn/index-5000/cfg'
# Astrometry.net install prefix.
an_path = '/global/homes/d/dstn/astrometry-installed'
# Optional python path for the astrometry install; disabled here.
an_py_path = None #'/global/homes/d/dstn/astrometry-installed/lib/python'
# Root of the DESI spectrograph raw data tree.
desi_dir = '/global/project/projectdirs/desi/spectro/data'
# Directory of GFA good-pixel/WCS maps.
gfa_good_pix_map_dir = '/global/cscratch1/sd/dstn/gfa-wcs'
| 37.444444
| 87
| 0.789318
| 52
| 337
| 4.923077
| 0.634615
| 0.101563
| 0.1875
| 0.125
| 0.273438
| 0.273438
| 0
| 0
| 0
| 0
| 0
| 0.015723
| 0.05638
| 337
| 8
| 88
| 42.125
| 0.789308
| 0.160237
| 0
| 0
| 0
| 0
| 0.648936
| 0.648936
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
385b89342b5c3c1eaad0c40664054aa0f689a4e1
| 356
|
py
|
Python
|
src/pygerber/parser/blender/apertures/custom.py
|
Argmaster/pygerber
|
4761a5aa60ff1d11512fb44aabd103246d9a3019
|
[
"MIT"
] | 3
|
2021-08-30T07:07:59.000Z
|
2021-09-29T22:14:43.000Z
|
src/pygerber/parser/blender/apertures/custom.py
|
Argmaster/pygerber
|
4761a5aa60ff1d11512fb44aabd103246d9a3019
|
[
"MIT"
] | 1
|
2021-09-26T13:28:49.000Z
|
2021-09-26T13:28:49.000Z
|
src/pygerber/parser/blender/apertures/custom.py
|
Argmaster/pygerber
|
4761a5aa60ff1d11512fb44aabd103246d9a3019
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from __future__ import annotations
from pygerber.parser.blender.apertures.arc_mixin import ArcUtilMixinBlender
from pygerber.parser.blender.apertures.util import BlenderUtilMethods
from pygerber.renderer.aperture.custom import CustomAperture
class BlenderCustom(ArcUtilMixinBlender, BlenderUtilMethods, CustomAperture):
    """Blender-renderer flavour of CustomAperture.

    Adds no members of its own; all behavior comes from the mixins and the
    CustomAperture base class.
    """
    pass
| 32.363636
| 77
| 0.837079
| 37
| 356
| 7.918919
| 0.621622
| 0.122867
| 0.122867
| 0.170648
| 0.232082
| 0
| 0
| 0
| 0
| 0
| 0
| 0.003096
| 0.092697
| 356
| 10
| 78
| 35.6
| 0.904025
| 0.058989
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.166667
| 0.666667
| 0
| 0.833333
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 0
|
0
| 4
|
3876c0d838a6bd316aa8ea81a71519f99bbabefd
| 119
|
py
|
Python
|
ibug/face_pose_augmentation/__init__.py
|
ibug-group/face_pose_augmentation
|
96df817c194854bba261893703d26502feec1c22
|
[
"MIT"
] | 3
|
2021-03-22T11:58:18.000Z
|
2022-03-11T20:29:22.000Z
|
ibug/face_pose_augmentation/__init__.py
|
ibug-group/face_pose_augmentation
|
96df817c194854bba261893703d26502feec1c22
|
[
"MIT"
] | null | null | null |
ibug/face_pose_augmentation/__init__.py
|
ibug-group/face_pose_augmentation
|
96df817c194854bba261893703d26502feec1c22
|
[
"MIT"
] | 1
|
2022-03-11T20:28:45.000Z
|
2022-03-11T20:28:45.000Z
|
from .tddfa_predictor import TDDFAPredictor
from .face_pose_augmentor import FacePoseAugmentor
# Package version string — TODO(review): confirm it tracks the release tag.
__version__ = '0.1.0'
| 19.833333
| 50
| 0.831933
| 15
| 119
| 6.133333
| 0.8
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.028302
| 0.109244
| 119
| 5
| 51
| 23.8
| 0.839623
| 0
| 0
| 0
| 0
| 0
| 0.042017
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.666667
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 4
|
3882122c32447e7fa11be90ca09b8deed29cf0e5
| 2,640
|
py
|
Python
|
selenium_tests/Pages/RegisterPage.py
|
avielfedida/RoboAdvisor
|
ca8ba3c479f5fd3ae1e468f11f09ecf08e2a0cf9
|
[
"MIT"
] | null | null | null |
selenium_tests/Pages/RegisterPage.py
|
avielfedida/RoboAdvisor
|
ca8ba3c479f5fd3ae1e468f11f09ecf08e2a0cf9
|
[
"MIT"
] | 3
|
2020-12-31T08:03:30.000Z
|
2021-03-30T06:39:50.000Z
|
selenium_tests/Pages/RegisterPage.py
|
avielfedida/RoboAdvisor
|
ca8ba3c479f5fd3ae1e468f11f09ecf08e2a0cf9
|
[
"MIT"
] | 1
|
2021-06-20T09:13:03.000Z
|
2021-06-20T09:13:03.000Z
|
from selenium.webdriver.common.by import By
from selenium_tests.Config.config import TestData
from selenium_tests.Data.Paths import Paths
from selenium_tests.Pages.BasePage import BasePage
class RegisterPage(BasePage):
    """Page object for the registration form.

    Holds the form's locators and wraps filling, clearing, submitting, and
    the visibility/text checks the tests assert on. Navigates to the page
    on construction.
    """

    EMAIL = (By.ID, "email_field")
    PASSWORD = (By.ID, "password_field")
    PASSWORD_REPEAT = (By.ID, "password_repeat_field")
    FIRST_NAME = (By.ID, "first_name_field")
    LAST_NAME = (By.ID, "last_name_field")
    BIRTH_DATE = (By.ID, "birth_date_field")
    SUBMIT_BTN = (By.ID, "submit_btn")
    ERROR_MESSAGE = (By.XPATH, "//span[@data-variant='danger']")
    SUCCESS_MESSAGE = (By.XPATH, "//span[@data-variant='success']")
    PAGE_PATH = Paths.REGISTER

    def __init__(self, driver):
        """Open the registration page in the given driver."""
        super().__init__(driver)
        self.driver.get(TestData.BASE_URL + self.PAGE_PATH)

    def do_register(self, username, password, first_name, last_name, birth_date):
        """Fill every field (in form order) and click submit."""
        for locator, text in (
            (self.EMAIL, username),
            (self.PASSWORD, password),
            (self.PASSWORD_REPEAT, password),
            (self.FIRST_NAME, first_name),
            (self.LAST_NAME, last_name),
            (self.BIRTH_DATE, birth_date),
        ):
            self.do_send_keys(locator, text)
        self.do_click(self.SUBMIT_BTN)

    def reset_fields(self):
        """Clear every input field, in the same order they are filled."""
        for locator in (
            self.EMAIL,
            self.PASSWORD,
            self.PASSWORD_REPEAT,
            self.FIRST_NAME,
            self.LAST_NAME,
            self.BIRTH_DATE,
        ):
            self.do_clear(locator)

    # --- presence checks ------------------------------------------------

    def is_register_btn_exists(self):
        """True when the submit button is visible."""
        return self.is_visible(self.SUBMIT_BTN)

    def is_email_field_exists(self):
        """True when the email field is visible."""
        return self.is_visible(self.EMAIL)

    def is_password_field_exists(self):
        """True when the password field is visible."""
        return self.is_visible(self.PASSWORD)

    def is_password_repeat_field_exists(self):
        """True when the password-repeat field is visible."""
        return self.is_visible(self.PASSWORD_REPEAT)

    def is_birth_date_field_exists(self):
        """True when the birth-date field is visible."""
        return self.is_visible(self.BIRTH_DATE)

    def is_first_name_field_exists(self):
        """True when the first-name field is visible."""
        return self.is_visible(self.FIRST_NAME)

    def is_last_name_field_exists(self):
        """True when the last-name field is visible."""
        return self.is_visible(self.LAST_NAME)

    # --- message checks -------------------------------------------------

    def is_danger_presented(self):
        """True when the danger (error) message is visible."""
        return self.is_visible(self.ERROR_MESSAGE)

    def is_danger_not_presented(self):
        """True when the danger (error) message is invisible."""
        return self.is_invisible(self.ERROR_MESSAGE)

    def is_success_presented(self):
        """True when the success message is visible."""
        return self.is_visible(self.SUCCESS_MESSAGE)

    def is_danger_message_equal(self, text):
        """True when the danger message text equals *text*."""
        return self.get_element_text(self.ERROR_MESSAGE) == text

    def is_success_message_equal(self, text):
        """True when the success message text equals *text*."""
        return self.get_element_text(self.SUCCESS_MESSAGE) == text
| 34.285714
| 81
| 0.71553
| 378
| 2,640
| 4.653439
| 0.166667
| 0.044343
| 0.079591
| 0.090961
| 0.474133
| 0.364412
| 0.257533
| 0.19784
| 0.154633
| 0.054576
| 0
| 0
| 0.182197
| 2,640
| 76
| 82
| 34.736842
| 0.814729
| 0
| 0
| 0
| 0
| 0
| 0.062121
| 0.031061
| 0
| 0
| 0
| 0
| 0
| 1
| 0.263158
| false
| 0.192982
| 0.070175
| 0.210526
| 0.736842
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
|
0
| 4
|
3898b905761b2eb901f47f7af0f4f160795f09dc
| 141
|
py
|
Python
|
reddit2telegram/channels/~inactive/r_childfree/app.py
|
mainyordle/reddit2telegram
|
1163e15aed3b6ff0fba65b222d3d9798f644c386
|
[
"MIT"
] | 187
|
2016-09-20T09:15:54.000Z
|
2022-03-29T12:22:33.000Z
|
reddit2telegram/channels/~inactive/r_childfree/app.py
|
mainyordle/reddit2telegram
|
1163e15aed3b6ff0fba65b222d3d9798f644c386
|
[
"MIT"
] | 84
|
2016-09-22T14:25:07.000Z
|
2022-03-19T01:26:17.000Z
|
reddit2telegram/channels/~inactive/r_childfree/app.py
|
mainyordle/reddit2telegram
|
1163e15aed3b6ff0fba65b222d3d9798f644c386
|
[
"MIT"
] | 172
|
2016-09-21T15:39:39.000Z
|
2022-03-16T15:15:58.000Z
|
#encoding:utf-8
subreddit = 'childfree'
t_channel = '@r_Childfree'
def send_post(submission, r2t):
    """Forward *submission* to Telegram via the bridge's simple sender."""
    sender = r2t.send_simple
    return sender(submission)
| 15.666667
| 38
| 0.744681
| 19
| 141
| 5.315789
| 0.789474
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.02459
| 0.134752
| 141
| 8
| 39
| 17.625
| 0.803279
| 0.099291
| 0
| 0
| 0
| 0
| 0.166667
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| false
| 0
| 0
| 0.25
| 0.5
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
|
0
| 4
|
c7fcc4062776a78be53c864a39cbb51e9625d5ca
| 1,621
|
py
|
Python
|
jpylib/jmath/_bits.py
|
JiniousChoi/encyclopedia-in-code
|
77bc551a03a2a3e3808e50016ece14adb5cfbd96
|
[
"MIT"
] | 2
|
2018-07-20T10:15:49.000Z
|
2018-07-20T10:16:54.000Z
|
jpylib/jmath/_bits.py
|
JiniousChoi/encyclopedia-in-code
|
77bc551a03a2a3e3808e50016ece14adb5cfbd96
|
[
"MIT"
] | 2
|
2018-06-26T09:12:44.000Z
|
2019-12-18T00:09:14.000Z
|
jpylib/jmath/_bits.py
|
JiniousChoi/encyclopedia-in-code
|
77bc551a03a2a3e3808e50016ece14adb5cfbd96
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python3
## author: jinchoiseoul@gmail.com
from math import inf
def single_bit_ceil(n):
    """Return the smallest power of two `1<<m` satisfying `2**m >= n`.

    Returns 0 for n == 0 and 1 for negative n (the original loop's
    precondition p = 1 >= n is already met, so it returned 1 too).
    Note: the original docstring said "largest", but the loop computes the
    *smallest* such power; the code was correct, the docstring was not.
    """
    if n <= 0:
        return 0 if n == 0 else 1
    # (n-1).bit_length() is the exponent of the least power of two >= n.
    return 1 << (n - 1).bit_length()
def single_bit_floor(n):
    """Return the largest power of two `1<<m` satisfying `2**m <= n`.

    Returns 0 for n == 0; raises AssertionError for negative n
    (preserving the original contract).
    """
    if n == 0:
        return 0
    assert n > 0
    # The highest set bit of n is exactly the largest power of two <= n.
    return 1 << (n.bit_length() - 1)
import unittest
class BitsTest(unittest.TestCase):
    """Unit tests for single_bit_ceil / single_bit_floor."""

    def test_single_bit_ceil(self):
        # Fixed: the first assertion previously called single_bit_floor(0),
        # so the ceil function's zero case was never actually tested here.
        self.assertEqual(single_bit_ceil(0), 0)
        self.assertEqual(single_bit_ceil(1), 1)
        self.assertEqual(single_bit_ceil(2), 2)
        self.assertEqual(single_bit_ceil(3), 4)
        self.assertEqual(single_bit_ceil(4), 4)
        self.assertEqual(single_bit_ceil(5), 8)
        self.assertEqual(single_bit_ceil(6), 8)
        self.assertEqual(single_bit_ceil(7), 8)
        self.assertEqual(single_bit_ceil(8), 8)

    def test_single_bit_floor(self):
        self.assertEqual(single_bit_floor(0), 0)
        self.assertEqual(single_bit_floor(1), 1)
        self.assertEqual(single_bit_floor(2), 2)
        self.assertEqual(single_bit_floor(3), 2)
        self.assertEqual(single_bit_floor(4), 4)
        self.assertEqual(single_bit_floor(5), 4)
        self.assertEqual(single_bit_floor(6), 4)
        self.assertEqual(single_bit_floor(7), 4)
        self.assertEqual(single_bit_floor(8), 8)
if __name__ == "__main__":
    # Run the embedded unit tests when this module is executed directly.
    unittest.main()
| 23.157143
| 53
| 0.611968
| 240
| 1,621
| 3.908333
| 0.191667
| 0.211087
| 0.402985
| 0.460554
| 0.684435
| 0.660981
| 0.228145
| 0.228145
| 0.228145
| 0.228145
| 0
| 0.045417
| 0.25293
| 1,621
| 69
| 54
| 23.492754
| 0.729149
| 0.111043
| 0
| 0.097561
| 0
| 0
| 0.005674
| 0
| 0
| 0
| 0
| 0
| 0.512195
| 1
| 0.097561
| false
| 0
| 0.04878
| 0
| 0.219512
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
2a4984d704a1a5fb8b24f5f06d43dfb39f03cfd3
| 207
|
py
|
Python
|
pygdpr/policies/translate_file_policy.py
|
GDPRxiv/crawler
|
178ef9ff6c3641ba8b761a49e42c2579e453c1ca
|
[
"MIT"
] | null | null | null |
pygdpr/policies/translate_file_policy.py
|
GDPRxiv/crawler
|
178ef9ff6c3641ba8b761a49e42c2579e453c1ca
|
[
"MIT"
] | 2
|
2022-02-19T06:56:03.000Z
|
2022-02-19T07:00:00.000Z
|
pygdpr/policies/translate_file_policy.py
|
GDPRxiv/crawler
|
178ef9ff6c3641ba8b761a49e42c2579e453c1ca
|
[
"MIT"
] | null | null | null |
class TranslateFilePolicy():
    """Decides whether a file may be submitted for translation by extension."""

    # Extensions (without the dot) accepted for translation.
    allowed_formats = ['txt']

    def is_allowed(self, filename):
        """Return True if *filename*'s final extension is an allowed format.

        Bug fixes vs. the original `filename.split('.')[1]`:
        - no longer raises IndexError for names without a dot (returns False);
        - for multi-dot names like 'a.2021.txt' it checks the *last*
          extension instead of the first interior segment.
        """
        parts = filename.rsplit('.', 1)
        if len(parts) < 2:
            return False  # no extension at all
        return parts[1] in self.allowed_formats
| 29.571429
| 48
| 0.637681
| 22
| 207
| 5.772727
| 0.636364
| 0.220472
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.006369
| 0.241546
| 207
| 6
| 49
| 34.5
| 0.802548
| 0
| 0
| 0
| 0
| 0
| 0.019324
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.166667
| false
| 0
| 0
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 4
|
2a589c16a6797b585f5993e3415df4b65fc2f68b
| 714
|
py
|
Python
|
main.py
|
dun933/FudanOCR
|
fd79b679044ea23fd9eb30691453ed0805d2e98b
|
[
"MIT"
] | 1
|
2020-03-19T02:29:02.000Z
|
2020-03-19T02:29:02.000Z
|
main.py
|
dun933/FudanOCR
|
fd79b679044ea23fd9eb30691453ed0805d2e98b
|
[
"MIT"
] | null | null | null |
main.py
|
dun933/FudanOCR
|
fd79b679044ea23fd9eb30691453ed0805d2e98b
|
[
"MIT"
] | null | null | null |
# -*- coding:utf-8 -*-
from engine.trainer import Trainer
from engine.env import Env
from data.build import build_dataloader
from engine.trainer_collection.MORAN import MORAN_Trainer
from engine.trainer_collection.GRCNN import GRCNN_Trainer
from engine.trainer_collection.RARE import RARE_Trainer
from engine.trainer_collection.CRNN import CRNN_Trainer
from engine.trainer_collection.PixelLink import PixelLink_Trainer
from engine.trainer_collection.LSN import LSN_Trainer

# Build the environment (config + model) and the train/test loaders, then
# immediately train the GRCNN model.
# NOTE(review): only GRCNN_Trainer is used below; the other trainer imports
# look unused here — presumably kept to ease switching models. Confirm
# they have no import-time side effects before removing.
env = Env()
train_loader, test_loader = build_dataloader(env.opt)
newTrainer = GRCNN_Trainer(modelObject=env.model, opt=env.opt, train_loader=train_loader,
                           val_loader=test_loader).train()
| 34
| 90
| 0.785714
| 95
| 714
| 5.684211
| 0.273684
| 0.148148
| 0.22037
| 0.3
| 0.314815
| 0
| 0
| 0
| 0
| 0
| 0
| 0.001645
| 0.148459
| 714
| 20
| 91
| 35.7
| 0.886513
| 0.028011
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.692308
| 0
| 0.692308
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 4
|
2a791e28efed57765e637fb2d20894ed487edaf0
| 20
|
py
|
Python
|
test/login.py
|
zhujinyu828/cy
|
3cdf51125fafed4986ba7243b6b43fcc30df9370
|
[
"MIT"
] | null | null | null |
test/login.py
|
zhujinyu828/cy
|
3cdf51125fafed4986ba7243b6b43fcc30df9370
|
[
"MIT"
] | null | null | null |
test/login.py
|
zhujinyu828/cy
|
3cdf51125fafed4986ba7243b6b43fcc30df9370
|
[
"MIT"
] | null | null | null |
1.woshint
2.niyeshi
| 6.666667
| 9
| 0.8
| 4
| 20
| 4
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.111111
| 0.1
| 20
| 2
| 10
| 10
| 0.777778
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0
| null | null | 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
2a91c0e2d77d62db226fe8a9c3b6fce9d02908d6
| 275
|
py
|
Python
|
controllers/machines.py
|
kbj2060/plantpointNutrients-backend
|
20c2e1e277368a186ed46844818f1e650d25fa03
|
[
"MIT"
] | null | null | null |
controllers/machines.py
|
kbj2060/plantpointNutrients-backend
|
20c2e1e277368a186ed46844818f1e650d25fa03
|
[
"MIT"
] | null | null | null |
controllers/machines.py
|
kbj2060/plantpointNutrients-backend
|
20c2e1e277368a186ed46844818f1e650d25fa03
|
[
"MIT"
] | null | null | null |
from controllers.app import app
from repository.machine_repo import machineRepository
@app.get("/machines")
def read_machines():
    """GET /machines — return all machines from the repository."""
    repo = machineRepository
    return repo.read_machines()
@app.post("/machines/create")
def create_machines():
    """POST /machines/create — create a machine via the repository."""
    repo = machineRepository
    return repo.create_machine()
| 25
| 53
| 0.792727
| 32
| 275
| 6.65625
| 0.46875
| 0.112676
| 0.29108
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.101818
| 275
| 11
| 54
| 25
| 0.862348
| 0
| 0
| 0
| 0
| 0
| 0.09058
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| true
| 0
| 0.25
| 0.25
| 0.75
| 0
| 0
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 1
| 0
| 0
|
0
| 4
|
aa491fe6cff45225c11b3413a4de43d5604b9b2c
| 418
|
py
|
Python
|
pingparsing/error.py
|
chuongntv/pingparsing
|
45f0c510bd992128e29096527791cd66c0250f78
|
[
"MIT"
] | null | null | null |
pingparsing/error.py
|
chuongntv/pingparsing
|
45f0c510bd992128e29096527791cd66c0250f78
|
[
"MIT"
] | null | null | null |
pingparsing/error.py
|
chuongntv/pingparsing
|
45f0c510bd992128e29096527791cd66c0250f78
|
[
"MIT"
] | null | null | null |
# encoding: utf-8
"""
.. codeauthor:: Tsuyoshi Hombashi <tsuyoshi.hombashi@gmail.com>
"""
from __future__ import absolute_import
class PingStatisticsHeaderNotFoundError(Exception):
    """Raised when no ping statistics header can be located in the parsed text."""
class EmptyPingStatisticsError(Exception):
    """Raised when the ping statistics section of the parsed text is empty."""
| 19.904762
| 71
| 0.715311
| 46
| 418
| 6.391304
| 0.630435
| 0.108844
| 0.163265
| 0.190476
| 0.292517
| 0.292517
| 0.292517
| 0
| 0
| 0
| 0
| 0.002967
| 0.19378
| 418
| 20
| 72
| 20.9
| 0.869436
| 0.535885
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.333333
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 4
|
aa5d31eeb3767f9550433ac72e62b859f805a5d4
| 128
|
py
|
Python
|
molecule/podman/tests/test_podman.py
|
iquzart/ansible-role-mariadb
|
7a852ccf88177d9c075d8577319a903d5edcfe1b
|
[
"MIT"
] | null | null | null |
molecule/podman/tests/test_podman.py
|
iquzart/ansible-role-mariadb
|
7a852ccf88177d9c075d8577319a903d5edcfe1b
|
[
"MIT"
] | null | null | null |
molecule/podman/tests/test_podman.py
|
iquzart/ansible-role-mariadb
|
7a852ccf88177d9c075d8577319a903d5edcfe1b
|
[
"MIT"
] | 1
|
2021-06-13T07:40:40.000Z
|
2021-06-13T07:40:40.000Z
|
"""Role testing files using testinfra."""
def test_mariadb_service_active(host):
    """Verify the mariadb service is running on the target host."""
    mariadb = host.service('mariadb')
    assert mariadb.is_running
| 25.6
| 45
| 0.765625
| 17
| 128
| 5.529412
| 0.823529
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.109375
| 128
| 5
| 45
| 25.6
| 0.824561
| 0.273438
| 0
| 0
| 0
| 0
| 0.079545
| 0
| 0
| 0
| 0
| 0
| 0.5
| 1
| 0.5
| false
| 0
| 0
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
aa6245c0d00aafac3688c843b5c6ed960ffb518d
| 241
|
py
|
Python
|
main.py
|
jeremy886/crossword2019
|
e7fd5b8a79a3834b2d3c7fd51da336b23f9cfc8f
|
[
"MIT"
] | null | null | null |
main.py
|
jeremy886/crossword2019
|
e7fd5b8a79a3834b2d3c7fd51da336b23f9cfc8f
|
[
"MIT"
] | null | null | null |
main.py
|
jeremy886/crossword2019
|
e7fd5b8a79a3834b2d3c7fd51da336b23f9cfc8f
|
[
"MIT"
] | null | null | null |
import tex_printable
"""
Step 1:
- Create a new word list in words.txt
- Run crosswords.py to generate crosswords_out.txt
Step 2:
- Run this program to call text_printable.py to generate crossword_puzzle.text
"""
tex_printable.print_tex()
| 20.083333
| 78
| 0.775934
| 40
| 241
| 4.525
| 0.675
| 0.132597
| 0.132597
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.009709
| 0.145228
| 241
| 12
| 79
| 20.083333
| 0.868932
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 1
|
0
| 4
|
aa7c8b76035ace6b3e85e3ca1f6aa78318a12480
| 535
|
py
|
Python
|
Edabit/NoDuplicateLetters-Hard.py
|
JLJTECH/TutorialTesting
|
f2dbbd49a86b3b086d0fc156ac3369fb74727f86
|
[
"MIT"
] | null | null | null |
Edabit/NoDuplicateLetters-Hard.py
|
JLJTECH/TutorialTesting
|
f2dbbd49a86b3b086d0fc156ac3369fb74727f86
|
[
"MIT"
] | null | null | null |
Edabit/NoDuplicateLetters-Hard.py
|
JLJTECH/TutorialTesting
|
f2dbbd49a86b3b086d0fc156ac3369fb74727f86
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
'''
Given a common phrase, return False if any individual word in the phrase contains duplicate letters.
Return True otherwise.
'''
def no_duplicate_letters(phrase):
    """Return True when no individual word in *phrase* repeats a letter.

    Simplification: the original wrapped the phrase in a one-element list,
    joined it, then split — `' '.join([phrase]).split()` is just
    `phrase.split()`. It also built two parallel length lists and compared
    them; comparing per-word directly is equivalent and clearer.
    """
    words = phrase.split()
    # A word has no duplicates iff deduplicating it does not shrink it.
    return all(len(set(word)) == len(word) for word in words)
#Alternative solutions
def no_duplicate_letters(phrase):
    """True when every letter occurs exactly once within its own word."""
    for word in phrase.split():
        for ch in word:
            if word.count(ch) != 1:
                return False
    return True
def no_duplicate_letters(phrase):
    """True when no space-separated word of *phrase* repeats a letter."""
    words = phrase.split(' ')
    return all(len(set(word)) == len(word) for word in words)
| 26.75
| 101
| 0.697196
| 91
| 535
| 4.032967
| 0.428571
| 0.174387
| 0.065395
| 0.171662
| 0.441417
| 0.196185
| 0.196185
| 0
| 0
| 0
| 0
| 0.004444
| 0.158879
| 535
| 19
| 102
| 28.157895
| 0.811111
| 0.31215
| 0
| 0.3
| 0
| 0
| 0.005571
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.3
| false
| 0
| 0
| 0.2
| 0.6
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 4
|
aa838313d513935184b5edad399628aa9a7191f4
| 459
|
py
|
Python
|
gem_controllers/current_controller.py
|
upb-lea/gem_control
|
242393dce0bbcb3a982594318daa1757687b1d3a
|
[
"MIT"
] | 15
|
2021-05-03T18:31:22.000Z
|
2022-03-15T01:56:33.000Z
|
gem_controllers/current_controller.py
|
upb-lea/gem_control
|
242393dce0bbcb3a982594318daa1757687b1d3a
|
[
"MIT"
] | 5
|
2021-05-09T19:16:35.000Z
|
2021-06-08T20:44:22.000Z
|
gem_controllers/current_controller.py
|
upb-lea/gem_control
|
242393dce0bbcb3a982594318daa1757687b1d3a
|
[
"MIT"
] | null | null | null |
import numpy as np
import gym_electric_motor as gem
import gem_controllers as gc
class CurrentController(gc.GemController):
    """Abstract base for current controllers; every method must be overridden."""

    def control(self, state, reference):
        """Compute a control action for the given state and reference (abstract)."""
        raise NotImplementedError

    def tune(self, env, env_id, **kwargs):
        """Fit controller parameters to the environment *env* (abstract)."""
        raise NotImplementedError

    @property
    def voltage_reference(self) -> np.ndarray:
        # Voltage reference produced by the controller (abstract).
        raise NotImplementedError

    @property
    def t_n(self) -> np.ndarray:
        # Controller time constant(s) — presumably per current component; confirm in subclasses.
        raise NotImplementedError
| 20.863636
| 46
| 0.705882
| 53
| 459
| 6
| 0.54717
| 0.301887
| 0.201258
| 0.220126
| 0.232704
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.22658
| 459
| 21
| 47
| 21.857143
| 0.895775
| 0
| 0
| 0.428571
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.285714
| false
| 0
| 0.214286
| 0
| 0.571429
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
aab343076828abe4a972220f2691735f2cb73eea
| 86
|
py
|
Python
|
plugins/greet_spanish.py
|
kyclark/python_plugins
|
da2f433b2f0323389ca2c24ac47e5fb097ef805d
|
[
"MIT"
] | null | null | null |
plugins/greet_spanish.py
|
kyclark/python_plugins
|
da2f433b2f0323389ca2c24ac47e5fb097ef805d
|
[
"MIT"
] | null | null | null |
plugins/greet_spanish.py
|
kyclark/python_plugins
|
da2f433b2f0323389ca2c24ac47e5fb097ef805d
|
[
"MIT"
] | null | null | null |
def greet(name: str) -> str:
    """Return a Spanish greeting addressed to *name*."""
    greeting = f"¡Hola, {name}!"
    return greeting
| 17.2
| 28
| 0.546512
| 13
| 86
| 3.692308
| 0.769231
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.232558
| 86
| 4
| 29
| 21.5
| 0.712121
| 0.186047
| 0
| 0
| 0
| 0
| 0.21875
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| false
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 4
|
aace60065c8ca87d2ea84fce28749351770617c0
| 73
|
py
|
Python
|
backend/routes/__init__.py
|
ffont/FreesoundExplorer2
|
635f2c70741a4218ff2118c49b747b6d33266999
|
[
"MIT"
] | 14
|
2016-11-22T14:56:19.000Z
|
2021-01-04T11:43:27.000Z
|
backend/routes/__init__.py
|
noVaSon/freesound-explorer
|
92181855d59e181da0f5b140369d7e0067761ab3
|
[
"MIT"
] | 63
|
2016-09-04T08:43:34.000Z
|
2021-05-24T09:52:28.000Z
|
backend/routes/__init__.py
|
noVaSon/freesound-explorer
|
92181855d59e181da0f5b140369d7e0067761ab3
|
[
"MIT"
] | 5
|
2017-01-16T16:56:05.000Z
|
2019-02-25T16:03:10.000Z
|
from backend.routes import main
from social.apps.flask_app import routes
| 24.333333
| 40
| 0.849315
| 12
| 73
| 5.083333
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.109589
| 73
| 2
| 41
| 36.5
| 0.938462
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 4
|
aad771b2b741a1418146c73959718eb7b27b52e0
| 763
|
py
|
Python
|
src/checklisting/provider/__init__.py
|
michalbachowski/python-checklisting
|
31cbe1fb87683ffbb21104585e318d844fc7c52b
|
[
"MIT"
] | null | null | null |
src/checklisting/provider/__init__.py
|
michalbachowski/python-checklisting
|
31cbe1fb87683ffbb21104585e318d844fc7c52b
|
[
"MIT"
] | null | null | null |
src/checklisting/provider/__init__.py
|
michalbachowski/python-checklisting
|
31cbe1fb87683ffbb21104585e318d844fc7c52b
|
[
"MIT"
] | null | null | null |
from abc import ABC, abstractmethod
from typing import Callable, Iterable, Iterator
from checklisting.task import Checklist
class BaseChecklistsProvider(ABC):
    """Interface for objects that supply checklists."""

    @abstractmethod
    def get_all(self) -> Iterator[Checklist]:
        """Yield every known checklist."""

    @abstractmethod
    def get_filtered(self, predicate: Callable[[Checklist], bool]) -> Iterator[Checklist]:
        """Yield only checklists for which *predicate* returns True."""
class StaticChecklistsProvider(BaseChecklistsProvider):
    """Checklist provider backed by a fixed, in-memory collection."""

    def __init__(self, checklists: Iterable[Checklist]) -> None:
        # Materialize once so the provider can be iterated repeatedly.
        self._checklists = list(checklists)

    def get_all(self) -> Iterator[Checklist]:
        """Iterate over every stored checklist."""
        yield from self._checklists

    def get_filtered(self, predicate: Callable[[Checklist], bool]) -> Iterator[Checklist]:
        """Lazily yield the stored checklists that satisfy *predicate*."""
        return (item for item in self._checklists if predicate(item))
| 27.25
| 90
| 0.72346
| 78
| 763
| 6.935897
| 0.371795
| 0.044362
| 0.073937
| 0.048059
| 0.351201
| 0.351201
| 0.240296
| 0.240296
| 0.240296
| 0.240296
| 0
| 0
| 0.179554
| 763
| 27
| 91
| 28.259259
| 0.864217
| 0
| 0
| 0.470588
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.294118
| false
| 0.117647
| 0.176471
| 0.117647
| 0.705882
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
|
0
| 4
|
2aa09506073f1ca7be0491d71c745533c0e0cbb6
| 104
|
py
|
Python
|
py_tdlib/constructors/chat_action_uploading_document.py
|
Mr-TelegramBot/python-tdlib
|
2e2d21a742ebcd439971a32357f2d0abd0ce61eb
|
[
"MIT"
] | 24
|
2018-10-05T13:04:30.000Z
|
2020-05-12T08:45:34.000Z
|
py_tdlib/constructors/chat_action_uploading_document.py
|
MrMahdi313/python-tdlib
|
2e2d21a742ebcd439971a32357f2d0abd0ce61eb
|
[
"MIT"
] | 3
|
2019-06-26T07:20:20.000Z
|
2021-05-24T13:06:56.000Z
|
py_tdlib/constructors/chat_action_uploading_document.py
|
MrMahdi313/python-tdlib
|
2e2d21a742ebcd439971a32357f2d0abd0ce61eb
|
[
"MIT"
] | 5
|
2018-10-05T14:29:28.000Z
|
2020-08-11T15:04:10.000Z
|
from ..factory import Type
class chatActionUploadingDocument(Type):
    """TDLib constructor: chat action signalling a document upload in progress."""
    # Upload progress (TDLib int32); presumably a percentage — confirm against TDLib docs.
    progress = None # type: "int32"
| 17.333333
| 40
| 0.75
| 11
| 104
| 7.090909
| 0.818182
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.022727
| 0.153846
| 104
| 5
| 41
| 20.8
| 0.863636
| 0.125
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.333333
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 4
|
2ad2f753b5e41049e9fa33803137672b1fa7ad67
| 37
|
py
|
Python
|
foiamachine/local/lib/python2.7/encodings/cp437.py
|
dwillis/foiamachine
|
26d3b02870227696cdaab639c39d47b2a7a42ae5
|
[
"Unlicense",
"MIT"
] | 3
|
2021-08-07T04:01:55.000Z
|
2021-08-07T05:12:11.000Z
|
foiamachine/local/lib/python2.7/encodings/cp437.py
|
dwillis/foiamachine
|
26d3b02870227696cdaab639c39d47b2a7a42ae5
|
[
"Unlicense",
"MIT"
] | null | null | null |
foiamachine/local/lib/python2.7/encodings/cp437.py
|
dwillis/foiamachine
|
26d3b02870227696cdaab639c39d47b2a7a42ae5
|
[
"Unlicense",
"MIT"
] | 1
|
2021-08-05T22:51:14.000Z
|
2021-08-05T22:51:14.000Z
|
/usr/lib/python2.7/encodings/cp437.py
| 37
| 37
| 0.810811
| 7
| 37
| 4.285714
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.135135
| 0
| 37
| 1
| 37
| 37
| 0.675676
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0
| null | null | 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
630d07fcb74213c8cad6b3fc82978bac583b6ecc
| 2,078
|
py
|
Python
|
tests/test_zad2.py
|
TestowanieAutomatyczneUG/laboratorium-9-wiktormorawski
|
f4067f7dd8d3c88d95cadddd58a10269f981603f
|
[
"MIT"
] | null | null | null |
tests/test_zad2.py
|
TestowanieAutomatyczneUG/laboratorium-9-wiktormorawski
|
f4067f7dd8d3c88d95cadddd58a10269f981603f
|
[
"MIT"
] | null | null | null |
tests/test_zad2.py
|
TestowanieAutomatyczneUG/laboratorium-9-wiktormorawski
|
f4067f7dd8d3c88d95cadddd58a10269f981603f
|
[
"MIT"
] | null | null | null |
import unittest
from unittest.mock import *
from zad2 import Car
class TestCarMock(unittest.TestCase):
    """Mock-based tests for the Car class.

    NOTE(review): every test patches the very method it exercises and then
    asserts the mock's return value against itself, so these tests verify
    unittest.mock rather than any real Car behaviour — consider asserting
    against Car's actual logic instead.
    """

    def setUp(self):
        # Fresh Car instance per test.
        self.test_object = Car()

    """needsFuel TESTS"""
    @patch.object(Car, 'needsFuel')
    def test_needsFuel_dont(self, mock_method):
        mock_method.return_value = False
        result = self.test_object.needsFuel()
        self.assertEqual(mock_method.return_value, result, "return value of needFuel not equal with False")

    @patch.object(Car, 'needsFuel')
    def test_needsFuel_do(self, mock_method):
        mock_method.return_value = True
        result = self.test_object.needsFuel()
        self.assertEqual(mock_method.return_value, result, "return value of needFuel not equal with True")

    """DriveTo TESTS"""
    @patch.object(Car, 'driveTo')
    def test_driveTo(self, mock_method):
        destination = 'Olsztyn'
        mock_method.return_value = 'On my way to ' + destination
        result = self.test_object.driveTo(destination)
        self.assertEqual(mock_method.return_value, result)

    """GetEngineTemperature TESTS"""
    @patch.object(Car, 'getEngineTemperature')
    def test_getEngineTemperature_temp_under_normal(self, mock_method):
        mock_method.return_value = 'Temperature under normal'
        result = self.test_object.getEngineTemperature()
        self.assertEqual(mock_method.return_value, result, 'Temp not below OK')

    @patch.object(Car, 'getEngineTemperature')
    def test_getEngineTemperature_temp_ok(self, mock_method):
        mock_method.return_value = 'Temperature normal'
        result = self.test_object.getEngineTemperature()
        self.assertEqual(mock_method.return_value, result, 'Temp not OK')

    @patch.object(Car, 'getEngineTemperature')
    def test_getEngineTemperature_temp_above_ok(self, mock_method):
        mock_method.return_value = 'Temperature abovenormal'
        result = self.test_object.getEngineTemperature()
        self.assertEqual(mock_method.return_value, result, 'Temp not above OK')

    def tearDown(self):
        # Drop the reference so each test starts clean.
        self.test_object = None
| 38.481481
| 107
| 0.714148
| 244
| 2,078
| 5.860656
| 0.20082
| 0.125874
| 0.134266
| 0.176224
| 0.711888
| 0.711888
| 0.711888
| 0.579021
| 0.501399
| 0.340559
| 0
| 0.000593
| 0.189124
| 2,078
| 54
| 108
| 38.481481
| 0.848071
| 0
| 0
| 0.25641
| 0
| 0
| 0.15147
| 0
| 0
| 0
| 0
| 0
| 0.153846
| 1
| 0.205128
| false
| 0
| 0.076923
| 0
| 0.307692
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
631eb81af083eb6035d58f09389173814fc6c567
| 351
|
py
|
Python
|
classes/expense.py
|
GuillaumeDmns/cost-balancing
|
b9515af360336bb2dceb596b9a93b23642fe4bea
|
[
"MIT"
] | null | null | null |
classes/expense.py
|
GuillaumeDmns/cost-balancing
|
b9515af360336bb2dceb596b9a93b23642fe4bea
|
[
"MIT"
] | null | null | null |
classes/expense.py
|
GuillaumeDmns/cost-balancing
|
b9515af360336bb2dceb596b9a93b23642fe4bea
|
[
"MIT"
] | null | null | null |
class Expense:
    """A named expense with an amount."""

    def __init__(self, name, amount):
        self._name = name
        self._amount = amount

    def get_name(self):
        """Return the expense name."""
        return self._name

    def set_name(self, new_name):
        """Replace the expense name."""
        self._name = new_name

    def get_amount(self):
        """Return the expense amount."""
        return self._amount

    def set_amount(self, new_amount):
        """Replace the expense amount.

        Bug fix: the original assigned to ``self._name``, silently
        corrupting the name while leaving the amount unchanged.
        """
        self._amount = new_amount
| 20.647059
| 37
| 0.618234
| 47
| 351
| 4.234043
| 0.234043
| 0.201005
| 0.140704
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.296296
| 351
| 16
| 38
| 21.9375
| 0.805668
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.416667
| false
| 0
| 0
| 0.166667
| 0.666667
| 0
| 0
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 4
|
63253f1b9f1e96715e620c791b9425ed58245e91
| 3,305
|
py
|
Python
|
src/rocommand/test/TestAll.py
|
A-Mazurek/ro-manager
|
e49b6025b89594e036fdb2b56c8b871717b3b620
|
[
"MIT-0",
"MIT"
] | 11
|
2015-01-19T04:21:58.000Z
|
2019-02-21T11:54:45.000Z
|
src/rocommand/test/TestAll.py
|
A-Mazurek/ro-manager
|
e49b6025b89594e036fdb2b56c8b871717b3b620
|
[
"MIT-0",
"MIT"
] | 1
|
2016-10-18T14:35:36.000Z
|
2016-10-25T19:12:05.000Z
|
src/rocommand/test/TestAll.py
|
A-Mazurek/ro-manager
|
e49b6025b89594e036fdb2b56c8b871717b3b620
|
[
"MIT-0",
"MIT"
] | 7
|
2015-03-04T17:22:00.000Z
|
2022-03-14T15:55:23.000Z
|
#!/usr/bin/env python
__author__ = "Graham Klyne (GK@ACM.ORG)"
__copyright__ = "Copyright 2011-2013, University of Oxford"
__license__ = "MIT (http://opensource.org/licenses/MIT)"
import sys, unittest, os
if __name__ == "__main__":
    # Add main project directory and ro manager directories at start of python path
    # so the bare `import TestXxx` statements below resolve when this file is
    # run directly from its own directory.
    sys.path.insert(0, "..")
    sys.path.insert(0, "../..")
    sys.path.insert(0, "../../iaeval/test")
    sys.path.insert(0, "../../checklist/test")
    sys.path.insert(0, "../../sync/test")
    sys.path.insert(0, "../../roweb")
    sys.path.insert(0, "../../roweb/test")
import TestConfig
import TestSparqlQueries
import TestBasicCommands
import TestAnnotationUtils
import TestManifest
import TestROMetadata
import TestAnnotations
import TestLinks
import TestROSRS_Session
import TestROSRSMetadata
import TestSyncCommands
import TestRemoteROMetadata
import TestRosrsSync
import TestEvoCommands
import TestMinimAccess
import TestMinimAccess2
import TestEvalChecklist
import TestEvalQueryMatch
import TestRdfReport
import TestGridMatch
import TestMkMinim
# Code to run unit tests from all library test modules
def getTestSuite(select="unit"):
    """Aggregate the suites of all library test modules.

    :param select: which tests to include; anything other than "unit" also
        pulls in the remote/sync suites that need external services.
    :return: a populated unittest.TestSuite.
    """
    suite = unittest.TestSuite()
    suite.addTest(TestSparqlQueries.getTestSuite(select=select))
    suite.addTest(TestBasicCommands.getTestSuite(select=select))
    suite.addTest(TestAnnotationUtils.getTestSuite(select=select))
    suite.addTest(TestManifest.getTestSuite(select=select))
    suite.addTest(TestROMetadata.getTestSuite(select=select))
    suite.addTest(TestAnnotations.getTestSuite(select=select))
    suite.addTest(TestLinks.getTestSuite(select=select))
    suite.addTest(TestMinimAccess.getTestSuite(select=select))
    suite.addTest(TestMinimAccess2.getTestSuite(select=select))
    suite.addTest(TestEvalChecklist.getTestSuite(select=select))
    suite.addTest(TestEvalQueryMatch.getTestSuite(select=select))
    suite.addTest(TestRdfReport.getTestSuite(select=select))
    suite.addTest(TestGridMatch.getTestSuite(select=select))
    suite.addTest(TestMkMinim.getTestSuite(select=select))
    if select != "unit":
        suite.addTest(TestROSRS_Session.getTestSuite(select=select))
        suite.addTest(TestRemoteROMetadata.getTestSuite(select=select))
        suite.addTest(TestROSRSMetadata.getTestSuite(select=select))
        suite.addTest(TestRosrsSync.getTestSuite(select=select))
        suite.addTest(TestSyncCommands.getTestSuite(select=select))
        # Consistency fix: pass select as a keyword like every other call
        # above, and drop the stray trailing semicolon.
        suite.addTest(TestEvoCommands.getTestSuite(select=select))
    return suite
from MiscUtils import TestUtils
def runTestSuite():
    """
    Transfer function for setup.py script ro-manager-test

    Inserts the installed package's neighbouring test directories onto
    sys.path (mirroring the __main__ path setup above, but anchored at this
    file's location) and then runs the aggregate suite.
    """
    base = os.path.dirname(__file__)
    #print "Run test suite assuming base path "+base
    sys.path.insert(0, os.path.normpath(base+"/..") )
    sys.path.insert(0, os.path.normpath(base+"/../..") )
    sys.path.insert(0, os.path.normpath(base+"/../../iaeval/test") )
    sys.path.insert(0, os.path.normpath(base+"/../../sync/test") )
    #print "Path: "+repr(sys.path)
    TestUtils.runTests("TestAll", getTestSuite, sys.argv)
    # Always report success to the caller (failures are reported by TestUtils).
    return 0
if __name__ == "__main__":
    # NOTE: Python 2 print statements — this module is not Python 3 compatible.
    print "By default, runs quick tests only."
    print "Use \"python TestAll.py all\" to run all tests"
    TestUtils.runTests("TestAll", getTestSuite, sys.argv)
# End.
| 37.134831
| 83
| 0.741604
| 370
| 3,305
| 6.532432
| 0.294595
| 0.156392
| 0.188664
| 0.21597
| 0.414564
| 0.121638
| 0.07199
| 0.07199
| 0.041374
| 0.041374
| 0
| 0.007668
| 0.131921
| 3,305
| 88
| 84
| 37.556818
| 0.834786
| 0.070197
| 0
| 0.057143
| 0
| 0
| 0.10974
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0.328571
| null | null | 0.028571
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 4
|
2d83efe7c3959ede118ef8c59c19582ad3c63c61
| 1,099
|
py
|
Python
|
greenbot/modules/__init__.py
|
EMorf/greenbot
|
5528fcb9246109d6742a867b9668a408d43701d6
|
[
"MIT"
] | null | null | null |
greenbot/modules/__init__.py
|
EMorf/greenbot
|
5528fcb9246109d6742a867b9668a408d43701d6
|
[
"MIT"
] | null | null | null |
greenbot/modules/__init__.py
|
EMorf/greenbot
|
5528fcb9246109d6742a867b9668a408d43701d6
|
[
"MIT"
] | null | null | null |
from greenbot.modules.base import BaseModule
from greenbot.modules.base import ModuleSetting
from greenbot.modules.base import ModuleType
from greenbot.modules.basic import BasicCommandsModule
from greenbot.modules.basic.admincommands import AdminCommandsModule
from greenbot.modules.advancedadminlog import AdvancedAdminLog
from greenbot.modules.activitytracker import ActivityTracker
from greenbot.modules.chatchart import ChatChart
from greenbot.modules.giveaway import GiveawayModule
from greenbot.modules.memes import Memes
from greenbot.modules.movienight import MovieNight
from greenbot.modules.remindme import RemindMe
from greenbot.modules.role_to_level import RoleToLevel
from greenbot.modules.twitch_tracker import TwitchTracker
from greenbot.modules.timeout import TimeoutModule
from greenbot.modules.twitter import Twitter
# Registry of module classes exposed to the bot's module loader.
# Order is preserved as authored; each entry is imported above.
available_modules = [
    AdminCommandsModule,
    AdvancedAdminLog,
    ActivityTracker,
    BasicCommandsModule,
    ChatChart,
    GiveawayModule,
    Memes,
    MovieNight,
    RemindMe,
    RoleToLevel,
    TimeoutModule,
    TwitchTracker,
    Twitter,
]
| 32.323529
| 68
| 0.832575
| 115
| 1,099
| 7.921739
| 0.269565
| 0.210757
| 0.333699
| 0.075741
| 0.095499
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.122839
| 1,099
| 33
| 69
| 33.30303
| 0.945021
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.516129
| 0
| 0.516129
| 0
| 0
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 4
|
2d889c1134ab2ca2e9cb60521ee955183bedb7f1
| 41
|
py
|
Python
|
Chapter01/pipenv_run.py
|
PacktPublishing/Secret-Recipes-of-the-Python-Ninja
|
805d00c7a54927ba94c9077e9a580508ee3c5e56
|
[
"MIT"
] | 13
|
2018-06-21T01:44:49.000Z
|
2021-12-01T10:49:53.000Z
|
Chapter01/pipenv_run.py
|
PacktPublishing/Secret-Recipes-of-the-Python-Ninja
|
805d00c7a54927ba94c9077e9a580508ee3c5e56
|
[
"MIT"
] | null | null | null |
Chapter01/pipenv_run.py
|
PacktPublishing/Secret-Recipes-of-the-Python-Ninja
|
805d00c7a54927ba94c9077e9a580508ee3c5e56
|
[
"MIT"
] | 6
|
2018-10-05T08:29:24.000Z
|
2022-01-11T14:49:50.000Z
|
>>>pipenv run python3 <program_name>.py
| 13.666667
| 39
| 0.731707
| 6
| 41
| 4.833333
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.027778
| 0.121951
| 41
| 2
| 40
| 20.5
| 0.777778
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0
| null | null | 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
2d9b03adf213cc98a0493a31a6352ac186f53f38
| 1,045
|
py
|
Python
|
pirates/leveleditor/worldData/interior_spanish_store_destroyed.py
|
itsyaboyrocket/pirates
|
6ca1e7d571c670b0d976f65e608235707b5737e3
|
[
"BSD-3-Clause"
] | 3
|
2021-02-25T06:38:13.000Z
|
2022-03-22T07:00:15.000Z
|
pirates/leveleditor/worldData/interior_spanish_store_destroyed.py
|
itsyaboyrocket/pirates
|
6ca1e7d571c670b0d976f65e608235707b5737e3
|
[
"BSD-3-Clause"
] | null | null | null |
pirates/leveleditor/worldData/interior_spanish_store_destroyed.py
|
itsyaboyrocket/pirates
|
6ca1e7d571c670b0d976f65e608235707b5737e3
|
[
"BSD-3-Clause"
] | 1
|
2021-02-25T06:38:17.000Z
|
2021-02-25T06:38:17.000Z
|
# uncompyle6 version 3.2.0
# Python bytecode 2.4 (62061)
# Decompiled from: Python 2.7.14 (v2.7.14:84471935ed, Sep 16 2017, 20:19:30) [MSC v.1500 32 bit (Intel)]
# Embedded file name: pirates.leveleditor.worldData.interior_spanish_store_destroyed
from pandac.PandaModules import Point3, VBase3, Vec4, Vec3
# Machine-generated level-editor scene data (decompiled); do not hand-edit.
objectStruct = {'Objects': {'1276101352.0caoconno': {'Type': 'Building Interior', 'Name': '', 'Objects': {'1276101582.69caoconno': {'Type': 'Barrel', 'DisableCollision': False, 'Holiday': '', 'Hpr': Point3(0.0, 0.0, 0.0), 'Pos': Point3(4.95, 10.08, -0.0), 'Scale': Point3(1.0, 1.0, 1.0), 'VisSize': '', 'Visual': {'Model': 'models/props/pir_m_prp_cnt_barrelB_destroyed'}}}}}, 'Node Links': [], 'Layers': {}, 'ObjectIds': {'1276101352.0caoconno': '["Objects"]["1276101352.0caoconno"]', '1276101582.69caoconno': '["Objects"]["1276101352.0caoconno"]["Objects"]["1276101582.69caoconno"]'}}
# Editor camera / environment state captured with the scene.
extraInfo = {'camPos': Point3(18.6895, -47.9089, 13.2969), 'camHpr': VBase3(16.9022, -4.71118, 0), 'focalLength': 1.39999997616, 'skyState': 2, 'fog': 0}
| 149.285714
| 585
| 0.684211
| 141
| 1,045
| 5.014184
| 0.652482
| 0.016973
| 0.016973
| 0.016973
| 0.008487
| 0
| 0
| 0
| 0
| 0
| 0
| 0.210526
| 0.090909
| 1,045
| 7
| 586
| 149.285714
| 0.533684
| 0.227751
| 0
| 0
| 0
| 0
| 0.488169
| 0.239103
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.333333
| 0
| 0.333333
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 4
|
2da65dc27933a0883c1d285874f6d78c7387a021
| 78
|
py
|
Python
|
tests/basics/0prelim.py
|
learnforpractice/micropython-cpp
|
004bc8382f74899e7b876cc29bfa6a9cc976ba10
|
[
"MIT"
] | 13,648
|
2015-01-01T01:34:51.000Z
|
2022-03-31T16:19:53.000Z
|
tests/basics/0prelim.py
|
learnforpractice/micropython-cpp
|
004bc8382f74899e7b876cc29bfa6a9cc976ba10
|
[
"MIT"
] | 7,092
|
2015-01-01T07:59:11.000Z
|
2022-03-31T23:52:18.000Z
|
tests/basics/0prelim.py
|
learnforpractice/micropython-cpp
|
004bc8382f74899e7b876cc29bfa6a9cc976ba10
|
[
"MIT"
] | 4,942
|
2015-01-02T11:48:50.000Z
|
2022-03-31T19:57:10.000Z
|
# all tests need print to work! make sure it does work
print(1)
print('abc')
| 15.6
| 54
| 0.705128
| 15
| 78
| 3.666667
| 0.8
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.015873
| 0.192308
| 78
| 4
| 55
| 19.5
| 0.857143
| 0.666667
| 0
| 0
| 0
| 0
| 0.125
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
|
0
| 4
|
2dcc64b6dd2e49fac8a09323556e24b559ae3f9b
| 5,389
|
py
|
Python
|
pyroms_toolbox/pyroms_toolbox/sandbox/roms_movie.py
|
ChuningWang/pyroms
|
4f3773d39683ce78e76b30d0e41955f47d56edc2
|
[
"BSD-3-Clause"
] | null | null | null |
pyroms_toolbox/pyroms_toolbox/sandbox/roms_movie.py
|
ChuningWang/pyroms
|
4f3773d39683ce78e76b30d0e41955f47d56edc2
|
[
"BSD-3-Clause"
] | null | null | null |
pyroms_toolbox/pyroms_toolbox/sandbox/roms_movie.py
|
ChuningWang/pyroms
|
4f3773d39683ce78e76b30d0e41955f47d56edc2
|
[
"BSD-3-Clause"
] | null | null | null |
# encoding: utf-8
import os
import subprocess
import netCDF4
import matplotlib.pyplot as plt
import octant
def make_movie(filelst, varname, cmin, cmax, view, lev=0,
istart=None, iend=None, grd=None,
proj='merc', imode='on', title='', clean=False):
"""
make a movie using a 2D horizontal slice of variable varname from nc
files in filelst
"""
# get variable
data = netCDF4.MFDataset(filelst)
var = data.variables[varname]
# get grid
if grd is None:
grd = octant.roms.roms_grid.get_roms_grd(filelst[0])
else:
grd = grd
# determine where on the C-grid these variable lies
if var.dimensions[2].find('_rho') != -1:
Cpos = 'rho'
if var.dimensions[2].find('_u') != -1:
Cpos = 'u'
if var.dimensions[2].find('_v') != -1:
Cpos = 'v'
# get time
time = data.variables['ocean_time'][:]
if istart is not None:
istart = istart
else:
istart = 0
if iend is not None:
iend = iend
else:
iend = time.shape[0]
if cmin is None:
cmin = var.min()
else:
cmin = float(cmin)
if cmax is None:
cmax = var.max()
else:
cmax = float(cmax)
if imode == 'off':
print('Turn interactive mode off')
plt.ioff()
for tindex in range(istart, iend, 1):
if view == 'sview':
octant.roms.roms_plot.varsview(
var, grd, tindex, lev, Cpos, cmin=cmin, cmax=cmax, proj=proj,
title=title, outfile='plot.png')
elif view == 'zview':
octant.roms.roms_plot.varzview(
var, grd, tindex, lev, Cpos, cmin=cmin, cmax=cmax, proj=proj,
title=title, outfile='plot.png')
elif view == 'view2D':
octant.roms.roms_plot.varview2D(
var, grd, tindex, Cpos, cmin=cmin, cmax=cmax, proj=proj,
title=title, outfile='plot.png')
else:
print('Option not available. ' +
'View must be set to sview, zview or view2D')
outfile = str('%05d' % tindex) + '.png'
os.rename('plot.png', outfile)
command = ('mencoder',
'mf://0*.png',
'-mf',
'type=png:w=800:h=600:fps=25',
'-ovc',
'lavc',
'-lavcopts',
'vcodec=mpeg4',
'-oac',
'copy',
'-o',
'output.avi')
print("\n\nabout to execute:\n%s\n\n" % ' '.join(command))
subprocess.check_call(command)
print("\n\n The movie was written to 'output.avi'")
if imode == 'off':
print('Turn interactive mode on again')
plt.ion()
if clean is True:
for tindex in range(istart, iend, 1):
os.remove(str('%04d' % tindex) + '.png')
return
def make_big_movie(filelst, varname, cmin, cmax, Cpos, view, lev=0, grd=None,
proj='merc', imode='on', title='', clean=False):
"""
make a movie using a 2D horizontal slice of variable varname from nc
files in filelst
"""
# get grid
if grd is None:
grd = octant.roms.roms_grid.get_roms_grd(filelst[0])
else:
grd = grd
nfile = len(filelst)
if imode == 'off':
print('Turn interactive mode off')
plt.ioff()
counter = 0
for ifile in range(nfile):
# get variable
data = netCDF4.Dataset(filelst[ifile], 'r')
var = data.variables[varname]
# get time
time = data.variables['ocean_time'][:]
for tindex in range(time.shape[0]):
if view == 'sview':
octant.roms.roms_plot.varsview(
var, grd, tindex, lev, Cpos,
cmin=cmin, cmax=cmax, proj=proj,
title=title, outfile='plot.png')
elif view == 'zview':
octant.roms.roms_plot.varzview(
var, grd, tindex, lev, Cpos,
cmin=cmin, cmax=cmax, proj=proj,
title=title, outfile='plot.png')
elif view == 'view2D':
octant.roms.roms_plot.varview2D(
var, grd, tindex, Cpos,
cmin=cmin, cmax=cmax, proj=proj,
title=title, outfile='plot.png')
else:
print('Option not available. ' +
'View must be set to sview, zview or view2D')
Tindex = counter + tindex
outfile = str('%05d' % Tindex) + '.png'
os.rename('plot.png', outfile)
counter = counter + time.shape[0]
command = ('mencoder',
'mf://0*.png',
'-mf',
'type=png:w=800:h=600:fps=25',
'-ovc',
'lavc',
'-lavcopts',
'vcodec=mpeg4',
'-oac',
'copy',
'-o',
'output.avi')
if imode == 'off':
print('Turn interactive mode on again')
plt.ion()
print("\n\nabout to execute:\n%s\n\n" % ' '.join(command))
subprocess.check_call(command)
print("\n\n The movie was written to 'output.avi'")
if clean is True:
for tindex in range(counter):
os.remove(str('%05d' % tindex) + '.png')
return
| 26.416667
| 77
| 0.501948
| 642
| 5,389
| 4.179128
| 0.227414
| 0.023854
| 0.041744
| 0.040253
| 0.76817
| 0.708535
| 0.708535
| 0.669773
| 0.6489
| 0.6489
| 0
| 0.016686
| 0.366116
| 5,389
| 203
| 78
| 26.546798
| 0.768735
| 0.055669
| 0
| 0.707143
| 0
| 0
| 0.151497
| 0.010708
| 0
| 0
| 0
| 0
| 0
| 1
| 0.014286
| false
| 0
| 0.035714
| 0
| 0.064286
| 0.071429
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
2dd16cbcf3ca134afb7c3007dee96d5dac66a107
| 122
|
py
|
Python
|
yc130/339.py
|
c-yan/yukicoder
|
cdbbd65402177225dd989df7fe01f67908484a69
|
[
"MIT"
] | null | null | null |
yc130/339.py
|
c-yan/yukicoder
|
cdbbd65402177225dd989df7fe01f67908484a69
|
[
"MIT"
] | null | null | null |
yc130/339.py
|
c-yan/yukicoder
|
cdbbd65402177225dd989df7fe01f67908484a69
|
[
"MIT"
] | null | null | null |
from math import gcd
from functools import reduce
N, *A = map(int, open(0).read().split())
print(100 // reduce(gcd, A))
| 17.428571
| 40
| 0.672131
| 21
| 122
| 3.904762
| 0.761905
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.038835
| 0.155738
| 122
| 6
| 41
| 20.333333
| 0.757282
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0.25
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 4
|
2ddedde02b412274d553c995f97d3c6c24669f46
| 658
|
bzl
|
Python
|
source/bazel/deps/fast_io/get.bzl
|
luxe/CodeLang-compiler
|
78837d90bdd09c4b5aabbf0586a5d8f8f0c1e76a
|
[
"MIT"
] | 1
|
2019-01-06T08:45:46.000Z
|
2019-01-06T08:45:46.000Z
|
source/bazel/deps/fast_io/get.bzl
|
luxe/CodeLang-compiler
|
78837d90bdd09c4b5aabbf0586a5d8f8f0c1e76a
|
[
"MIT"
] | 264
|
2015-11-30T08:34:00.000Z
|
2018-06-26T02:28:41.000Z
|
source/bazel/deps/fast_io/get.bzl
|
UniLang/compiler
|
c338ee92994600af801033a37dfb2f1a0c9ca897
|
[
"MIT"
] | null | null | null |
# Do not edit this file directly.
# It was auto-generated by: code/programs/reflexivity/reflexive_refresh
load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive")
load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_file")
def fastIo():
http_archive(
name = "fast_io",
build_file = "//bazel/deps/fast_io:build.BUILD",
sha256 = "784191db22789ad7d78dbfea42803595712f32d0d61a0fed6a09b2c18b13f12b",
strip_prefix = "fast_io-a9c59fde3a378cda9c7cb021559880a843ecc880",
urls = [
"https://github.com/Unilang/fast_io/archive/a9c59fde3a378cda9c7cb021559880a843ecc880.tar.gz",
],
)
| 38.705882
| 105
| 0.712766
| 71
| 658
| 6.408451
| 0.605634
| 0.052747
| 0.061538
| 0.083516
| 0.189011
| 0.189011
| 0.189011
| 0.189011
| 0.189011
| 0.189011
| 0
| 0.167273
| 0.164134
| 658
| 16
| 106
| 41.125
| 0.66
| 0.153495
| 0
| 0
| 1
| 0
| 0.631769
| 0.418773
| 0
| 0
| 0
| 0
| 0
| 1
| 0.083333
| true
| 0
| 0
| 0
| 0.083333
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
934a7e8ea291f600c20f748cee60d2318f44094c
| 95
|
py
|
Python
|
example/test_keepers/gym_interface/bookkeeper.py
|
RubenvanHeusden/HFO-Robotkeeper
|
03bbe1170d703b7f264ef245b99a0ced2759ed39
|
[
"MIT"
] | null | null | null |
example/test_keepers/gym_interface/bookkeeper.py
|
RubenvanHeusden/HFO-Robotkeeper
|
03bbe1170d703b7f264ef245b99a0ced2759ed39
|
[
"MIT"
] | null | null | null |
example/test_keepers/gym_interface/bookkeeper.py
|
RubenvanHeusden/HFO-Robotkeeper
|
03bbe1170d703b7f264ef245b99a0ced2759ed39
|
[
"MIT"
] | null | null | null |
import numpy as np
import pandas as pd
class BookKeeper:
def __init__(self):
pass
| 13.571429
| 23
| 0.684211
| 14
| 95
| 4.357143
| 0.857143
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.273684
| 95
| 6
| 24
| 15.833333
| 0.884058
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.2
| false
| 0.2
| 0.4
| 0
| 0.8
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
|
0
| 4
|
935d0ea75b4ea577f851e5b997d0c1c3cb2cb7c5
| 1,556
|
py
|
Python
|
voter_location/districts/migrations/0001_initial.py
|
johnshiver/vote_location
|
95330bec5060f83fe2f2fe582f8ec63f740b035b
|
[
"MIT"
] | null | null | null |
voter_location/districts/migrations/0001_initial.py
|
johnshiver/vote_location
|
95330bec5060f83fe2f2fe582f8ec63f740b035b
|
[
"MIT"
] | null | null | null |
voter_location/districts/migrations/0001_initial.py
|
johnshiver/vote_location
|
95330bec5060f83fe2f2fe582f8ec63f740b035b
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# Generated by Django 1.10.6 on 2017-03-05 00:43
from __future__ import unicode_literals
import django.contrib.gis.db.models.fields
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='District',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('statename', models.CharField(max_length=50)),
('d_id', models.CharField(max_length=12)),
('district', models.CharField(max_length=2)),
('startcong', models.CharField(max_length=10)),
('endcong', models.CharField(max_length=10)),
('districtsi', models.CharField(max_length=32)),
('county', models.CharField(max_length=32)),
('page', models.CharField(max_length=32)),
('law', models.CharField(max_length=32)),
('note', models.CharField(max_length=32)),
('bestdec', models.CharField(max_length=32)),
('finalnote', models.CharField(max_length=26)),
('rnote', models.CharField(max_length=32)),
('lastchange', models.CharField(max_length=29)),
('fromcounty', models.CharField(max_length=2)),
('geom', django.contrib.gis.db.models.fields.MultiPolygonField(srid=4269)),
],
),
]
| 38.9
| 114
| 0.577763
| 161
| 1,556
| 5.434783
| 0.440994
| 0.257143
| 0.308571
| 0.411429
| 0.393143
| 0.068571
| 0
| 0
| 0
| 0
| 0
| 0.043478
| 0.275707
| 1,556
| 39
| 115
| 39.897436
| 0.732919
| 0.043702
| 0
| 0
| 1
| 0
| 0.081481
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.096774
| 0
| 0.225806
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
fa7fb888db581e0528b96c38c402b129977e14f9
| 171
|
py
|
Python
|
tests/web_platform/css_grid_1/grid_items/test_grid_items_sizing_alignment.py
|
jonboland/colosseum
|
cbf974be54fd7f6fddbe7285704cfaf7a866c5c5
|
[
"BSD-3-Clause"
] | 71
|
2015-04-13T09:44:14.000Z
|
2019-03-24T01:03:02.000Z
|
tests/web_platform/css_grid_1/grid_items/test_grid_items_sizing_alignment.py
|
jonboland/colosseum
|
cbf974be54fd7f6fddbe7285704cfaf7a866c5c5
|
[
"BSD-3-Clause"
] | 35
|
2019-05-06T15:26:09.000Z
|
2022-03-28T06:30:33.000Z
|
tests/web_platform/css_grid_1/grid_items/test_grid_items_sizing_alignment.py
|
jonboland/colosseum
|
cbf974be54fd7f6fddbe7285704cfaf7a866c5c5
|
[
"BSD-3-Clause"
] | 139
|
2015-05-30T18:37:43.000Z
|
2019-03-27T17:14:05.000Z
|
from tests.utils import W3CTestCase
class TestGridItemsSizingAlignment(W3CTestCase):
vars().update(W3CTestCase.find_tests(__file__, 'grid-items-sizing-alignment-'))
| 28.5
| 83
| 0.807018
| 18
| 171
| 7.388889
| 0.833333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.019108
| 0.081871
| 171
| 5
| 84
| 34.2
| 0.828025
| 0
| 0
| 0
| 0
| 0
| 0.163743
| 0.163743
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.333333
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 4
|
fa8d53f9fa884b4599a8e318ab444d0d4fe2a6c2
| 121
|
py
|
Python
|
src/plugins/radeky_bot/utils/bilibiliuploader/__init__.py
|
Radekyspec/Radeky_bot
|
24ee088026c7443723a5e9c72abfb512ca3b3327
|
[
"MIT"
] | 4
|
2021-12-25T10:17:13.000Z
|
2022-03-03T03:29:07.000Z
|
src/plugins/radeky_bot/utils/bilibiliuploader/__init__.py
|
Radekyspec/Radeky_bot
|
24ee088026c7443723a5e9c72abfb512ca3b3327
|
[
"MIT"
] | null | null | null |
src/plugins/radeky_bot/utils/bilibiliuploader/__init__.py
|
Radekyspec/Radeky_bot
|
24ee088026c7443723a5e9c72abfb512ca3b3327
|
[
"MIT"
] | 1
|
2021-12-25T10:17:16.000Z
|
2021-12-25T10:17:16.000Z
|
from .bilibiliuploader import BilibiliUploader
from .core import VideoPart
from . import BiliAuth
__version__ = '0.0.6'
| 20.166667
| 46
| 0.801653
| 15
| 121
| 6.2
| 0.6
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.028571
| 0.132231
| 121
| 5
| 47
| 24.2
| 0.857143
| 0
| 0
| 0
| 0
| 0
| 0.041322
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.75
| 0
| 0.75
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 4
|
fa8e533d9b6e1e1eb0f723662c70c4175b53f1b6
| 68
|
py
|
Python
|
create_database.py
|
project-nichijou/nichijou-db-tools
|
e75322415117d11d1453955fced6e5d6141ec1c6
|
[
"MIT"
] | null | null | null |
create_database.py
|
project-nichijou/nichijou-db-tools
|
e75322415117d11d1453955fced6e5d6141ec1c6
|
[
"MIT"
] | null | null | null |
create_database.py
|
project-nichijou/nichijou-db-tools
|
e75322415117d11d1453955fced6e5d6141ec1c6
|
[
"MIT"
] | null | null | null |
from database.database import NichijouDatabase
NichijouDatabase()
| 13.6
| 46
| 0.852941
| 6
| 68
| 9.666667
| 0.666667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.102941
| 68
| 4
| 47
| 17
| 0.95082
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 4
|
fa90edff9589239dd8331c40615642864b778c96
| 140
|
py
|
Python
|
app/message/views.py
|
KimKiHyuk/django-boilerplate
|
198a02e09366787549c3d7a76a98a0ea916af932
|
[
"MIT"
] | null | null | null |
app/message/views.py
|
KimKiHyuk/django-boilerplate
|
198a02e09366787549c3d7a76a98a0ea916af932
|
[
"MIT"
] | 1
|
2020-12-17T09:54:15.000Z
|
2020-12-17T09:54:15.000Z
|
app/message/views.py
|
KimKiHyuk/django-boilerplate
|
198a02e09366787549c3d7a76a98a0ea916af932
|
[
"MIT"
] | null | null | null |
from django.shortcuts import render
# Create your views here.
def myfunc_with_events(event, context):
print('here!!!')
print(event)
| 23.333333
| 39
| 0.728571
| 19
| 140
| 5.263158
| 0.842105
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.157143
| 140
| 6
| 40
| 23.333333
| 0.847458
| 0.164286
| 0
| 0
| 0
| 0
| 0.060345
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| false
| 0
| 0.25
| 0
| 0.5
| 0.5
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
|
0
| 4
|
faa3c4b97a57f8779239df0a39edceaf0b6a7c49
| 267
|
py
|
Python
|
MainApp/admin.py
|
MadhavPruthi/Email-Scraping-from-publication
|
7dd289e7ec9f5c19a57ac70ab4bae7bf1c8f9325
|
[
"Apache-2.0"
] | 3
|
2021-03-23T03:48:03.000Z
|
2021-11-24T04:08:38.000Z
|
MainApp/admin.py
|
MadhavPruthi/Email-Scraping-from-publication
|
7dd289e7ec9f5c19a57ac70ab4bae7bf1c8f9325
|
[
"Apache-2.0"
] | 3
|
2020-06-06T00:17:14.000Z
|
2021-06-10T22:23:50.000Z
|
MainApp/admin.py
|
MadhavPruthi/Email-Scraping-from-publication
|
7dd289e7ec9f5c19a57ac70ab4bae7bf1c8f9325
|
[
"Apache-2.0"
] | null | null | null |
from django.contrib import admin
from .models import EmailInfo, DOIQuery, EmailInfoJournal, JournalQuery
# Register your models here.
admin.site.register(EmailInfo)
admin.site.register(DOIQuery)
admin.site.register(JournalQuery)
admin.site.register(EmailInfoJournal)
| 33.375
| 71
| 0.838951
| 32
| 267
| 7
| 0.4375
| 0.160714
| 0.303571
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.074906
| 267
| 8
| 72
| 33.375
| 0.906883
| 0.097378
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.333333
| 0
| 0.333333
| 0
| 0
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 4
|
faade2e9a3d4624415700e5f831ecdbf559da630
| 118
|
py
|
Python
|
tests/worker/tests_worker.py
|
vtalks/twitter_worker
|
2a049817b8db5e8357075d101efcd2adea2cc2d2
|
[
"Apache-2.0"
] | 1
|
2018-01-29T16:46:32.000Z
|
2018-01-29T16:46:32.000Z
|
tests/worker/tests_worker.py
|
vtalks/twitter_worker
|
2a049817b8db5e8357075d101efcd2adea2cc2d2
|
[
"Apache-2.0"
] | 1
|
2018-02-06T08:07:19.000Z
|
2018-02-06T13:41:38.000Z
|
tests/tests_worker.py
|
vtalks/updater_worker
|
568ee2cecaf2a524dab4bf930c5e3f444c96744a
|
[
"Apache-2.0"
] | null | null | null |
import unittest
class WorkerTest(unittest.TestCase):
def test_dummy(self):
self.assertEqual(True, True)
| 16.857143
| 36
| 0.720339
| 14
| 118
| 6
| 0.785714
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.186441
| 118
| 7
| 37
| 16.857143
| 0.875
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.25
| 1
| 0.25
| false
| 0
| 0.25
| 0
| 0.75
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 4
|
fadc80cc24c92b0d42c901d381ac789348ca518e
| 69
|
py
|
Python
|
contests/atcoder/abc063/a.py
|
conao3/coder
|
2cdb610fec013da88a3470d460108e8a9b462445
|
[
"CC0-1.0"
] | null | null | null |
contests/atcoder/abc063/a.py
|
conao3/coder
|
2cdb610fec013da88a3470d460108e8a9b462445
|
[
"CC0-1.0"
] | null | null | null |
contests/atcoder/abc063/a.py
|
conao3/coder
|
2cdb610fec013da88a3470d460108e8a9b462445
|
[
"CC0-1.0"
] | null | null | null |
a, b = map(int, input().split())
print(a+b if a+b <= 9 else 'error')
| 23
| 35
| 0.565217
| 15
| 69
| 2.6
| 0.733333
| 0.153846
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.017544
| 0.173913
| 69
| 2
| 36
| 34.5
| 0.666667
| 0
| 0
| 0
| 0
| 0
| 0.072464
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 0.5
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
|
0
| 4
|
faeb32e215de2ea849498abb814dd23789f0c1fb
| 93
|
py
|
Python
|
test_frame/test_broker_kafka/kafka_publisher_test.py
|
DJMIN/funboost
|
7570ca2909bb0b44a1080f5f98aa96c86d3da9d4
|
[
"Apache-2.0"
] | 333
|
2019-08-08T10:25:27.000Z
|
2022-03-30T07:32:04.000Z
|
test_frame/test_broker_kafka/kafka_publisher_test.py
|
mooti-barry/funboost
|
2cd9530e2c4e5a52fc921070d243d402adbc3a0e
|
[
"Apache-2.0"
] | 38
|
2020-04-24T01:47:51.000Z
|
2021-12-20T07:22:15.000Z
|
test_frame/test_broker_kafka/kafka_publisher_test.py
|
mooti-barry/funboost
|
2cd9530e2c4e5a52fc921070d243d402adbc3a0e
|
[
"Apache-2.0"
] | 84
|
2019-08-09T11:51:14.000Z
|
2022-03-02T06:29:09.000Z
|
from kafka_cosumer_test import f
# i = 203
# f.push(i)
for i in range(200):
f.push(i)
| 10.333333
| 32
| 0.634409
| 19
| 93
| 3
| 0.684211
| 0.175439
| 0.210526
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.084507
| 0.236559
| 93
| 8
| 33
| 11.625
| 0.71831
| 0.182796
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.333333
| 0
| 0.333333
| 0
| 1
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 4
|
4f0593669854c9f3091d4f7782d98374183bc61a
| 102
|
py
|
Python
|
rotkehlchen/chain/ethereum/modules/kyber/__init__.py
|
rotkehlchenio/rotkehlchen
|
98f49cd3ed26c641fec03b78eff9fe1872385fbf
|
[
"BSD-3-Clause"
] | 137
|
2018-03-05T11:53:29.000Z
|
2019-11-03T16:38:42.000Z
|
rotkehlchen/chain/ethereum/modules/kyber/__init__.py
|
rotkehlchenio/rotkehlchen
|
98f49cd3ed26c641fec03b78eff9fe1872385fbf
|
[
"BSD-3-Clause"
] | 385
|
2018-03-08T12:43:41.000Z
|
2019-11-10T09:15:36.000Z
|
rotkehlchen/chain/ethereum/modules/kyber/__init__.py
|
rotkehlchenio/rotkehlchen
|
98f49cd3ed26c641fec03b78eff9fe1872385fbf
|
[
"BSD-3-Clause"
] | 59
|
2018-03-08T10:08:27.000Z
|
2019-10-26T11:30:44.000Z
|
from .accountant import KyberAccountant # noqa: F401
from .decoder import KyberDecoder # noqa: F401
| 34
| 53
| 0.784314
| 12
| 102
| 6.666667
| 0.666667
| 0.2
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.069767
| 0.156863
| 102
| 2
| 54
| 51
| 0.860465
| 0.205882
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 4
|
8799945f1b26293a1ea599946a4588564d26b901
| 199
|
py
|
Python
|
SimGeneral/MixingModule/python/stripDigitizer_APVModeDec_cff.py
|
ckamtsikis/cmssw
|
ea19fe642bb7537cbf58451dcf73aa5fd1b66250
|
[
"Apache-2.0"
] | 852
|
2015-01-11T21:03:51.000Z
|
2022-03-25T21:14:00.000Z
|
SimGeneral/MixingModule/python/stripDigitizer_APVModeDec_cff.py
|
ckamtsikis/cmssw
|
ea19fe642bb7537cbf58451dcf73aa5fd1b66250
|
[
"Apache-2.0"
] | 30,371
|
2015-01-02T00:14:40.000Z
|
2022-03-31T23:26:05.000Z
|
SimGeneral/MixingModule/python/stripDigitizer_APVModeDec_cff.py
|
ckamtsikis/cmssw
|
ea19fe642bb7537cbf58451dcf73aa5fd1b66250
|
[
"Apache-2.0"
] | 3,240
|
2015-01-02T05:53:18.000Z
|
2022-03-31T17:24:21.000Z
|
import FWCore.ParameterSet.Config as cms
# Silicon Strip Digitizer running with APV Mode Deconvolution
from SimGeneral.MixingModule.stripDigitizer_cfi import *
stripDigitizer.APVpeakmode = False
| 22.111111
| 61
| 0.839196
| 23
| 199
| 7.217391
| 0.913043
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.120603
| 199
| 8
| 62
| 24.875
| 0.948571
| 0.296482
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.666667
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 4
|
87a6655695a8875249cc76f99605f67be5fdbbe3
| 69
|
py
|
Python
|
compemu/__init__.py
|
trslater/compemu
|
a89730b1f82909772951d0c9111c07a136c6805d
|
[
"MIT"
] | null | null | null |
compemu/__init__.py
|
trslater/compemu
|
a89730b1f82909772951d0c9111c07a136c6805d
|
[
"MIT"
] | null | null | null |
compemu/__init__.py
|
trslater/compemu
|
a89730b1f82909772951d0c9111c07a136c6805d
|
[
"MIT"
] | null | null | null |
""".. include:: ../README.md
.. include:: ../docs/design-notes.md"""
| 23
| 39
| 0.565217
| 8
| 69
| 4.875
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.086957
| 69
| 2
| 40
| 34.5
| 0.619048
| 0.898551
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
87b0e459515b0b46ac41824d96e5f7f57f3ce85a
| 245
|
py
|
Python
|
p2/storage/local/apps.py
|
BeryJu/p2
|
80b5c6a821f90cef73d6e8cd3c6cdb05ffa86b27
|
[
"MIT"
] | null | null | null |
p2/storage/local/apps.py
|
BeryJu/p2
|
80b5c6a821f90cef73d6e8cd3c6cdb05ffa86b27
|
[
"MIT"
] | null | null | null |
p2/storage/local/apps.py
|
BeryJu/p2
|
80b5c6a821f90cef73d6e8cd3c6cdb05ffa86b27
|
[
"MIT"
] | null | null | null |
"""p2 LocalStorage App Config"""
from django.apps import AppConfig
class P2LocalStorageConfig(AppConfig):
"""p2 LocalStorage App Config"""
name = 'p2.storage.local'
label = 'p2_storage_local'
verbose_name = 'p2 Local Storage'
| 22.272727
| 38
| 0.710204
| 29
| 245
| 5.896552
| 0.551724
| 0.163743
| 0.19883
| 0.269006
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.029851
| 0.179592
| 245
| 10
| 39
| 24.5
| 0.820896
| 0.216327
| 0
| 0
| 0
| 0
| 0.265193
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.2
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 4
|
87c7b1f2ca0c8b602b17b25ffcd5c91632c79f38
| 167
|
py
|
Python
|
networkx-neo4j/nxneo4j/graph.py
|
KMS-TEAM/MyKMS
|
6baacc476ad21945858f77c573fe910b40cf6d12
|
[
"MIT"
] | 97
|
2018-04-24T07:29:08.000Z
|
2022-03-13T21:29:40.000Z
|
networkx-neo4j/nxneo4j/graph.py
|
KMS-TEAM/MyKMS
|
6baacc476ad21945858f77c573fe910b40cf6d12
|
[
"MIT"
] | 3
|
2019-02-12T13:00:54.000Z
|
2020-04-06T23:34:29.000Z
|
networkx-neo4j/nxneo4j/graph.py
|
KMS-TEAM/MyKMS
|
6baacc476ad21945858f77c573fe910b40cf6d12
|
[
"MIT"
] | 18
|
2018-10-16T07:28:27.000Z
|
2021-12-06T11:33:23.000Z
|
from nxneo4j.base_graph import BaseGraph
class Graph(BaseGraph):
def __init__(self, driver, config=None):
super().__init__(driver, "UNDIRECTED", config)
| 23.857143
| 54
| 0.724551
| 20
| 167
| 5.6
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.007143
| 0.161677
| 167
| 6
| 55
| 27.833333
| 0.792857
| 0
| 0
| 0
| 0
| 0
| 0.05988
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| false
| 0
| 0.25
| 0
| 0.75
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 4
|
87d0f0eb829f4abb6334d275b82336e20da62e06
| 22,663
|
py
|
Python
|
src/preprocessing/filteringbordersimages.py
|
AntonioGUJ/AirwaySegmentation_Keras
|
7da4c88dfde6f0dd2f8f181b2d3fd07dc2d28638
|
[
"MIT"
] | 15
|
2021-04-09T12:42:35.000Z
|
2022-03-22T09:01:57.000Z
|
src/preprocessing/filteringbordersimages.py
|
id-b3/bronchinet
|
5acf5243da2a0e38041bbbf2ffd033291eff13a4
|
[
"MIT"
] | 13
|
2021-03-31T11:16:12.000Z
|
2022-02-10T06:11:16.000Z
|
src/preprocessing/filteringbordersimages.py
|
id-b3/bronchinet
|
5acf5243da2a0e38041bbbf2ffd033291eff13a4
|
[
"MIT"
] | 9
|
2021-04-13T13:27:51.000Z
|
2022-02-25T07:03:25.000Z
|
from typing import Tuple, List, Union
import numpy as np
from common.exceptionmanager import catch_error_exception
from common.functionutil import ImagesUtil
from imageoperators.boundingboxes import BoundingBoxes, BoundBox3DType, BoundBox2DType
from preprocessing.imagegenerator import ImageGenerator
TYPES_FILTERING_AVAIL = ['linear', 'quadratic', 'cubic', 'exponential', 'fullzero']
class FilteringBordersImages(ImageGenerator):
_type_filtering_default = 'quadratic'
def __init__(self,
size_image: Union[Tuple[int, int, int], Tuple[int, int]],
size_output_image: Union[Tuple[int, int, int], Tuple[int, int],
List[Tuple[int, int, int]], List[Tuple[int, int]]],
type_filtering: str = _type_filtering_default,
is_filter_multiple_windows: bool = False
) -> None:
self._size_image = size_image
self._ndims = len(size_image)
self._size_output_image = size_output_image
self._type_filtering = type_filtering
self._is_filter_multiple_windows = is_filter_multiple_windows
if self._ndims == 2:
self._func_multiply_matrices_channels = self._multiply_matrices_with_channels_2d
elif self._ndims == 3:
self._func_multiply_matrices_channels = self._multiply_matrices_with_channels_3d
else:
message = 'FilteringBorderEffectsImages:__init__: wrong \'ndims\': %s' % (self._ndims)
catch_error_exception(message)
if self._type_filtering not in TYPES_FILTERING_AVAIL:
message = 'wrong input \'type_filtering\': \'%s\'. Options available: %s' \
% (self._type_filtering, TYPES_FILTERING_AVAIL)
catch_error_exception(message)
if self._is_filter_multiple_windows and type(self._size_output_image) != list:
message = 'with \'is_filter_multiple_windows\', input \'size_output_image\' must be a list. Received: %s' \
% (str(self._size_output_image))
catch_error_exception(message)
self._compute_factor_filtering()
super(FilteringBordersImages, self).__init__(size_image, 1)
def update_image_data(self, in_shape_image: Tuple[int, ...]) -> None:
self._num_images = in_shape_image[0]
def _get_image(self, in_image: np.ndarray) -> np.ndarray:
return self._get_filtered_image(in_image)
def _get_filtered_image(self, in_image: np.ndarray) -> np.ndarray:
if ImagesUtil.is_without_channels(self._size_image, in_image.shape):
return np.multiply(in_image, self._factor_filtering)
else:
return self._func_multiply_matrices_channels(in_image, self._factor_filtering)
def _fill_flat_interior_boundbox(self, inner_boundbox: Union[BoundBox3DType, BoundBox2DType],
value_fill: float) -> None:
raise NotImplementedError
def _fill_flat_exterior_boundbox(self, outer_boundbox: Union[BoundBox3DType, BoundBox2DType],
value_fill: float) -> None:
raise NotImplementedError
def _fill_progression_between_two_boundboxes(self, inner_boundbox: Union[BoundBox3DType, BoundBox2DType],
outer_boundbox: Union[BoundBox3DType, BoundBox2DType],
value_inner: float, value_outer: float) -> None:
raise NotImplementedError
@classmethod
def _compute_progression_increasing(cls, coord_beg: int, coord_end: int,
value_beg: float, value_end: float,
type_progression: str) -> np.ndarray:
if type_progression == 'linear':
progression_ref0_1 = cls._calc_linear_progression(coord_beg, coord_end)
elif type_progression == 'quadratic':
progression_ref0_1 = cls._calc_quadratic_progression(coord_beg, coord_end)
elif type_progression == 'cubic':
progression_ref0_1 = cls._calc_cubic_progression(coord_beg, coord_end)
elif type_progression == 'exponential':
progression_ref0_1 = cls._calc_exponential_progression(coord_beg, coord_end)
else: # type_progression == 'fullzeros':
progression_ref0_1 = np.zeros(coord_end - coord_beg, dtype=np.float32)
return progression_ref0_1 * (value_end - value_beg) + value_beg
@classmethod
def _compute_progression_decreasing(cls, coord_beg: int, coord_end: int,
                                    value_beg: float, value_end: float,
                                    type_progression: str) -> np.ndarray:
    """Decreasing counterpart of the increasing ramp: build it swapped, then reverse."""
    increasing = cls._compute_progression_increasing(
        coord_beg, coord_end, value_end, value_beg, type_progression)
    return increasing[::-1]
@staticmethod
def _compute_fill_progression(coord_beg: int, coord_end: int, value_fill: float,
type_filling: str) -> np.ndarray:
if type_filling == 'full':
return np.full(coord_end - coord_beg, value_fill, dtype=np.float32)
elif type_filling == 'zeros':
return np.zeros(coord_end - coord_beg, dtype=np.float32)
@staticmethod
def _multiply_matrices_with_channels_2d(matrix_1_channels: np.ndarray, matrix_2: np.ndarray) -> np.ndarray:
return np.einsum('ijk,ij->ijk', matrix_1_channels, matrix_2)
@staticmethod
def _multiply_matrices_with_channels_3d(matrix_1_channels: np.ndarray, matrix_2: np.ndarray) -> np.ndarray:
return np.einsum('ijkl,ijk->ijkl', matrix_1_channels, matrix_2)
@staticmethod
def _calc_tensor_product_2d(vector_1: np.ndarray, vector_2: np.ndarray) -> np.ndarray:
return np.einsum('i,j->ij', vector_1, vector_2)
@staticmethod
def _calc_tensor_product_3d(vector_1: np.ndarray, vector_2: np.ndarray, vector_3: np.ndarray) -> np.ndarray:
return np.einsum('i,j,k->ijk', vector_1, vector_2, vector_3)
@staticmethod
def _calc_linear_progression(coord_0: int, coord_1: int) -> np.ndarray:
return np.linspace(0, 1, coord_1 - coord_0)
@staticmethod
def _calc_quadratic_progression(coord_0: int, coord_1: int) -> np.ndarray:
return np.power(np.linspace(0, 1, coord_1 - coord_0), 2)
@staticmethod
def _calc_cubic_progression(coord_0: int, coord_1: int) -> np.ndarray:
return np.power(np.linspace(0, 1, coord_1 - coord_0), 3)
@staticmethod
def _calc_exponential_progression(coord_0: int, coord_1: int) -> np.ndarray:
return (np.exp(np.linspace(0, 1, coord_1 - coord_0)) - 1.0) / (np.exp(1) - 1.0)
def _compute_factor_filtering(self) -> None:
    """Precompute 'self._factor_filtering', the per-pixel/voxel scaling mask.

    The mask is 1 inside the output window(s) and decays towards 0 at the
    input image borders, with a ramp shape given by '_type_filtering'.
    """
    self._factor_filtering = np.zeros(self._size_image, dtype=np.float32)
    if self._is_filter_multiple_windows:
        # 'factor_filtering' defined:
        # - consider several concentrical windows, of increasing sizes, and until the input image border
        # - i_window == 0:
        #   - inside the window -> '1'
        #   - outside the window, and until next window -> decreasing function (linear, quadratic, ...)
        # - for i_window == 1, ...:
        #   - inside the window -> factor previous window
        #   - outside the window, and until next window (or input image borders) -> decreasing function
        num_windows = len(self._size_output_image)
        sizes_windows = self._size_output_image + [self._size_image]
        # BUG FIX: arguments were passed as (self._size_image, sizes_windows[0]),
        # i.e. swapped relative to every other call of
        # 'calc_boundbox_centered_image_fitimg' in this method — the innermost
        # window size must come first, fitted inside the full image size
        boundbox_output_image = BoundingBoxes.calc_boundbox_centered_image_fitimg(sizes_windows[0],
                                                                                  self._size_image)
        self._fill_flat_interior_boundbox(boundbox_output_image, 1.0)
        for iwin in range(num_windows):
            # factor steps down linearly per window: 1, 1 - 1/N, 1 - 2/N, ...
            iwindow_value_inner = 1.0 - iwin / float(num_windows)
            iwindow_value_outer = 1.0 - (iwin + 1) / float(num_windows)
            iwindow_boundbox_inner = BoundingBoxes.calc_boundbox_centered_image_fitimg(sizes_windows[iwin],
                                                                                       self._size_image)
            iwindow_boundbox_outer = BoundingBoxes.calc_boundbox_centered_image_fitimg(sizes_windows[iwin + 1],
                                                                                       self._size_image)
            self._fill_progression_between_two_boundboxes(iwindow_boundbox_inner, iwindow_boundbox_outer,
                                                          iwindow_value_inner, iwindow_value_outer)
    else:
        # 'factor_filtering' defined:
        # - inside the output window -> '1'
        # - outside the output window and until input image borders -> decreasing function (linear, quadratic, ...)
        boundbox_output_image = BoundingBoxes.calc_boundbox_centered_image_fitimg(self._size_output_image,
                                                                                  self._size_image)
        boundbox_input_image = BoundingBoxes.get_default_boundbox_image(self._size_image)
        self._fill_flat_interior_boundbox(boundbox_output_image, 1.0)
        self._fill_progression_between_two_boundboxes(boundbox_output_image, boundbox_input_image, 1.0, 0.0)
class FilteringBordersImages2D(FilteringBordersImages):
    """2D specialisation: fills and tapers the filtering factor over (x, y)."""

    def __init__(self,
                 size_image: Tuple[int, int],
                 size_output_image: Tuple[int, int],
                 type_filtering: str = FilteringBordersImages._type_filtering_default,
                 is_filter_multiple_windows: bool = False
                 ) -> None:
        super(FilteringBordersImages2D, self).__init__(size_image, size_output_image,
                                                       type_filtering=type_filtering,
                                                       is_filter_multiple_windows=is_filter_multiple_windows)

    def _fill_flat_interior_boundbox(self, inner_boundbox: BoundBox2DType,
                                     value_fill: float) -> None:
        # set 'value_fill' inside bounding-box
        ((x_left, x_right), (y_down, y_up)) = inner_boundbox
        self._factor_filtering[x_left:x_right, y_down:y_up] = value_fill

    def _fill_flat_exterior_boundbox(self, outer_boundbox: BoundBox2DType,
                                     value_fill: float) -> None:
        # set 'value_fill' outside bounding-box
        # four (possibly overlapping) slabs together cover everything outside
        ((x_left, x_right), (y_down, y_up)) = outer_boundbox
        self._factor_filtering[0:x_left, :] = value_fill
        self._factor_filtering[x_right:, :] = value_fill
        self._factor_filtering[:, 0:y_down] = value_fill
        self._factor_filtering[:, y_up:] = value_fill

    def _fill_progression_between_two_boundboxes(self, inner_boundbox: BoundBox2DType, outer_boundbox: BoundBox2DType,
                                                 value_inner: float, value_outer: float) -> None:
        # set progression between 'value_inner' and 'value_outer', between 'inner' and 'outer' bounding-boxes
        ((x_left_in, x_right_in), (y_down_in, y_up_in)) = inner_boundbox
        ((x_left_out, x_right_out), (y_down_out, y_up_out)) = outer_boundbox
        # 1D ramps along each side of the shell (shape set by '_type_filtering'):
        # increasing towards the inner box on the low side, decreasing on the high side
        progression_x_left = self._compute_progression_increasing(x_left_out, x_left_in, value_outer, value_inner,
                                                                  self._type_filtering)
        progression_x_right = self._compute_progression_decreasing(x_right_in, x_right_out, value_inner, value_outer,
                                                                   self._type_filtering)
        progression_y_down = self._compute_progression_increasing(y_down_out, y_down_in, value_outer, value_inner,
                                                                  self._type_filtering)
        progression_y_up = self._compute_progression_decreasing(y_up_in, y_up_out, value_inner, value_outer,
                                                                self._type_filtering)
        # constant ramps spanning the inner box extent, used for the lateral strips
        progression_x_middle = self._compute_fill_progression(x_left_in, x_right_in, value_inner, 'full')
        progression_y_middle = self._compute_fill_progression(y_down_in, y_up_in, value_inner, 'full')
        # laterals: strips adjacent to each side of the inner box
        self._factor_filtering[x_left_out:x_left_in, y_down_in:y_up_in] = \
            self._calc_tensor_product_2d(progression_x_left, progression_y_middle)
        self._factor_filtering[x_right_in:x_right_out, y_down_in:y_up_in] = \
            self._calc_tensor_product_2d(progression_x_right, progression_y_middle)
        self._factor_filtering[x_left_in:x_right_in, y_down_out:y_down_in] = \
            self._calc_tensor_product_2d(progression_x_middle, progression_y_down)
        self._factor_filtering[x_left_in:x_right_in, y_up_in:y_up_out] = \
            self._calc_tensor_product_2d(progression_x_middle, progression_y_up)
        # corners: products of the two adjacent side ramps
        self._factor_filtering[x_left_out:x_left_in, y_down_out:y_down_in] = \
            self._calc_tensor_product_2d(progression_x_left, progression_y_down)
        self._factor_filtering[x_right_in:x_right_out, y_down_out:y_down_in] = \
            self._calc_tensor_product_2d(progression_x_right, progression_y_down)
        self._factor_filtering[x_left_out:x_left_in, y_up_in:y_up_out] = \
            self._calc_tensor_product_2d(progression_x_left, progression_y_up)
        self._factor_filtering[x_right_in:x_right_out, y_up_in:y_up_out] = \
            self._calc_tensor_product_2d(progression_x_right, progression_y_up)
class FilteringBordersImages3D(FilteringBordersImages):
    """3D specialisation: fills and tapers the filtering factor over (z, x, y)."""

    def __init__(self,
                 size_image: Tuple[int, int, int],
                 size_output_image: Tuple[int, int, int],
                 type_filtering: str = FilteringBordersImages._type_filtering_default,
                 is_filter_multiple_windows: bool = False
                 ) -> None:
        super(FilteringBordersImages3D, self).__init__(size_image, size_output_image,
                                                       type_filtering=type_filtering,
                                                       is_filter_multiple_windows=is_filter_multiple_windows)

    def _fill_flat_interior_boundbox(self, inner_boundbox: BoundBox3DType,
                                     value_fill: float) -> None:
        # set 'value_fill' inside bounding-box
        ((z_back, z_front), (x_left, x_right), (y_down, y_up)) = inner_boundbox
        self._factor_filtering[z_back:z_front, x_left:x_right, y_down:y_up] = value_fill

    def _fill_flat_exterior_boundbox(self, outer_boundbox: BoundBox3DType,
                                     value_fill: float) -> None:
        # set 'value_fill' outside bounding-box
        # six (possibly overlapping) slabs together cover everything outside
        ((z_back, z_front), (x_left, x_right), (y_down, y_up)) = outer_boundbox
        self._factor_filtering[0:z_back, :, :] = value_fill
        self._factor_filtering[z_front:, :, :] = value_fill
        self._factor_filtering[:, 0:x_left, :] = value_fill
        self._factor_filtering[:, x_right:, :] = value_fill
        self._factor_filtering[:, :, 0:y_down] = value_fill
        self._factor_filtering[:, :, y_up:] = value_fill

    def _fill_progression_between_two_boundboxes(self, inner_boundbox: BoundBox3DType, outer_boundbox: BoundBox3DType,
                                                 value_inner: float, value_outer: float) -> None:
        # set progression between 'value_inner' and 'value_outer', between 'inner' and 'outer' bounding-boxes
        ((z_back_in, z_front_in), (x_left_in, x_right_in), (y_down_in, y_up_in)) = inner_boundbox
        ((z_back_out, z_front_out), (x_left_out, x_right_out), (y_down_out, y_up_out)) = outer_boundbox
        # 1D ramps along each face of the shell (shape set by '_type_filtering'):
        # increasing towards the inner box on the low side, decreasing on the high side
        progression_z_back = self._compute_progression_increasing(z_back_out, z_back_in, value_outer, value_inner,
                                                                  self._type_filtering)
        progression_z_front = self._compute_progression_decreasing(z_front_in, z_front_out, value_inner, value_outer,
                                                                   self._type_filtering)
        progression_x_left = self._compute_progression_increasing(x_left_out, x_left_in, value_outer, value_inner,
                                                                  self._type_filtering)
        progression_x_right = self._compute_progression_decreasing(x_right_in, x_right_out, value_inner, value_outer,
                                                                   self._type_filtering)
        progression_y_down = self._compute_progression_increasing(y_down_out, y_down_in, value_outer, value_inner,
                                                                  self._type_filtering)
        progression_y_up = self._compute_progression_decreasing(y_up_in, y_up_out, value_inner, value_outer,
                                                                self._type_filtering)
        # constant ramps spanning the inner box extent, used for faces and edges
        progression_z_middle = self._compute_fill_progression(z_back_in, z_front_in, value_inner, 'full')
        progression_x_middle = self._compute_fill_progression(x_left_in, x_right_in, value_inner, 'full')
        progression_y_middle = self._compute_fill_progression(y_down_in, y_up_in, value_inner, 'full')
        # laterals: the six face slabs adjacent to the inner box
        self._factor_filtering[z_back_in:z_front_in, x_left_out:x_left_in, y_down_in:y_up_in] = \
            self._calc_tensor_product_3d(progression_z_middle, progression_x_left, progression_y_middle)
        self._factor_filtering[z_back_in:z_front_in, x_right_in:x_right_out, y_down_in:y_up_in] = \
            self._calc_tensor_product_3d(progression_z_middle, progression_x_right, progression_y_middle)
        self._factor_filtering[z_back_in:z_front_in, x_left_in:x_right_in, y_down_out:y_down_in] = \
            self._calc_tensor_product_3d(progression_z_middle, progression_x_middle, progression_y_down)
        self._factor_filtering[z_back_in:z_front_in, x_left_in:x_right_in, y_up_in:y_up_out] = \
            self._calc_tensor_product_3d(progression_z_middle, progression_x_middle, progression_y_up)
        self._factor_filtering[z_back_out:z_back_in, x_left_in:x_right_in, y_down_in:y_up_in] = \
            self._calc_tensor_product_3d(progression_z_back, progression_x_middle, progression_y_middle)
        self._factor_filtering[z_front_in:z_front_out, x_left_in:x_right_in, y_down_in:y_up_in] = \
            self._calc_tensor_product_3d(progression_z_front, progression_x_middle, progression_y_middle)
        # edges corners: the twelve edge bars, products of two side ramps and one middle ramp
        self._factor_filtering[z_back_out:z_back_in, x_left_out:x_left_in, y_down_in:y_up_in] = \
            self._calc_tensor_product_3d(progression_z_back, progression_x_left, progression_y_middle)
        self._factor_filtering[z_back_out:z_back_in, x_right_in:x_right_out, y_down_in:y_up_in] = \
            self._calc_tensor_product_3d(progression_z_back, progression_x_right, progression_y_middle)
        self._factor_filtering[z_back_out:z_back_in, x_left_in:x_right_in, y_down_out:y_down_in] = \
            self._calc_tensor_product_3d(progression_z_back, progression_x_middle, progression_y_down)
        self._factor_filtering[z_back_out:z_back_in, x_left_in:x_right_in, y_up_in:y_up_out] = \
            self._calc_tensor_product_3d(progression_z_back, progression_x_middle, progression_y_up)
        self._factor_filtering[z_front_in:z_front_out, x_left_out:x_left_in, y_down_in:y_up_in] = \
            self._calc_tensor_product_3d(progression_z_front, progression_x_left, progression_y_middle)
        self._factor_filtering[z_front_in:z_front_out, x_right_in:x_right_out, y_down_in:y_up_in] = \
            self._calc_tensor_product_3d(progression_z_front, progression_x_right, progression_y_middle)
        self._factor_filtering[z_front_in:z_front_out, x_left_in:x_right_in, y_down_out:y_down_in] = \
            self._calc_tensor_product_3d(progression_z_front, progression_x_middle, progression_y_down)
        self._factor_filtering[z_front_in:z_front_out, x_left_in:x_right_in, y_up_in:y_up_out] = \
            self._calc_tensor_product_3d(progression_z_front, progression_x_middle, progression_y_up)
        self._factor_filtering[z_back_in:z_front_in, x_left_out:x_left_in, y_down_out:y_down_in] = \
            self._calc_tensor_product_3d(progression_z_middle, progression_x_left, progression_y_down)
        self._factor_filtering[z_back_in:z_front_in, x_right_in:x_right_out, y_down_out:y_down_in] = \
            self._calc_tensor_product_3d(progression_z_middle, progression_x_right, progression_y_down)
        self._factor_filtering[z_back_in:z_front_in, x_left_out:x_left_in, y_up_in:y_up_out] = \
            self._calc_tensor_product_3d(progression_z_middle, progression_x_left, progression_y_up)
        self._factor_filtering[z_back_in:z_front_in, x_right_in:x_right_out, y_up_in:y_up_out] = \
            self._calc_tensor_product_3d(progression_z_middle, progression_x_right, progression_y_up)
        # corners: the eight corner blocks, products of three side ramps
        self._factor_filtering[z_back_out:z_back_in, x_left_out:x_left_in, y_down_out:y_down_in] = \
            self._calc_tensor_product_3d(progression_z_back, progression_x_left, progression_y_down)
        self._factor_filtering[z_back_out:z_back_in, x_right_in:x_right_out, y_down_out:y_down_in] = \
            self._calc_tensor_product_3d(progression_z_back, progression_x_right, progression_y_down)
        self._factor_filtering[z_back_out:z_back_in, x_left_out:x_left_in, y_up_in:y_up_out] = \
            self._calc_tensor_product_3d(progression_z_back, progression_x_left, progression_y_up)
        self._factor_filtering[z_back_out:z_back_in, x_right_in:x_right_out, y_up_in:y_up_out] = \
            self._calc_tensor_product_3d(progression_z_back, progression_x_right, progression_y_up)
        self._factor_filtering[z_front_in:z_front_out, x_left_out:x_left_in, y_down_out:y_down_in] = \
            self._calc_tensor_product_3d(progression_z_front, progression_x_left, progression_y_down)
        self._factor_filtering[z_front_in:z_front_out, x_right_in:x_right_out, y_down_out:y_down_in] = \
            self._calc_tensor_product_3d(progression_z_front, progression_x_right, progression_y_down)
        self._factor_filtering[z_front_in:z_front_out, x_left_out:x_left_in, y_up_in:y_up_out] = \
            self._calc_tensor_product_3d(progression_z_front, progression_x_left, progression_y_up)
        self._factor_filtering[z_front_in:z_front_out, x_right_in:x_right_out, y_up_in:y_up_out] = \
            self._calc_tensor_product_3d(progression_z_front, progression_x_right, progression_y_up)
| 64.566952
| 119
| 0.672594
| 2,945
| 22,663
| 4.633277
| 0.061121
| 0.024185
| 0.06823
| 0.052327
| 0.791059
| 0.758959
| 0.722316
| 0.708465
| 0.665225
| 0.619934
| 0
| 0.009396
| 0.248599
| 22,663
| 350
| 120
| 64.751429
| 0.791884
| 0.045404
| 0
| 0.275362
| 0
| 0
| 0.013096
| 0.001759
| 0
| 0
| 0
| 0
| 0
| 1
| 0.097826
| false
| 0
| 0.021739
| 0.036232
| 0.188406
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
87d36b47e8214b18770c87bac3b8bfb16cebbf45
| 103
|
py
|
Python
|
server/main.py
|
kleanlins/wireless-paint
|
d45e7a50dbe9a093610ae74c236321f6cfad2f24
|
[
"MIT"
] | null | null | null |
server/main.py
|
kleanlins/wireless-paint
|
d45e7a50dbe9a093610ae74c236321f6cfad2f24
|
[
"MIT"
] | null | null | null |
server/main.py
|
kleanlins/wireless-paint
|
d45e7a50dbe9a093610ae74c236321f6cfad2f24
|
[
"MIT"
] | null | null | null |
from image import image_to_binary
from sender import modulate  # NOTE(review): imported but never used here — confirm it is needed
# Convert the 'app' image into its binary representation and dump it to stdout.
img = image_to_binary("app")
print(img)
| 17.166667
| 33
| 0.805825
| 17
| 103
| 4.647059
| 0.588235
| 0.177215
| 0.329114
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.126214
| 103
| 5
| 34
| 20.6
| 0.877778
| 0
| 0
| 0
| 0
| 0
| 0.029126
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.5
| 0
| 0.5
| 0.25
| 1
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 4
|
87d9c772ccba0633952be2ddce77880a6a4a1ef6
| 78
|
py
|
Python
|
src/class_resolver/contrib/__init__.py
|
rusty1s/class-resolver
|
b97ca43aff802141f390b6ca140f36016d287250
|
[
"MIT"
] | 3
|
2022-02-10T17:45:24.000Z
|
2022-02-12T12:24:55.000Z
|
src/class_resolver/contrib/__init__.py
|
rusty1s/class-resolver
|
b97ca43aff802141f390b6ca140f36016d287250
|
[
"MIT"
] | null | null | null |
src/class_resolver/contrib/__init__.py
|
rusty1s/class-resolver
|
b97ca43aff802141f390b6ca140f36016d287250
|
[
"MIT"
] | 1
|
2022-02-15T08:14:04.000Z
|
2022-02-15T08:14:04.000Z
|
# -*- coding: utf-8 -*-
"""Class resolvers that require external imports."""
| 19.5
| 52
| 0.641026
| 9
| 78
| 5.555556
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.015152
| 0.153846
| 78
| 3
| 53
| 26
| 0.742424
| 0.884615
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
87dbcfff422a7f0079a6d7b58fa07fc6b60151f0
| 3,650
|
py
|
Python
|
tests/test_old_input.py
|
alex-oleshkevich/kupala
|
2cbf566fb601631afc4fc1ec90384502ef546ce8
|
[
"MIT"
] | 8
|
2021-05-26T00:17:21.000Z
|
2022-03-28T13:15:22.000Z
|
tests/test_old_input.py
|
alex-oleshkevich/kupala
|
1c45b537846cec23480396ce7afac05baf83300d
|
[
"MIT"
] | 10
|
2021-11-06T16:56:43.000Z
|
2022-03-28T13:15:02.000Z
|
tests/test_old_input.py
|
alex-oleshkevich/kupala
|
2cbf566fb601631afc4fc1ec90384502ef546ce8
|
[
"MIT"
] | null | null | null |
from starsessions import SessionMiddleware
from kupala.application import Kupala
from kupala.requests import Request
from kupala.responses import JSONResponse, RedirectResponse
from kupala.routing import Route
from kupala.testclient import TestClient
async def set_view(request: Request) -> RedirectResponse:
    """Stash the posted form data in the session, then redirect to '/get'."""
    await request.remember_form_data()
    target = '/get'
    return RedirectResponse(target)
def get_view(request: Request) -> JSONResponse:
    """Expose whatever 'old input' the previous request left behind."""
    payload = request.old_input
    return JSONResponse(payload)
def test_old_input() -> None:
    """Form data remembered on POST is exposed as 'old input' exactly once.

    Uses a managed temporary file for the upload instead of the original
    hard-coded '/tmp/test1.txt' (non-portable and collision-prone between
    parallel test runs).
    """
    import tempfile

    app = Kupala(routes=[Route('/', set_view, methods=['POST']), Route('/get', get_view)])
    app.middleware.use(SessionMiddleware, secret_key='key', autoload=True)
    client = TestClient(app)
    with tempfile.NamedTemporaryFile(suffix='.txt') as f:
        f.write(b'content')
        f.seek(0)  # rewind so the upload sees the written content
        response = client.post(
            '/',
            allow_redirects=True,
            data=[('first_name', 'root'), ('last_name', 'user')],
            files=[('avatar', f)],
        )
    assert response.status_code == 200
    # only the plain form fields are replayed, not the uploaded file
    assert response.json() == {
        'first_name': 'root',
        'last_name': 'user',
    }
    # when accessing page for the second time, the session data has to be absent
    response = client.get('/get')
    assert response.json() == {}
def test_old_input_without_session() -> None:
    """Without session middleware, 'old input' must never be populated.

    BUG FIX: the final check re-used the POST response instead of actually
    requesting '/get' a second time, as the comment (and the sibling
    'test_old_input') intends; the second access is now performed.
    """
    app = Kupala(routes=[Route('/', set_view, methods=['POST']), Route('/get', get_view)])
    client = TestClient(app)
    response = client.post(
        '/',
        allow_redirects=True,
        data=[('first_name', 'root'), ('last_name', 'user')],
    )
    assert response.status_code == 200
    assert response.json() == {}
    # when accessing page for the second time, the session data has to be absent
    response = client.get('/get')
    assert response.json() == {}
async def set_form_errors_view(request: Request) -> RedirectResponse:
    """Record per-field validation errors on the request, then redirect to '/get'."""
    errors = {
        'first_name': ['This field is required.'],
        'last_name': ['This field is required.'],
    }
    request.set_form_errors(errors)
    return RedirectResponse('/get')
def get_form_errors_view(request: Request) -> JSONResponse:
    """Expose the form errors flashed by the previous request."""
    errors = request.form_errors
    return JSONResponse(errors)
def test_form_errors() -> None:
    """Form errors flashed on POST are visible exactly once on the next request."""
    application = Kupala(routes=[Route('/', set_form_errors_view, methods=['POST']), Route('/get', get_form_errors_view)])
    application.middleware.use(SessionMiddleware, secret_key='key', autoload=True)
    http_client = TestClient(application)
    resp = http_client.post(
        '/',
        allow_redirects=True,
        data={'first_name': 'root', 'last_name': 'user'},
    )
    assert resp.status_code == 200
    assert resp.json() == {
        'field_errors': {
            'first_name': ['This field is required.'],
            'last_name': ['This field is required.'],
        },
    }
    # when accessing page for the second time, the session data has to be absent
    resp = http_client.get('/get')
    assert resp.json() == {'field_errors': {}}
def test_form_errors_without_session() -> None:
    """Without session middleware, flashed form errors must be empty."""
    application = Kupala(routes=[Route('/', set_form_errors_view, methods=['POST']), Route('/get', get_form_errors_view)])
    http_client = TestClient(application)
    resp = http_client.post(
        '/',
        allow_redirects=True,
        data={'first_name': 'root', 'last_name': 'user'},
    )
    assert resp.status_code == 200
    assert resp.json() == {'field_errors': {}}
    # when accessing page for the second time, the session data has to be absent
    resp = http_client.get('/get')
    assert resp.json() == {'field_errors': {}}
| 30.165289
| 114
| 0.61726
| 420
| 3,650
| 5.197619
| 0.2
| 0.076958
| 0.065964
| 0.038937
| 0.803023
| 0.721484
| 0.710032
| 0.659643
| 0.626202
| 0.626202
| 0
| 0.005049
| 0.240274
| 3,650
| 120
| 115
| 30.416667
| 0.782185
| 0.081918
| 0
| 0.588889
| 0
| 0
| 0.126718
| 0
| 0
| 0
| 0
| 0
| 0.133333
| 1
| 0.066667
| false
| 0
| 0.066667
| 0.022222
| 0.177778
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
87e0fade4b4e28d11518990dc480d70e4a501644
| 127
|
py
|
Python
|
django-simple-captcha/testproject/forms.py
|
teeworldsCNFun/upTee
|
1c04b7421f4834f83bbb9f59f43dfebac08e56b0
|
[
"BSD-3-Clause"
] | null | null | null |
django-simple-captcha/testproject/forms.py
|
teeworldsCNFun/upTee
|
1c04b7421f4834f83bbb9f59f43dfebac08e56b0
|
[
"BSD-3-Clause"
] | null | null | null |
django-simple-captcha/testproject/forms.py
|
teeworldsCNFun/upTee
|
1c04b7421f4834f83bbb9f59f43dfebac08e56b0
|
[
"BSD-3-Clause"
] | null | null | null |
from django import forms
from captcha.fields import CaptchaField
class CaptchaForm(forms.Form):
    """Minimal Django form exposing a single CAPTCHA challenge field."""
    captcha = CaptchaField()
| 21.166667
| 40
| 0.787402
| 15
| 127
| 6.666667
| 0.666667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.149606
| 127
| 5
| 41
| 25.4
| 0.925926
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.5
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 4
|
3562d53aabcaadecdf60d125f0df4a15c454cbb7
| 205
|
py
|
Python
|
graphql/language/tests/test_location.py
|
ThanksBoomerang/graphql-core-legacy
|
6e2fbccdec655ce9122b84d3808c14242c4e6b96
|
[
"MIT"
] | 8
|
2020-03-23T21:34:02.000Z
|
2021-11-12T11:27:45.000Z
|
graphql/language/tests/test_location.py
|
ThanksBoomerang/graphql-core-legacy
|
6e2fbccdec655ce9122b84d3808c14242c4e6b96
|
[
"MIT"
] | 17
|
2020-03-14T22:22:29.000Z
|
2022-03-16T19:26:37.000Z
|
graphql/language/tests/test_location.py
|
ThanksBoomerang/graphql-core-legacy
|
6e2fbccdec655ce9122b84d3808c14242c4e6b96
|
[
"MIT"
] | 17
|
2020-03-23T12:06:23.000Z
|
2022-02-13T05:33:32.000Z
|
from graphql.language.location import SourceLocation
def test_repr_source_location():
    # type: () -> None
    """repr() of a SourceLocation names the class and both coordinates."""
    location = SourceLocation(10, 25)
    assert repr(location) == "SourceLocation(line=10, column=25)"
| 25.625
| 60
| 0.707317
| 25
| 205
| 5.68
| 0.72
| 0.239437
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.046784
| 0.165854
| 205
| 7
| 61
| 29.285714
| 0.783626
| 0.078049
| 0
| 0
| 0
| 0
| 0.181818
| 0.122995
| 0
| 0
| 0
| 0
| 0.25
| 1
| 0.25
| false
| 0
| 0.25
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
357205db8038bcfae6a4d48fc64b7045e7826c48
| 145,964
|
py
|
Python
|
lib/kb_SetUtilities/kb_SetUtilitiesImpl.py
|
kbaseapps/kb_SetUtilities
|
1d093bc1c3b896a541b850cd57b606bdea70d066
|
[
"MIT"
] | null | null | null |
lib/kb_SetUtilities/kb_SetUtilitiesImpl.py
|
kbaseapps/kb_SetUtilities
|
1d093bc1c3b896a541b850cd57b606bdea70d066
|
[
"MIT"
] | 22
|
2017-09-06T18:51:48.000Z
|
2021-09-14T18:58:34.000Z
|
lib/kb_SetUtilities/kb_SetUtilitiesImpl.py
|
kbaseapps/kb_SetUtilities
|
1d093bc1c3b896a541b850cd57b606bdea70d066
|
[
"MIT"
] | 3
|
2017-08-28T18:35:34.000Z
|
2019-02-19T16:44:17.000Z
|
# -*- coding: utf-8 -*-
#BEGIN_HEADER
import os
import re
import sys
from datetime import datetime
from pprint import pformat # ,pprint
from installed_clients.KBaseReportClient import KBaseReport
from installed_clients.SetAPIServiceClient import SetAPI
from installed_clients.WorkspaceClient import Workspace as workspaceService
#END_HEADER
class kb_SetUtilities:
'''
Module Name:
kb_SetUtilities
Module Description:
** A KBase module: kb_SetUtilities
**
** This module contains basic utilities for set manipulation, originally extracted
** from kb_util_dylan
**
'''
######## WARNING FOR GEVENT USERS ####### noqa
# Since asynchronous IO can lead to methods - even the same method -
# interrupting each other, you must be *very* careful when using global
# state. A method could easily clobber the state set by another while
# the latter method is running.
######################################### noqa
# Module metadata reported by the KBase SDK.
VERSION = "1.7.6"
GIT_URL = "https://github.com/kbaseapps/kb_SetUtilities"
GIT_COMMIT_HASH = "5d75bb3340d9a3b78f4b81d44f9ec0dc3b2195a9"
#BEGIN_CLASS_HEADER
# Service endpoint URLs and scratch directory; populated in __init__ from config.
workspaceURL = None
shockURL = None
handleURL = None
serviceWizardsURL = None
callbackURL = None
scratch = None
def now_ISO(self):
    """Return the current local time formatted as 'YYYY-MM-DD_HH:MM:SS'.

    BUG FIX: the original subtracted a naive UTC epoch from a naive *local*
    'now' and fed the result back through fromtimestamp(), which applies the
    local UTC offset twice on non-UTC hosts. Formatting the current local
    time directly gives the intended stamp and is identical on UTC hosts
    (strftime already drops sub-second precision).
    """
    return datetime.now().strftime('%Y-%m-%d_%T')
def log(self, target, message):
    """Prefix 'message' with a timestamp, append to 'target' (if given) and echo to stdout.

    'target' is a list collecting log messages; pass None to only print.
    """
    stamped = '[' + self.now_ISO() + '] ' + message
    if target is not None:
        target.append(stamped)
    print(stamped)
    sys.stdout.flush()
def check_params(self, params, required_params):
    """Raise ValueError listing every required key that is absent (or falsy) in 'params'."""
    missing_params = [param for param in required_params if not params.get(param)]
    if missing_params:
        raise ValueError("Missing required param(s):\n" + "\n".join(missing_params))
def ws_fetch_error(self, obj_desc, obj_ref, error=None):
    """Raise a uniform ValueError for a failed workspace fetch, appending 'error' when given."""
    parts = ['Unable to fetch ' + obj_desc + ' ref:' + obj_ref + ' from workspace.']
    if error is not None:
        parts.append(' Error: ' + str(error))
    raise ValueError(''.join(parts))
def set_provenance(self, ctx, input_ws_obj_refs=None, service_name=None, method_name=None):
    """Build the provenance record for a method call.

    Starts from the context's provenance (when present) and records the input
    object references plus the calling service/method names.

    BUG FIX: the context key was looked up as 'provenance ' (trailing space),
    so the context's existing provenance was never found and a fresh record
    was always created. Also replaces the mutable default argument ([]) with
    the None-sentinel idiom; behavior for callers is unchanged.
    """
    if input_ws_obj_refs is None:
        input_ws_obj_refs = []
    if ctx.get('provenance'):
        provenance = ctx['provenance']
    else:
        provenance = [{}]
    # add additional info to provenance here, especially the input data object reference(s)
    if 'input_ws_objects' not in provenance[0]:
        provenance[0]['input_ws_objects'] = []
    if len(input_ws_obj_refs) > 0:
        provenance[0]['input_ws_objects'].extend(input_ws_obj_refs)
    if service_name is not None:
        provenance[0]['service'] = service_name
    if method_name is not None:
        provenance[0]['method'] = method_name
    return provenance
def get_obj_name_and_type_from_obj_info(self, obj_info, full_type=False):
    """Extract (name, type) from a workspace object_info tuple.

    Strips the trailing '-<version>' from the type string; unless 'full_type'
    is set, also drops the module prefix ('Module.Type' -> 'Type').
    """
    NAME_I = 1  # object_info tuple positions (see workspace API)
    TYPE_I = 2
    obj_name = obj_info[NAME_I]
    obj_type = obj_info[TYPE_I].split('-')[0]
    if not full_type:
        obj_type = obj_type.split('.')[1]
    return (obj_name, obj_type)
def get_obj_ref_from_obj_info(self, obj_info):
    """Build a 'wsid/objid/version' reference string from an object_info tuple."""
    OBJID_I, VERSION_I, WSID_I = 0, 4, 6  # object_info tuple positions
    return '%s/%s/%s' % (obj_info[WSID_I], obj_info[OBJID_I], obj_info[VERSION_I])
def get_obj_ref_from_obj_info_noVer(self, obj_info):
    """Build a version-less 'wsid/objid' reference string from an object_info tuple."""
    OBJID_I, WSID_I = 0, 6  # object_info tuple positions
    return '%s/%s' % (obj_info[WSID_I], obj_info[OBJID_I])
def get_obj_data(self, obj_ref, obj_type_desc, full_type=False):
    """Fetch a workspace object and return (data, info, name, type).

    On failure, delegates to ws_fetch_error() which raises ValueError.
    """
    try:
        fetched = self.wsClient.get_objects2({'objects': [{'ref': obj_ref}]})['data'][0]
    except Exception as e:
        self.ws_fetch_error(obj_type_desc + ' object', obj_ref, error=e)
    obj_data = fetched['data']
    obj_info = fetched['info']
    (obj_name, obj_type) = self.get_obj_name_and_type_from_obj_info(obj_info, full_type)
    return (obj_data, obj_info, obj_name, obj_type)
def get_obj_info(self, obj_ref, obj_type_desc, full_type=False):
    """Fetch only the object_info tuple of a workspace object.

    Returns (info, name, type); on failure, delegates to ws_fetch_error()
    which raises ValueError.
    """
    try:
        fetched_info = self.wsClient.get_object_info_new({'objects': [{'ref': obj_ref}]})[0]
    except Exception as e:
        self.ws_fetch_error(obj_type_desc + ' object info', obj_ref, error=e)
    (obj_name, obj_type) = self.get_obj_name_and_type_from_obj_info(fetched_info, full_type)
    return (fetched_info, obj_name, obj_type)
def get_obj_info_list_from_ws_id(self, ws_id, obj_type, obj_type_desc):
    """List object_info tuples of 'obj_type' from a workspace given its numeric id."""
    try:
        return self.wsClient.list_objects({'ids': [ws_id], 'type': obj_type})
    except Exception as e:
        raise ValueError("Unable to list " + obj_type_desc + " objects from workspace: " + str(ws_id) + " " + str(e))
def get_obj_info_list_from_ws_name(self, ws_name, obj_type, obj_type_desc):
    """List object_info tuples of 'obj_type' from a workspace given its name.

    BUG FIX: the failure message referenced the undefined name 'ws_id'
    (copy-pasted from the ws_id variant), so any listing failure raised
    NameError instead of the intended ValueError; it now reports the
    workspace name it was given.
    """
    obj_info_list = []
    try:
        obj_info_list = self.wsClient.list_objects({'workspaces': [ws_name], 'type': obj_type})
    except Exception as e:
        raise ValueError("Unable to list " + obj_type_desc + " objects from workspace: " + str(ws_name) + " " + str(e))
    return obj_info_list
#END_CLASS_HEADER
# config contains contents of config file in a hash or None if it couldn't
# be found
def __init__(self, config):
    #BEGIN_CONSTRUCTOR
    # Auth token is injected into the environment by the KBase runtime.
    self.token = os.environ['KB_AUTH_TOKEN']
    # Service endpoints come from the deploy config dict.
    self.workspaceURL = config['workspace-url']
    self.shockURL = config['shock-url']
    self.serviceWizardURL = config['service-wizard-url']
    self.callbackURL = os.environ.get('SDK_CALLBACK_URL')
    # if self.callbackURL == None:
    #     self.callbackURL = os.environ['SDK_CALLBACK_URL']
    if self.callbackURL is None:
        raise ValueError("SDK_CALLBACK_URL not set in environment")
    # Scratch dir for intermediate files; created on demand.
    self.scratch = os.path.abspath(config['scratch'])
    if not os.path.exists(self.scratch):
        os.makedirs(self.scratch)
    # set test status for called modules
    self.SERVICE_VER = 'release'
    # instantiate clients
    try:
        self.wsClient = workspaceService(self.workspaceURL, token=self.token)
    except Exception as e:
        raise ValueError('Unable to connect to workspace at ' + self.workspaceURL + str(e))
    try:
        self.reportClient = KBaseReport(self.callbackURL, token=self.token, service_ver=self.SERVICE_VER)
    except Exception as e:
        raise ValueError('Unable to instantiate reportClient ' + str(e))
    try:
        self.setAPI_Client = SetAPI(url=self.serviceWizardURL, token=self.token, service_ver=self.SERVICE_VER)
    except Exception as e:
        raise ValueError('Unable to instantiate SetAPI' + str(e))
    #END_CONSTRUCTOR
    pass
def KButil_Localize_GenomeSet(self, ctx, params):
    """
    :param params: instance of type "KButil_Localize_GenomeSet_Params"
       (KButil_Localize_GenomeSet() ** ** Method for creating Genome Set
       with all local Genomes) -> structure: parameter "workspace_name"
       of type "workspace_name" (** The workspace object refs are of
       form: ** ** objects = ws.get_objects([{'ref':
       params['workspace_id']+'/'+params['obj_name']}]) ** ** "ref" means
       the entire name combining the workspace id and the object name **
       "id" is a numerical identifier of the workspace or object, and
       should just be used for workspace ** "name" is a string identifier
       of a workspace or object. This is received from Narrative.),
       parameter "input_ref" of type "data_obj_ref", parameter
       "output_name" of type "data_obj_name"
    :returns: instance of type "KButil_Localize_GenomeSet_Output" ->
       structure: parameter "report_name" of type "data_obj_name",
       parameter "report_ref" of type "data_obj_ref"
    """
    # ctx is the context object
    # return variables are: returnVal
    #BEGIN KButil_Localize_GenomeSet
    # NOTE(review): not implemented yet — everything below the raise is
    # unreachable KBase-SDK scaffolding kept for code-regeneration compatibility.
    raise NotImplementedError
    #END KButil_Localize_GenomeSet
    # At some point might do deeper type checking...
    if not isinstance(returnVal, dict):
        raise ValueError('Method KButil_Localize_GenomeSet return value ' +
                         'returnVal is not type dict as required.')
    # return the results
    return [returnVal]
def KButil_Localize_FeatureSet(self, ctx, params):
    """
    Copy every non-local Genome referenced by a FeatureSet into the
    FeatureSet's own workspace, then overwrite the FeatureSet so all of
    its genome refs point at local copies.

    :param params: instance of type "KButil_Localize_FeatureSet_Params"
       (KButil_Localize_FeatureSet() ** ** Method for creating Feature
       Set with all local Genomes) -> structure: parameter
       "workspace_name" of type "workspace_name" (** The workspace object
       refs are of form: ** ** objects = ws.get_objects([{'ref':
       params['workspace_id']+'/'+params['obj_name']}]) ** ** "ref" means
       the entire name combining the workspace id and the object name **
       "id" is a numerical identifier of the workspace or object, and
       should just be used for workspace ** "name" is a string identifier
       of a workspace or object. This is received from Narrative.),
       parameter "input_ref" of type "data_obj_ref", parameter
       "output_name" of type "data_obj_name"
    :returns: instance of type "KButil_Localize_FeatureSet_Output" ->
       structure: parameter "report_name" of type "data_obj_name",
       parameter "report_ref" of type "data_obj_ref"
    :raises ValueError: if required params are missing, the input object
       is not a FeatureSet, or a referenced genome has an unsupported type.
    """
    # ctx is the context object
    # return variables are: returnVal
    #BEGIN KButil_Localize_FeatureSet
    [OBJID_I, NAME_I, TYPE_I, SAVE_DATE_I, VERSION_I, SAVED_BY_I, WSID_I, WORKSPACE_I, CHSUM_I, SIZE_I, META_I] = list(range(11))  # object_info tuple
    console = []
    invalid_msgs = []
    self.log(console, 'Running KButil_Localize_FeatureSet with params=')
    self.log(console, "\n" + pformat(params))
    report = ''

    # param checks
    required_params = ['workspace_name',
                       'input_ref'
                       ]
    self.check_params (params, required_params)

    # read FeatureSet to get local workspace ID, source object name, and
    # list of original genome refs
    self.log (console, "READING LOCAL WORKSPACE ID")
    src_featureSet_ref = params['input_ref']
    (src_featureSet,
     info,
     src_featureSet_name,
     type_name) = self.get_obj_data(src_featureSet_ref, 'featureSet')
    if type_name != 'FeatureSet':
        raise ValueError("Bad Type: Should be FeatureSet instead of '" + type_name + "'")

    # Set local WSID from FeatureSet
    local_WSID = str(info[WSID_I])

    # read workspace to determine which genome objects are already present
    genome_obj_type = "KBaseGenomes.Genome"
    local_genome_refs_by_name = dict()
    genome_obj_info_list = self.get_obj_info_list_from_ws_id(local_WSID,
                                                             genome_obj_type,
                                                             genome_obj_type)
    # NOTE: loop var renamed from 'info' so the FeatureSet's info tuple
    # above is not clobbered.
    for this_info in genome_obj_info_list:
        genome_obj_ref = self.get_obj_ref_from_obj_info(this_info)
        (genome_obj_name, type_name) = self.get_obj_name_and_type_from_obj_info (this_info)
        local_genome_refs_by_name[genome_obj_name] = genome_obj_ref

    # set order for features list
    # FIX: removed "src_featureSet = data" -- 'data' was never defined;
    # src_featureSet already holds the object data from get_obj_data() above.
    self.log (console, "GETTING FEATURES ORDERING")
    src_element_ordering = []
    if 'element_ordering' in list(src_featureSet.keys()):
        src_element_ordering = src_featureSet['element_ordering']
    else:
        src_element_ordering = sorted(src_featureSet['elements'].keys())
    logMsg = 'features in input set {}: {}'.format(src_featureSet_ref,
                                                   len(src_element_ordering))
    self.log(console, logMsg)
    report += logMsg

    # Standardize genome refs to numerical IDs so string comparisons work
    # regardless of how the refs were originally written (name vs id).
    self.log (console, "STANDARDIZING GENOME REFS")
    genome_ref_to_standardized = dict()
    standardized_genome_refs = []
    for fId in src_element_ordering:
        for src_genome_ref in src_featureSet['elements'][fId]:
            if src_genome_ref in genome_ref_to_standardized:
                pass  # already standardized this ref
            else:
                (src_genome_obj_info,
                 src_genome_obj_name,
                 src_genome_obj_type) = self.get_obj_info(src_genome_ref, 'genome', full_type=True)
                #acceptable_types = ["KBaseGenomes.Genome", "KBaseGenomeAnnotations.GenomeAnnotation"]
                acceptable_types = ["KBaseGenomes.Genome"]
                if src_genome_obj_type not in acceptable_types:
                    raise ValueError("Input Genome of type: '" + src_genome_obj_type +
                                     "'.  Must be one of " + ", ".join(acceptable_types))
                standardized_src_genome_ref = self.get_obj_ref_from_obj_info(src_genome_obj_info)
                genome_ref_to_standardized[src_genome_ref] = standardized_src_genome_ref
                standardized_genome_refs.append(standardized_src_genome_ref)

    # Copy all non-local genomes to local workspace
    self.log (console, "COPYING NON-LOCAL GENOMES TO LOCAL WORKSPACE")
    src2dst_genome_refs = dict()
    objects_created = []
    local_genome_cnt = 0      # genomes already local (matched by name)
    non_local_genome_cnt = 0  # genomes copied over from other workspaces
    for src_genome_ref in standardized_genome_refs:
        this_WSID = str(src_genome_ref.split('/')[0])
        if this_WSID == local_WSID:
            # already in the local workspace: keep ref as-is
            src2dst_genome_refs[src_genome_ref] = src_genome_ref
        else:
            (src_genome_obj_data,
             src_genome_obj_info,
             src_genome_obj_name,
             type_name) = self.get_obj_data(src_genome_ref, 'genome')
            if src_genome_obj_name in local_genome_refs_by_name:
                # a local genome with the same name already exists: reuse it
                src2dst_genome_refs[src_genome_ref] = local_genome_refs_by_name[src_genome_obj_name]
                local_genome_cnt += 1
                continue
            non_local_genome_cnt += 1

            # set provenance
            input_ws_obj_refs = [src_featureSet_ref, src_genome_ref]
            provenance = self.set_provenance(ctx, input_ws_obj_refs, 'kb_SetUtilities', 'KButil_Localize_FeatureSet')

            # Save object
            src_genome_obj_ref = self.get_obj_ref_from_obj_info(src_genome_obj_info)
            self.log(console, "SAVING GENOME "+src_genome_obj_ref+" to workspace "+str(params['workspace_name'])+" (ws."+str(local_WSID)+")")
            dst_genome_obj_data = src_genome_obj_data
            (dst_genome_obj_name, type_name) = self.get_obj_name_and_type_from_obj_info (src_genome_obj_info)
            dst_genome_obj_info = self.wsClient.save_objects({
                'workspace': params['workspace_name'],
                'objects': [
                    {
                        'type': 'KBaseGenomes.Genome',
                        'data': dst_genome_obj_data,
                        'name': dst_genome_obj_name,
                        'meta': {},
                        'provenance': provenance
                    }
                ]})[0]
            dst_standardized_genome_ref = self.get_obj_ref_from_obj_info(dst_genome_obj_info)
            src2dst_genome_refs[src_genome_ref] = dst_standardized_genome_ref
            objects_created.append({'ref': dst_standardized_genome_ref,
                                    'description': 'localized '+dst_genome_obj_name})

    # Build Localized FeatureSet with local genome_refs
    if non_local_genome_cnt == 0 and local_genome_cnt == 0:
        self.log (console, "NO NON-LOCAL GENOME REFS FOUND")
    else:
        self.log (console, "BUILDING LOCAL FEATURESET")
        dst_featureSet_data = dict()
        # FIX: KBaseCollections.FeatureSet uses a 'description' field (as the
        # sibling Merge method does); the original wrote/read a 'desc' key,
        # which would KeyError on read and fail type-checking on save.
        dst_featureSet_data['description'] = src_featureSet.get('description', '')+' - localized'
        dst_featureSet_data['element_ordering'] = src_element_ordering
        dst_featureSet_data['elements'] = dict()
        for fId in src_element_ordering:
            dst_genome_refs = []
            # FIX: genome refs live under the 'elements' map
            # (original indexed src_featureSet[fId] directly -> KeyError)
            for orig_src_genome_ref in src_featureSet['elements'][fId]:
                standardized_src_genome_ref = genome_ref_to_standardized[orig_src_genome_ref]
                dst_genome_refs.append(src2dst_genome_refs[standardized_src_genome_ref])
            dst_featureSet_data['elements'][fId] = dst_genome_refs

        # Overwrite input FeatureSet object with local genome refs
        dst_featureSet_name = src_featureSet_name

        # set provenance
        input_ws_obj_refs = [src_featureSet_ref]
        provenance = self.set_provenance(ctx, input_ws_obj_refs, 'kb_SetUtilities', 'KButil_Localize_FeatureSet')

        # save output obj
        # FIX: save the dst_featureSet_data built above
        # (original referenced undefined 'output_FeatureSet')
        dst_featureSet_info = self.wsClient.save_objects({
            'workspace': params['workspace_name'],
            'objects': [
                {
                    'type': 'KBaseCollections.FeatureSet',
                    'data': dst_featureSet_data,
                    'name': dst_featureSet_name,
                    'meta': {},
                    'provenance': provenance
                }
            ]})[0]
        objects_created.append({'ref': params['workspace_name']+'/'+dst_featureSet_name,
                                'description': 'localized FeatureSet'})

    # build output report object
    self.log(console, "BUILDING REPORT")
    total_genomes_cnt = len(standardized_genome_refs)
    if non_local_genome_cnt > 0 or local_genome_cnt > 0:
        final_msg = []
        # FIX: was 'total_genome_cnt' (undefined name -> NameError)
        final_msg.append("Total genomes in FeatureSet: " + str(total_genomes_cnt))
        final_msg.append("Non-local genomes copied over: " + str(non_local_genome_cnt))
        final_msg.append("Local genomes with remote references: " + str(local_genome_cnt))
        logMsg = "\n".join(final_msg)
        self.log(console, logMsg)
        report += logMsg
        reportObj = {
            'objects_created': objects_created,
            'text_message': report
        }
    else:
        report += "NO NON-LOCAL GENOMES FOUND.  NO NEW FEATURESET CREATED."
        reportObj = {
            'objects_created': [],
            'text_message': report
        }

    # Save report
    report_info = self.reportClient.create({'report':reportObj, 'workspace_name':params['workspace_name']})
    returnVal = { 'report_name': report_info['name'], 'report_ref': report_info['ref'] }
    self.log(console, "KButil_Localize_FeatureSet DONE")
    #END KButil_Localize_FeatureSet

    # At some point might do deeper type checking...
    if not isinstance(returnVal, dict):
        raise ValueError('Method KButil_Localize_FeatureSet return value ' +
                         'returnVal is not type dict as required.')
    # return the results
    return [returnVal]
def KButil_Merge_FeatureSet_Collection(self, ctx, params):
    """
    :param params: instance of type
       "KButil_Merge_FeatureSet_Collection_Params"
       (KButil_Merge_FeatureSet_Collection() ** ** Method for merging
       FeatureSets) -> structure: parameter "workspace_name" of type
       "workspace_name" (** The workspace object refs are of form: ** **
       objects = ws.get_objects([{'ref':
       params['workspace_id']+'/'+params['obj_name']}]) ** ** "ref" means
       the entire name combining the workspace id and the object name **
       "id" is a numerical identifier of the workspace or object, and
       should just be used for workspace ** "name" is a string identifier
       of a workspace or object. This is received from Narrative.),
       parameter "input_refs" of type "data_obj_ref", parameter
       "output_name" of type "data_obj_name", parameter "desc" of String
    :returns: instance of type
       "KButil_Merge_FeatureSet_Collection_Output" -> structure:
       parameter "report_name" of type "data_obj_name", parameter
       "report_ref" of type "data_obj_ref"
    """
    # ctx is the context object
    # return variables are: returnVal
    #BEGIN KButil_Merge_FeatureSet_Collection
    console = []
    invalid_msgs = []
    self.log(console, 'Running KButil_Merge_FeatureSet_Collection with params=')
    self.log(console, "\n" + pformat(params))
    report = ''

    # validate params; default the description from the output name
    self.check_params(params, ['workspace_name', 'input_refs', 'output_name'])
    if 'desc' not in params:
        params['desc'] = params['output_name'] + ' Merged FeatureSet'

    # drop empty and duplicate input refs, keeping first-seen order
    deduped_refs = []
    for candidate_ref in params['input_refs']:
        if candidate_ref is not None and candidate_ref != '' and candidate_ref not in deduped_refs:
            deduped_refs.append(candidate_ref)
    params['input_refs'] = deduped_refs
    if len(params['input_refs']) < 2:
        self.log(console, "Must provide at least two FeatureSets")
        self.log(invalid_msgs, "Must provide at least two FeatureSets")

    # merge: union of features, each feature's genome-ref list deduped by
    # (genome_ref, feature_id) pair, ordering preserved across inputs
    element_ordering = []
    elements = {}
    seen_set_refs = dict()
    seen_feature_combos = dict()
    per_set_feature_cnt = dict()
    merged_feature_cnt = 0
    for set_ref in params['input_refs']:
        if set_ref in seen_set_refs:
            self.log(console, "repeat featureSet_ref: '" + set_ref + "'")
            self.log(invalid_msgs, "repeat featureSet_ref: '" + set_ref + "'")
            continue
        seen_set_refs[set_ref] = True
        per_set_feature_cnt[set_ref] = 0

        (fset_data,
         fset_info,
         fset_name,
         type_name) = self.get_obj_data(set_ref, 'featureSet')
        if type_name != 'FeatureSet':
            raise ValueError("Bad Type:  Should be FeatureSet instead of '" + type_name + "'")

        # honor explicit ordering if present, otherwise sort feature ids
        if 'element_ordering' in fset_data:
            this_ordering = fset_data['element_ordering']
        else:
            this_ordering = sorted(fset_data['elements'].keys())
        self.log(console, 'features in input set {}: {}'.format(set_ref,
                                                                len(this_ordering)))

        for fId in this_ordering:
            if not elements.get(fId):
                elements[fId] = []
                element_ordering.append(fId)
            for genome_ref in fset_data['elements'][fId]:
                per_set_feature_cnt[set_ref] += 1
                combo_key = genome_ref + '-' + fId
                if not seen_feature_combos.get(combo_key):
                    elements[fId].append(genome_ref)
                    merged_feature_cnt += 1
                    seen_feature_combos[combo_key] = True

        report += 'features in input set ' + set_ref + ': ' + str(
            per_set_feature_cnt[set_ref]) + "\n"

    # provenance covers every merged input
    provenance = self.set_provenance(ctx, params['input_refs'], 'kb_SetUtilities', 'KButil_Merge_FeatureSet_Collection')

    # save merged FeatureSet unless validation failed
    if len(invalid_msgs) == 0:
        self.log(console, "SAVING FEATURESET")
        output_FeatureSet = {'description': params['desc'],
                             'element_ordering': element_ordering,
                             'elements': elements}
        new_obj_info = self.wsClient.save_objects({
            'workspace': params['workspace_name'],
            'objects': [{'type': 'KBaseCollections.FeatureSet',
                         'data': output_FeatureSet,
                         'name': params['output_name'],
                         'meta': {},
                         'provenance': provenance}]})[0]

    # build output report object
    self.log(console, "BUILDING REPORT")
    if len(invalid_msgs) == 0:
        self.log(console, "features in output set " + params['output_name'] + ": "
                 + str(merged_feature_cnt))
        report += 'features in output set ' + params['output_name'] + ': '
        report += str(merged_feature_cnt) + "\n"
        reportObj = {
            'objects_created': [{'ref': params['workspace_name'] + '/' + params['output_name'],
                                 'description':'KButil_Merge_FeatureSet_Collection'}],
            'text_message': report
        }
    else:
        report += "FAILURE:\n\n" + "\n".join(invalid_msgs) + "\n"
        reportObj = {
            'objects_created': [],
            'text_message': report
        }

    # Save report
    report_info = self.reportClient.create({'report':reportObj, 'workspace_name':params['workspace_name']})
    returnVal = { 'report_name': report_info['name'], 'report_ref': report_info['ref'] }
    self.log(console, "KButil_Merge_FeatureSet_Collection DONE")
    #END KButil_Merge_FeatureSet_Collection

    # At some point might do deeper type checking...
    if not isinstance(returnVal, dict):
        raise ValueError('Method KButil_Merge_FeatureSet_Collection return value ' +
                         'returnVal is not type dict as required.')
    # return the results
    return [returnVal]
def KButil_Slice_FeatureSets_by_Genomes(self, ctx, params):
    """
    :param params: instance of type
       "KButil_Slice_FeatureSets_by_Genomes_Params"
       (KButil_Slice_FeatureSets_by_Genomes() ** ** Method for Slicing a
       FeatureSet or FeatureSets by a Genome, Genomes, or GenomeSet) ->
       structure: parameter "workspace_name" of type "workspace_name" (**
       The workspace object refs are of form: ** ** objects =
       ws.get_objects([{'ref':
       params['workspace_id']+'/'+params['obj_name']}]) ** ** "ref" means
       the entire name combining the workspace id and the object name **
       "id" is a numerical identifier of the workspace or object, and
       should just be used for workspace ** "name" is a string identifier
       of a workspace or object. This is received from Narrative.),
       parameter "input_featureSet_refs" of type "data_obj_ref",
       parameter "input_genome_refs" of type "data_obj_ref", parameter
       "output_name" of type "data_obj_name", parameter "desc" of String
    :returns: instance of type
       "KButil_Slice_FeatureSets_by_Genomes_Output" -> structure:
       parameter "report_name" of type "data_obj_name", parameter
       "report_ref" of type "data_obj_ref"
    """
    # ctx is the context object
    # return variables are: returnVal
    #BEGIN KButil_Slice_FeatureSets_by_Genomes
    console = []
    invalid_msgs = []
    self.log(console, 'Running Slice_FeatureSets_by_Genomes with params=')
    self.log(console, "\n" + pformat(params))
    # indices into the workspace object_info tuple
    [OBJID_I, NAME_I, TYPE_I, SAVE_DATE_I, VERSION_I, SAVED_BY_I, WSID_I, WORKSPACE_I, CHSUM_I, SIZE_I, META_I] = list(range(11))  # object_info tuple
    logMsg = ''
    report = ''

    # check params
    required_params = ['workspace_name',
                       'input_featureSet_refs',
                       'input_genome_refs',
                       'output_name'
                       ]
    self.check_params (params, required_params)
    if 'desc' not in params:
        params['desc'] = params['output_name']+' Sliced FeatureSet'

    # clean input_feature_refs: drop empty and duplicate refs, keep order
    clean_input_refs = []
    for ref in params['input_featureSet_refs']:
        if ref is not None and ref != '' and ref not in clean_input_refs:
            clean_input_refs.append(ref)
    params['input_featureSet_refs'] = clean_input_refs

    # clean input_genome_refs: same treatment
    clean_input_refs = []
    for ref in params['input_genome_refs']:
        if ref is not None and ref != '' and ref not in clean_input_refs:
            clean_input_refs.append(ref)
    params['input_genome_refs'] = clean_input_refs

    # Standardize genome refs so string comparisons are valid (only do requested genomes).
    # Refs may arrive as WS_NAME/OBJ_NAME, WS_ID/OBJ_ID, etc.; resolve each
    # to its canonical form once, up front.
    genome_ref_to_standardized = dict()
    genome_ref_from_standardized_in_input_flag = dict()  # membership test for the slice
    for this_genome_ref in params['input_genome_refs']:
        (genome_obj_info,
         genome_obj_name,
         genome_obj_type) = self.get_obj_info(this_genome_ref, 'genome', full_type=True)
        acceptable_types = ["KBaseGenomes.Genome", "KBaseMetagenomes.AnnotatedMetagenomeAssembly"]
        if genome_obj_type not in acceptable_types:
            raise ValueError("Input Genome of type: '" + genome_obj_type +
                             "'.  Must be one of " + ", ".join(acceptable_types))
        this_standardized_genome_ref = self.get_obj_ref_from_obj_info(genome_obj_info)
        genome_ref_to_standardized[this_genome_ref] = this_standardized_genome_ref
        genome_ref_from_standardized_in_input_flag[this_standardized_genome_ref] = True

    # Build sliced FeatureSets, one output per input FeatureSet
    featureSet_seen = dict()
    featureSet_genome_ref_to_standardized = dict()  # have to map genome refs in featureSets also because might be mixed WS_ID-WS_NAME/OBJID-OBJNAME and not exactly correspond with input genome refs
    feature_list_lens = []  # per-input count of retained features (0 = nothing saved)
    objects_created = []
    for featureSet_ref in params['input_featureSet_refs']:
        if featureSet_ref not in list(featureSet_seen.keys()):
            featureSet_seen[featureSet_ref] = 1
        else:
            # duplicate ref: record as invalid and skip
            self.log(console, "repeat featureSet_ref: '" + featureSet_ref + "'")
            self.log(invalid_msgs, "repeat featureSet_ref: '" + featureSet_ref + "'")
            continue

        (this_featureSet,
         info,
         this_featureSet_obj_name,
         type_name) = self.get_obj_data(featureSet_ref, 'featureSet')
        if type_name != 'FeatureSet':
            raise ValueError("Bad Type:  Should be FeatureSet instead of '" + type_name + "'")

        # honor explicit element_ordering when present, else sort feature ids
        this_element_ordering = []
        if 'element_ordering' in list(this_featureSet.keys()):
            this_element_ordering = this_featureSet['element_ordering']
        else:
            this_element_ordering = sorted(this_featureSet['elements'].keys())
        logMsg = 'features in input set {}: {}'.format(featureSet_ref,
                                                       len(this_element_ordering))
        self.log(console, logMsg)

        # Build sliced FeatureSet: keep a feature iff at least one of its
        # genome refs standardizes to a requested genome
        self.log (console, "BUILDING SLICED FEATURESET\n")
        self.log (console, "Slicing out genomes "+("\n".join(params['input_genome_refs'])))
        element_ordering = []
        elements = {}
        for fId in this_element_ordering:
            feature_hit = False
            genomes_retained = []
            for this_genome_ref in this_featureSet['elements'][fId]:
                genome_hit = False
                if this_genome_ref in genome_ref_to_standardized:  # The KEY line
                    genome_hit = True
                    standardized_genome_ref = genome_ref_to_standardized[this_genome_ref]
                elif this_genome_ref in featureSet_genome_ref_to_standardized:
                    # already standardized this featureSet-side ref earlier
                    standardized_genome_ref = featureSet_genome_ref_to_standardized[this_genome_ref]
                    if standardized_genome_ref in genome_ref_from_standardized_in_input_flag:
                        genome_hit = True
                else:  # get standardized genome_ref (first sighting; cache result)
                    (genome_obj_info,
                     genome_obj_name,
                     genome_obj_type) = self.get_obj_info(this_genome_ref, 'genome', full_type=True)
                    acceptable_types = ["KBaseGenomes.Genome", "KBaseMetagenomes.AnnotatedMetagenomeAssembly"]
                    if genome_obj_type not in acceptable_types:
                        raise ValueError("Input Genome of type: '" + genome_obj_type +
                                         "'.  Must be one of " + ", ".join(acceptable_types))
                    standardized_genome_ref = self.get_obj_ref_from_obj_info(genome_obj_info)
                    featureSet_genome_ref_to_standardized[this_genome_ref] = standardized_genome_ref
                    if standardized_genome_ref in genome_ref_from_standardized_in_input_flag:
                        genome_hit = True
                if genome_hit:
                    feature_hit = True
                    genomes_retained.append(standardized_genome_ref)
            if feature_hit:
                element_ordering.append(fId)
                elements[fId] = genomes_retained
        logMsg = 'features in sliced output set: {}'.format(len(element_ordering))
        self.log(console, logMsg)

        # Save output FeatureSet (skip save when nothing was retained)
        if len(element_ordering) == 0:
            report += 'no features for requested genomes in FeatureSet '+str(featureSet_ref)
            feature_list_lens.append(0)
        else:
            # set provenance
            self.log(console, "SETTING PROVENANCE")
            input_ws_obj_refs = [featureSet_ref]
            input_ws_obj_refs.extend(params['input_genome_refs'])
            # NOTE(review): provenance method string lacks the trailing 's'
            # of this method's name -- confirm intended
            provenance = self.set_provenance(ctx, input_ws_obj_refs, 'kb_SetUtilities', 'KButil_Slice_FeatureSets_by_Genome')

            # Store output object
            if len(invalid_msgs) == 0:
                self.log(console, "SAVING FEATURESET")
                output_FeatureSet = {'description': params['desc'],
                                     'element_ordering': element_ordering,
                                     'elements': elements}
                # disambiguate output names when slicing multiple FeatureSets
                output_name = params['output_name']
                if len(params['input_featureSet_refs']) > 1:
                    output_name += '-' + this_featureSet_obj_name
                new_obj_info = self.wsClient.save_objects({'workspace': params['workspace_name'],
                                                           'objects': [{
                                                               'type': 'KBaseCollections.FeatureSet',
                                                               'data': output_FeatureSet,
                                                               'name': output_name,
                                                               'meta': {},
                                                               'provenance': provenance}]})[0]
                feature_list_lens.append(len(element_ordering))
                objects_created.append({'ref': params['workspace_name'] + '/' + output_name,
                                        'description': params['desc']})

    # build output report object
    self.log(console, "BUILDING REPORT")
    if len(invalid_msgs) == 0:
        # objects_created only has entries for non-empty slices, so track
        # its index separately from the per-input feature_list_lens index
        obj_i = -1
        for output_i,list_len in enumerate(feature_list_lens):
            if feature_list_lens[output_i] == 0:
                report += 'No features for requested genomes in featureSet '+str(params['input_featureSet_refs'][output_i])+"\n"
            else:
                obj_i += 1
                report += 'features in output set ' + objects_created[obj_i]['ref'] + ': '
                report += str(feature_list_lens[output_i]) + "\n"
        reportObj = {
            'objects_created': objects_created,
            'text_message': report
        }
    else:
        report += "FAILURE:\n\n" + "\n".join(invalid_msgs) + "\n"
        reportObj = {
            'objects_created': [],
            'text_message': report
        }

    # Save report
    report_info = self.reportClient.create({'report':reportObj, 'workspace_name':params['workspace_name']})
    returnVal = { 'report_name': report_info['name'], 'report_ref': report_info['ref'] }
    self.log(console, "KButil_Slice_FeatureSets_by_Genomes DONE")
    #END KButil_Slice_FeatureSets_by_Genomes

    # At some point might do deeper type checking...
    if not isinstance(returnVal, dict):
        raise ValueError('Method KButil_Slice_FeatureSets_by_Genomes return value ' +
                         'returnVal is not type dict as required.')
    # return the results
    return [returnVal]
def KButil_Logical_Slice_Two_FeatureSets(self, ctx, params):
    """
    :param params: instance of type
       "KButil_Logical_Slice_Two_FeatureSets_Params"
       (KButil_Logical_Slice_Two_FeatureSets() ** ** Method for Slicing
       Two FeatureSets by Venn overlap) -> structure: parameter
       "workspace_name" of type "workspace_name" (** The workspace object
       refs are of form: ** ** objects = ws.get_objects([{'ref':
       params['workspace_id']+'/'+params['obj_name']}]) ** ** "ref" means
       the entire name combining the workspace id and the object name **
       "id" is a numerical identifier of the workspace or object, and
       should just be used for workspace ** "name" is a string identifier
       of a workspace or object. This is received from Narrative.),
       parameter "input_featureSet_ref_A" of type "data_obj_ref",
       parameter "input_featureSet_ref_B" of type "data_obj_ref",
       parameter "operator" of String, parameter "desc" of String,
       parameter "output_name" of type "data_obj_name"
    :returns: instance of type
       "KButil_Logical_Slice_Two_FeatureSets_Output" -> structure:
       parameter "report_name" of type "data_obj_name", parameter
       "report_ref" of type "data_obj_ref"
    """
    # ctx is the context object
    # return variables are: returnVal
    #BEGIN KButil_Logical_Slice_Two_FeatureSets
    console = []
    invalid_msgs = []
    self.log(console, 'Running Logical_Slice_Two_FeatureSets with params=')
    self.log(console, "\n" + pformat(params))
    # indices into the workspace object_info tuple
    [OBJID_I, NAME_I, TYPE_I, SAVE_DATE_I, VERSION_I, SAVED_BY_I, WSID_I, WORKSPACE_I, CHSUM_I, SIZE_I, META_I] = list(range(11))  # object_info tuple
    logMsg = ''
    report = ''
    # delimiter used to build a unique (genome_ref, feature_id) key
    genome_id_feature_id_delim = ".f:"

    # check params
    required_params = ['workspace_name',
                       'operator',
                       'input_featureSet_ref_A',
                       'input_featureSet_ref_B',
                       'output_name'
                       ]
    self.check_params (params, required_params)
    if 'desc' not in params:
        params['desc'] = params['output_name']+' Sliced FeatureSet'

    # Get FeatureSets A and B
    FeatureSet = dict()
    FeatureSet['A'] = dict()
    FeatureSet['B'] = dict()
    input_featureSet_refs = dict()
    input_featureSet_refs['A'] = params['input_featureSet_ref_A']
    input_featureSet_refs['B'] = params['input_featureSet_ref_B']
    input_featureSet_names = dict()
    for set_id in ['A','B']:
        (this_featureSet,
         info,
         this_featureSet_obj_name,
         type_name) = self.get_obj_data(input_featureSet_refs[set_id], 'featureSet')
        if type_name != 'FeatureSet':
            raise ValueError("Bad Type:  Should be FeatureSet instead of '" + type_name + "'")
        input_featureSet_names[set_id] = this_featureSet_obj_name
        FeatureSet[set_id] = this_featureSet
        # honor explicit element_ordering when present, else sort feature ids
        if 'element_ordering' not in list(this_featureSet.keys()):
            FeatureSet[set_id]['element_ordering'] = sorted(this_featureSet['elements'].keys())
        logMsg = 'features in input set {} - {}: {}'.format(set_id,
                                                            this_featureSet_obj_name,
                                                            len(FeatureSet[set_id]['element_ordering']))
        self.log(console, logMsg)
        report += logMsg+"\n"

    # Store A and B genome + fid hits keyed by "genome_ref.f:feature_id";
    # genome refs are normalized to versionless WSID/OBJID so the same
    # genome written differently in A and B still matches
    genome_feature_present = dict()
    genome_feature_present['A'] = dict()
    genome_feature_present['B'] = dict()
    featureSet_genome_ref_to_standardized = dict()  # must use standardized genome_refs
    for set_id in ['A','B']:
        for fId in FeatureSet[set_id]['element_ordering']:
            feature_standardized_genome_refs = []
            for this_genome_ref in FeatureSet[set_id]['elements'][fId]:
                if this_genome_ref in featureSet_genome_ref_to_standardized:
                    # cached from an earlier sighting
                    standardized_genome_ref_noVer = featureSet_genome_ref_to_standardized[this_genome_ref]
                else:  # get standardized genome_ref
                    (genome_obj_info,
                     genome_obj_name,
                     genome_obj_type) = self.get_obj_info(this_genome_ref, 'genome', full_type=True)
                    acceptable_types = ["KBaseGenomes.Genome", "KBaseGenomeAnnotations.GenomeAnnotation","KBaseMetagenomes.AnnotatedMetagenomeAssembly"]
                    if genome_obj_type not in acceptable_types:
                        raise ValueError("Input Genome of type: '" + genome_obj_type +
                                         "'.  Must be one of " + ", ".join(acceptable_types))
                    # versionless canonical ref: WSID/OBJID
                    standardized_genome_ref_noVer = '{}/{}'.format(genome_obj_info[WSID_I],
                                                                   genome_obj_info[OBJID_I])
                    featureSet_genome_ref_to_standardized[this_genome_ref] = standardized_genome_ref_noVer
                feature_standardized_genome_refs.append(standardized_genome_ref_noVer)  # standardize list
                combo_id = standardized_genome_ref_noVer + genome_id_feature_id_delim + fId
                genome_feature_present[set_id][combo_id] = True
                self.log(console,"Set {} contains {}".format(set_id,combo_id))
            # rewrite the in-memory set's refs with the standardized forms
            FeatureSet[set_id]['elements'][fId] = feature_standardized_genome_refs

    # Build sliced FeatureSet:
    #   yesA_yesB -> intersection (iterate A, keep if also in B)
    #   yesA_noB  -> A minus B    (iterate A, keep if not in B)
    #   otherwise (noA_yesB) -> B minus A (iterate B, keep if not in A)
    self.log (console, "BUILDING SLICED FEATURESET\n")
    output_element_ordering = []
    output_elements = dict()
    if params['operator'] == 'yesA_yesB' or params['operator'] == 'yesA_noB':
        input_element_ordering = FeatureSet['A']['element_ordering']
        fwd_set_id = 'A'
        rev_set_id = 'B'
    else:
        input_element_ordering = FeatureSet['B']['element_ordering']
        fwd_set_id = 'B'
        rev_set_id = 'A'
    for fId in input_element_ordering:
        feature_hit = False
        genomes_retained = []
        for this_genome_ref_noVer in FeatureSet[fwd_set_id]['elements'][fId]:
            combo_id = this_genome_ref_noVer + genome_id_feature_id_delim + fId
            self.log (console, "\t"+'checking set {} genome+fid: {}'.format(fwd_set_id,combo_id))
            if params['operator'] == 'yesA_yesB':
                # intersection: keep only if the other set also has this pair
                if genome_feature_present[rev_set_id].get(combo_id):
                    feature_hit = True
                    genomes_retained.append(this_genome_ref_noVer)
                    self.log(console, "keeping feature {}".format(combo_id))
            else:
                # difference: keep only if the other set lacks this pair
                if not genome_feature_present[rev_set_id].get(combo_id):
                    feature_hit = True
                    genomes_retained.append(this_genome_ref_noVer)
                    self.log(console, "keeping feature {}".format(combo_id))
        if feature_hit:
            output_element_ordering.append(fId)
            output_elements[fId] = genomes_retained
    logMsg = 'features in sliced output set: {}'.format(len(output_element_ordering))
    self.log(console, logMsg)

    # Save output FeatureSet
    objects_created = []

    # set provenance
    input_ws_obj_refs = [input_featureSet_refs['A'], input_featureSet_refs['B']]
    provenance = self.set_provenance(ctx, input_ws_obj_refs, 'kb_SetUtilities', 'KButil_Logical_Slice_Two_FeatureSets')

    if len(output_element_ordering) == 0:
        # nothing survived the slice: report only, no object saved
        report += 'no features to output under operator '+params['operator']+"\n"
    else:
        # Store output object
        if len(invalid_msgs) == 0:
            self.log(console, "SAVING FEATURESET")
            output_FeatureSet = {'description': params['desc'],
                                 'element_ordering': output_element_ordering,
                                 'elements': output_elements}
            output_name = params['output_name']
            new_obj_info = self.wsClient.save_objects({'workspace': params['workspace_name'],
                                                       'objects': [{
                                                           'type': 'KBaseCollections.FeatureSet',
                                                           'data': output_FeatureSet,
                                                           'name': output_name,
                                                           'meta': {},
                                                           'provenance': provenance}]})[0]
            objects_created.append({'ref': params['workspace_name'] + '/' + output_name,
                                    'description': params['desc']})

    # build output report object
    self.log(console, "BUILDING REPORT")
    if len(invalid_msgs) == 0:
        self.log(console, "features in output set " + params['output_name'] + ": "
                 + str(len(output_element_ordering)))
        report += 'features in output set ' + params['output_name'] + ': '
        report += str(len(output_element_ordering)) + "\n"
        reportObj = {
            'objects_created': objects_created,
            'text_message': report
        }
    else:
        report += "FAILURE:\n\n" + "\n".join(invalid_msgs) + "\n"
        reportObj = {
            'objects_created': [],
            'text_message': report
        }

    # Save report
    report_info = self.reportClient.create({'report':reportObj, 'workspace_name':params['workspace_name']})
    returnVal = { 'report_name': report_info['name'], 'report_ref': report_info['ref'] }
    self.log(console, "KButil_Logical_Slice_Two_FeatureSets DONE")
    #END KButil_Logical_Slice_Two_FeatureSets

    # At some point might do deeper type checking...
    if not isinstance(returnVal, dict):
        raise ValueError('Method KButil_Logical_Slice_Two_FeatureSets return value ' +
                         'returnVal is not type dict as required.')
    # return the results
    return [returnVal]
    def KButil_Logical_Slice_Two_AssemblySets(self, ctx, params):
        """
        Slice two AssemblySets by Venn overlap and save the surviving
        assemblies as a new AssemblySet via the SetAPI.

        :param params: instance of type
           "KButil_Logical_Slice_Two_AssemblySets_Params"
           (KButil_Logical_Slice_Two_AssemblySets() ** ** Method for Slicing
           Two AssemblySets by Venn overlap) -> structure: parameter
           "workspace_name" of type "workspace_name", parameter
           "input_assemblySet_ref_A" of type "data_obj_ref", parameter
           "input_assemblySet_ref_B" of type "data_obj_ref", parameter
           "operator" of String ('yesA_yesB' keeps the intersection,
           'yesA_noB' keeps assemblies only in A; any other value keeps
           assemblies only in B -- presumably 'noA_yesB'), parameter "desc"
           of String, parameter "output_name" of type "data_obj_name"
        :returns: instance of type
           "KButil_Logical_Slice_Two_AssemblySets_Output" -> structure:
           parameter "report_name" of type "data_obj_name", parameter
           "report_ref" of type "data_obj_ref"
        :raises ValueError: if an input is not an AssemblySet, a member is
           not a KBaseGenomeAnnotations.Assembly, or the SetAPI save fails
        """
        # ctx is the context object
        # return variables are: returnVal
        #BEGIN KButil_Logical_Slice_Two_AssemblySets
        console = []
        # NOTE(review): invalid_msgs is initialized but never appended to in
        # this method -- there is no FAILURE report path here
        invalid_msgs = []
        self.log(console, 'Running Logical_Slice_Two_AssemblySets with params=')
        self.log(console, "\n" + pformat(params))
        # symbolic indices into the 11-slot workspace object_info tuple
        [OBJID_I, NAME_I, TYPE_I, SAVE_DATE_I, VERSION_I, SAVED_BY_I, WSID_I, WORKSPACE_I, CHSUM_I, SIZE_I, META_I] = list(range(11))  # object_info tuple
        logMsg = ''
        report = ''

        # check params: raises via check_params if any required key missing
        required_params = ['workspace_name',
                           'operator',
                           'input_assemblySet_ref_A',
                           'input_assemblySet_ref_B',
                           'output_name'
                           ]
        self.check_params (params, required_params)
        if 'desc' not in params:
            params['desc'] = params['output_name']+' Sliced AssemblySet'

        # Get AssemblySets: fetch both input sets and record their sizes
        #
        AssemblySet = dict()
        AssemblySet['A'] = dict()
        AssemblySet['B'] = dict()
        input_assemblySet_refs = dict()
        input_assemblySet_refs['A'] = params['input_assemblySet_ref_A']
        input_assemblySet_refs['B'] = params['input_assemblySet_ref_B']
        input_assemblySet_names = dict()
        for set_id in ['A','B']:
            (this_assemblySet,
             info,
             this_assemblySet_obj_name,
             type_name) = self.get_obj_data(input_assemblySet_refs[set_id], 'assemblySet')
            if type_name != 'AssemblySet':
                raise ValueError("Bad Type: Should be AssemblySet instead of '" + type_name + "'")
            input_assemblySet_names[set_id] = this_assemblySet_obj_name
            AssemblySet[set_id] = this_assemblySet
            logMsg = 'assemblies in input set {} - {}: {}'.format(set_id,
                                                                  this_assemblySet_obj_name,
                                                                  len(AssemblySet[set_id]['items']))
            self.log(console, logMsg)
            report += logMsg+"\n"

        # Store A and B assemblies.
        # Normalize every member ref to a version-less "wsid/objid" form so
        # the same assembly referenced two different ways compares equal
        # across the two sets.
        #
        assembly_obj_present = dict()
        assembly_obj_present['A'] = dict()
        assembly_obj_present['B'] = dict()
        assembly_ref_to_standardized = dict()  # cache: raw ref -> standardized ref
        for set_id in ['A','B']:
            new_items = []
            for item in AssemblySet[set_id]['items']:
                # NOTE(review): standardized_assembly_refs is write-only in
                # this loop (reset per item, never read) -- likely vestigial
                standardized_assembly_refs = []
                this_assembly_ref = item['ref']
                if this_assembly_ref in assembly_ref_to_standardized:
                    standardized_assembly_ref_noVer = assembly_ref_to_standardized[this_assembly_ref]
                else:  # get standardized assembly_ref via an object_info lookup
                    (assembly_obj_info,
                     assembly_obj_name,
                     assembly_obj_type) = self.get_obj_info(this_assembly_ref, 'assembly', full_type=True)
                    acceptable_types = ["KBaseGenomeAnnotations.Assembly"]
                    if assembly_obj_type not in acceptable_types:
                        raise ValueError("Input Assembly of type: '" + assembly_obj_type +
                                         "'. Must be one of " + ", ".join(acceptable_types))
                    standardized_assembly_ref_noVer = '{}/{}'.format(assembly_obj_info[WSID_I],
                                                                     assembly_obj_info[OBJID_I])
                    assembly_ref_to_standardized[this_assembly_ref] = standardized_assembly_ref_noVer
                standardized_assembly_refs.append(standardized_assembly_ref_noVer)  # standardize list
                assembly_obj_present[set_id][standardized_assembly_ref_noVer] = True
                # rewrite the item in place with the standardized ref, keeping its label
                new_items.append({'ref':standardized_assembly_ref_noVer,'label':item['label']})
                self.log(console,"Set {} contains {}".format(set_id,standardized_assembly_ref_noVer))
            AssemblySet[set_id]['items'] = new_items

        # Build sliced AssemblySet: iterate the "forward" set and keep each
        # member by its presence/absence in the other ("reverse") set,
        # according to the operator
        #
        self.log (console, "BUILDING SLICED ASSEMBLYSET")
        output_items = []
        if params['operator'] == 'yesA_yesB' or params['operator'] == 'yesA_noB':
            input_items = AssemblySet['A']['items']
            fwd_set_id = 'A'
            rev_set_id = 'B'
        else:
            input_items = AssemblySet['B']['items']
            fwd_set_id = 'B'
            rev_set_id = 'A'
        for item in input_items:
            self.log (console, 'checking assembly {} from set {}'.format(item['ref'],fwd_set_id))
            this_standardized_assembly_ref_noVer = item['ref']
            if params['operator'] == 'yesA_yesB':
                # intersection: keep only if also present in the other set
                if assembly_obj_present[rev_set_id].get(this_standardized_assembly_ref_noVer):
                    self.log(console, "keeping assembly {}".format(item['ref']))
                    output_items.append(item)
            else:
                # one-sided difference: keep only if absent from the other set
                if not assembly_obj_present[rev_set_id].get(this_standardized_assembly_ref_noVer):
                    self.log(console, "keeping assembly {}".format(item['ref']))
                    output_items.append(item)
        logMsg = 'assemblies in sliced output set: {}'.format(len(output_items))
        self.log(console, logMsg)

        # Save output AssemblySet (only when the slice is non-empty)
        #
        # NOTE(review): objects_created is never appended to, so the success
        # report lists no created objects even though a set is saved --
        # confirm whether the saved AssemblySet ref should be added here
        objects_created = []
        if len(output_items) == 0:
            report += 'no assemblies to output under operator '+params['operator']+"\n"
        else:
            if params.get('desc'):
                output_desc = params['desc']
            else:
                output_desc = 'Venn slice '+params['operator']+' of AssemblySets '+input_assemblySet_names['A']+' and '+input_assemblySet_names['B']
            output_assemblySet_obj = { 'description': output_desc,
                                       'items': output_items
                                     }
            output_assemblySet_name = params['output_name']
            try:
                # SetAPI handles provenance and saving for KBaseSets.AssemblySet
                output_assemblySet_ref = self.setAPI_Client.save_assembly_set_v1 ({'workspace_name': params['workspace_name'],
                                                                                   'output_object_name': output_assemblySet_name,
                                                                                   'data': output_assemblySet_obj
                                                                                   })['set_ref']
            except Exception as e:
                raise ValueError('SetAPI FAILURE: Unable to save assembly set object to workspace: (' + params['workspace_name']+")\n" + str(e))

        # build output report object
        self.log(console, "BUILDING REPORT")
        if len(output_items) > 0:
            self.log(console, "assemblies in output set " + params['output_name'] + ": "
                     + str(len(output_items)))
            report += 'assemblies in output set ' + params['output_name'] + ': '
            report += str(len(output_items)) + "\n"
            reportObj = {
                'objects_created': objects_created,
                'text_message': report
            }
        else:
            reportObj = {
                'objects_created': [],
                'text_message': report
            }

        # Save report
        report_info = self.reportClient.create({'report':reportObj, 'workspace_name':params['workspace_name']})
        returnVal = { 'report_name': report_info['name'], 'report_ref': report_info['ref'] }
        self.log(console, "KButil_Logical_Slice_Two_AssemblySets DONE")
        #END KButil_Logical_Slice_Two_AssemblySets

        # At some point might do deeper type checking...
        if not isinstance(returnVal, dict):
            raise ValueError('Method KButil_Logical_Slice_Two_AssemblySets return value ' +
                             'returnVal is not type dict as required.')
        # return the results
        return [returnVal]
def KButil_Logical_Slice_Two_GenomeSets(self, ctx, params):
"""
:param params: instance of type
"KButil_Logical_Slice_Two_GenomeSets_Params"
(KButil_Logical_Slice_Two_GenomeSets() ** ** Method for Slicing
Two AssemblySets by Venn overlap) -> structure: parameter
"workspace_name" of type "workspace_name" (** The workspace object
refs are of form: ** ** objects = ws.get_objects([{'ref':
params['workspace_id']+'/'+params['obj_name']}]) ** ** "ref" means
the entire name combining the workspace id and the object name **
"id" is a numerical identifier of the workspace or object, and
should just be used for workspace ** "name" is a string identifier
of a workspace or object. This is received from Narrative.),
parameter "input_genomeSet_ref_A" of type "data_obj_ref",
parameter "input_genomeSet_ref_B" of type "data_obj_ref",
parameter "operator" of String, parameter "desc" of String,
parameter "output_name" of type "data_obj_name"
:returns: instance of type
"KButil_Logical_Slice_Two_GenomeSets_Output" -> structure:
parameter "report_name" of type "data_obj_name", parameter
"report_ref" of type "data_obj_ref"
"""
# ctx is the context object
# return variables are: returnVal
#BEGIN KButil_Logical_Slice_Two_GenomeSets
console = []
invalid_msgs = []
self.log(console, 'Running Logical_Slice_Two_GenomeSets with params=')
self.log(console, "\n" + pformat(params))
[OBJID_I, NAME_I, TYPE_I, SAVE_DATE_I, VERSION_I, SAVED_BY_I, WSID_I, WORKSPACE_I, CHSUM_I, SIZE_I, META_I] = list(range(11)) # object_info tuple
logMsg = ''
report = ''
# check params
required_params = ['workspace_name',
'operator',
'input_genomeSet_ref_A',
'input_genomeSet_ref_B',
'output_name'
]
self.check_params (params, required_params)
if 'desc' not in params:
params['desc'] = params['output_name']+' Sliced GenomeSet'
# Get GenomeSets
#
GenomeSet_element_refs = dict()
input_genomeSet_refs = dict()
input_genomeSet_refs['A'] = params['input_genomeSet_ref_A']
input_genomeSet_refs['B'] = params['input_genomeSet_ref_B']
input_genomeSet_names = dict()
for set_id in ['A','B']:
(this_genomeSet,
info,
this_genomeSet_obj_name,
type_name) = self.get_obj_data(input_genomeSet_refs[set_id], 'genomeSet')
input_genomeSet_names[set_id] = this_genomeSet_obj_name;
if type_name != 'GenomeSet':
raise ValueError("Bad Type: Should be GenomeSet instead of '" + type_name + "'")
GenomeSet_element_refs[set_id] = []
for genome_id in sorted(this_genomeSet['elements'].keys()):
GenomeSet_element_refs[set_id].append(this_genomeSet['elements'][genome_id]['ref'])
logMsg = 'genomes in input set {} - {}: {}'.format(set_id,
this_genomeSet_obj_name,
len(GenomeSet_element_refs[set_id]))
self.log(console, logMsg)
report += logMsg+"\n"
# Store A and B genome + fid hits
#
genome_obj_present = dict()
genome_obj_present['A'] = dict()
genome_obj_present['B'] = dict()
genome_ref_to_standardized = dict() # must use standardized genome_refs
for set_id in ['A','B']:
new_element_refs = []
for this_genome_ref in GenomeSet_element_refs[set_id]:
standardized_genome_refs = []
if this_genome_ref in genome_ref_to_standardized:
standardized_genome_ref_noVer = genome_ref_to_standardized[this_genome_ref]
else: # get standardized genome_ref
(genome_obj_info,
genome_obj_name,
genome_obj_type) = self.get_obj_info(this_genome_ref, 'genome', full_type=True)
acceptable_types = ["KBaseGenomes.Genome","KBaseGenomeAnnotations.GenomeAnnotation"]
if genome_obj_type not in acceptable_types:
raise ValueError("Input Genome of type: '" + genome_obj_type +
"'. Must be one of " + ", ".join(acceptable_types))
standardized_genome_ref_noVer = '{}/{}'.format(genome_obj_info[WSID_I],
genome_obj_info[OBJID_I])
genome_ref_to_standardized[this_genome_ref] = standardized_genome_ref_noVer
standardized_genome_refs.append(standardized_genome_ref_noVer) # standardize list
genome_obj_present[set_id][standardized_genome_ref_noVer] = True
new_element_refs.append(standardized_genome_ref_noVer)
self.log(console,"Set {} contains {}".format(set_id,standardized_genome_ref_noVer))
GenomeSet_element_refs[set_id] = new_element_refs
# Build sliced GenomeSet
#
self.log (console, "BUILDING SLICED GENOMESET")
output_items = []
if params['operator'] == 'yesA_yesB' or params['operator'] == 'yesA_noB':
input_element_refs = GenomeSet_element_refs['A']
fwd_set_id = 'A'
rev_set_id = 'B'
else:
input_element_refs = GenomeSet_element_refs['B']
fwd_set_id = 'B'
rev_set_id = 'A'
for this_standardized_genome_ref_noVer in input_element_refs:
self.log (console, 'checking set {} genome {}'.format(set_id,this_standardized_genome_ref_noVer))
if params['operator'] == 'yesA_yesB':
if genome_obj_present[rev_set_id].get(this_standardized_genome_ref_noVer):
output_items.append(this_standardized_genome_ref_noVer)
self.log(console, "keeping genome {}".format(this_standardized_genome_ref_noVer))
else:
if not genome_obj_present[rev_set_id].get(this_standardized_genome_ref_noVer):
output_items.append(this_standardized_genome_ref_noVer)
self.log(console, "keeping genome {}".format(this_standardized_genome_ref_noVer))
logMsg = 'genomes in sliced output set: {}'.format(len(output_items))
self.log(console, logMsg)
# Save output GenomeSet
#
objects_created = []
# set provenance
input_ws_obj_refs = [input_genomeSet_refs['A'], input_genomeSet_refs['B']]
provenance = self.set_provenance(ctx, input_ws_obj_refs, 'kb_SetUtilities', 'KButil_Logical_Slice_Two_GenomeSets')
if len(output_items) == 0:
report += 'no genomes to output under operator '+params['operator']+"\n"
else:
# KBaseSearch.GenomeSet form is a dict of elements, not a list of items
output_elements = dict();
for genome_ref in sorted(output_items):
output_elements[genome_ref] = {'ref':genome_ref}
if params.get('desc'):
output_desc = params['desc']
else:
output_desc = 'Venn slice '+params['operator']+' of GenomeSets '+input_genomeSet_names['A']+' and '+input_genomeSet_names['B']
output_genomeSet_obj = { 'description': output_desc,
'elements': output_elements
}
output_genomeSet_name = params['output_name']
new_obj_info = self.wsClient.save_objects({'workspace': params['workspace_name'],
'objects': [{
'type': 'KBaseSearch.GenomeSet',
'data': output_genomeSet_obj,
'name': output_genomeSet_name,
'meta': {},
'provenance': provenance}]})[0]
objects_created.append({'ref': params['workspace_name'] + '/' + output_genomeSet_name,
'description': output_desc})
# build output report object
self.log(console, "BUILDING REPORT")
if len(output_items) > 0:
self.log(console, "assemblies in output set " + params['output_name'] + ": "
+ str(len(output_items)))
report += 'genomes in output set ' + params['output_name'] + ': '
report += str(len(output_items)) + "\n"
reportObj = {
'objects_created': objects_created,
'text_message': report
}
else:
reportObj = {
'objects_created': [],
'text_message': report
}
# Save report
report_info = self.reportClient.create({'report':reportObj, 'workspace_name':params['workspace_name']})
returnVal = { 'report_name': report_info['name'], 'report_ref': report_info['ref'] }
self.log(console, "KButil_Logical_Slice_Two_GenomeSets DONE")
#END KButil_Logical_Slice_Two_GenomeSets
# At some point might do deeper type checking...
if not isinstance(returnVal, dict):
raise ValueError('Method KButil_Logical_Slice_Two_GenomeSets return value ' +
'returnVal is not type dict as required.')
# return the results
return [returnVal]
    def KButil_Merge_GenomeSets(self, ctx, params):
        """
        Merge two or more GenomeSets into a single KBaseSearch.GenomeSet,
        deduplicating members by their version-less standardized genome ref.

        :param params: instance of type "KButil_Merge_GenomeSets_Params"
           (KButil_Merge_GenomeSets() ** ** Method for merging GenomeSets)
           -> structure: parameter "workspace_name" of type
           "workspace_name", parameter "input_refs" of type "data_obj_ref",
           parameter "output_name" of type "data_obj_name", parameter "desc"
           of String
        :returns: instance of type "KButil_Merge_GenomeSets_Output" ->
           structure: parameter "report_name" of type "data_obj_name",
           parameter "report_ref" of type "data_obj_ref"
        :raises ValueError: if any input object is not a GenomeSet
        """
        # ctx is the context object
        # return variables are: returnVal
        #BEGIN KButil_Merge_GenomeSets
        # symbolic indices into the 11-slot workspace object_info tuple
        [OBJID_I, NAME_I, TYPE_I, SAVE_DATE_I, VERSION_I, SAVED_BY_I, WSID_I, WORKSPACE_I, CHSUM_I, SIZE_I, META_I] = list(range(11)) # object_info tuple
        console = []
        invalid_msgs = []
        self.log(console, 'Running KButil_Merge_GenomeSets with params=')
        self.log(console, "\n" + pformat(params))
        report = ''

        # check params
        required_params = ['workspace_name',
                           'input_refs',
                           'output_name'
                           ]
        self.check_params (params, required_params)
        if 'desc' not in params:
            params['desc'] = params['output_name']+' Merged GenomeSet'

        # clean input_refs: drop None/empty entries and duplicates,
        # preserving first-seen order
        clean_input_refs = []
        for ref in params['input_refs']:
            if ref is not None and ref != '' and ref not in clean_input_refs:
                clean_input_refs.append(ref)
        params['input_refs'] = clean_input_refs

        # fewer than two sets is recorded as a failure (FAILURE report
        # below), not raised immediately
        if len(params['input_refs']) < 2:
            self.log(console, "Must provide at least two GenomeSets")
            self.log(invalid_msgs, "Must provide at least two GenomeSets")

        # set provenance
        self.log(console, "SETTING PROVENANCE")
        input_ws_obj_refs = params['input_refs']
        provenance = self.set_provenance(ctx, input_ws_obj_refs, 'kb_SetUtilities', 'KButil_Merge_GenomeSets')

        # Build GenomeSet
        #
        elements = dict()

        # Add Genomes from GenomeSets, re-keying (and thereby deduplicating)
        # each member by its version-less standardized genome ref
        for input_genomeset_ref in params['input_refs']:
            (genomeSet,
             info,
             this_genomeSet_obj_name,
             type_name) = self.get_obj_data(input_genomeset_ref, 'genomeSet')
            if type_name != 'GenomeSet':
                raise ValueError("Bad Type: Should be GenomeSet instead of '" + type_name + "'")

            for gId in list(genomeSet['elements'].keys()):
                old_genomeRef = genomeSet['elements'][gId]['ref']
                (this_obj_info,
                 this_obj_name,
                 this_obj_type) = self.get_obj_info(old_genomeRef, 'genome')
                standardized_genomeRef = self.get_obj_ref_from_obj_info_noVer(this_obj_info)
                # the original element id is discarded; the new element id
                # IS the standardized ref, so ref collisions merge to one entry
                new_gId = standardized_genomeRef
                if not elements.get(new_gId):
                    elements[new_gId] = dict()
                elements[new_gId]['ref'] = standardized_genomeRef # the key line
                self.log(console, "adding element " + new_gId + " : " + standardized_genomeRef)

        # Store output object
        #
        if len(invalid_msgs) == 0:
            self.log(console, "SAVING GENOMESET")
            output_GenomeSet = {'description': params['desc'],
                                'elements': elements
                                }
            # NOTE(review): new_obj_info is not used afterward; the report
            # ref below is built from workspace/object names instead
            new_obj_info = self.wsClient.save_objects({'workspace': params['workspace_name'],
                                                       'objects': [{'type': 'KBaseSearch.GenomeSet',
                                                                    'data': output_GenomeSet,
                                                                    'name': params['output_name'],
                                                                    'meta': {},
                                                                    'provenance': provenance
                                                                    }]
                                                       })[0]

        # build output report object
        self.log(console, "BUILDING REPORT")
        if len(invalid_msgs) == 0:
            self.log(console, "genomes in output set " + params['output_name'] + ": " +
                     str(len(list(elements.keys()))))
            report += 'genomes in output set ' + params['output_name'] + ': '
            report += str(len(list(elements.keys()))) + "\n"
            ref = params['workspace_name'] + '/' + params['output_name']
            reportObj = {'objects_created': [{'ref': ref,
                                              'description': 'KButil_Merge_GenomeSets'}],
                         'text_message': report
                         }
        else:
            report += "FAILURE:\n\n" + "\n".join(invalid_msgs) + "\n"
            reportObj = {'objects_created': [],
                         'text_message': report
                         }

        # Save report
        report_info = self.reportClient.create({'report':reportObj, 'workspace_name':params['workspace_name']})
        returnVal = { 'report_name': report_info['name'], 'report_ref': report_info['ref'] }
        self.log(console, "KButil_Merge_GenomeSets DONE")
        #END KButil_Merge_GenomeSets

        # At some point might do deeper type checking...
        if not isinstance(returnVal, dict):
            raise ValueError('Method KButil_Merge_GenomeSets return value ' +
                             'returnVal is not type dict as required.')
        # return the results
        return [returnVal]
def KButil_Build_GenomeSet(self, ctx, params):
"""
:param params: instance of type "KButil_Build_GenomeSet_Params"
(KButil_Build_GenomeSet() ** ** Method for creating a GenomeSet)
-> structure: parameter "workspace_name" of type "workspace_name"
(** The workspace object refs are of form: ** ** objects =
ws.get_objects([{'ref':
params['workspace_id']+'/'+params['obj_name']}]) ** ** "ref" means
the entire name combining the workspace id and the object name **
"id" is a numerical identifier of the workspace or object, and
should just be used for workspace ** "name" is a string identifier
of a workspace or object. This is received from Narrative.),
parameter "input_refs" of type "data_obj_ref", parameter
"output_name" of type "data_obj_name", parameter "desc" of String
:returns: instance of type "KButil_Build_GenomeSet_Output" ->
structure: parameter "report_name" of type "data_obj_name",
parameter "report_ref" of type "data_obj_ref"
"""
# ctx is the context object
# return variables are: returnVal
#BEGIN KButil_Build_GenomeSet
console = []
invalid_msgs = []
self.log(console, 'Running KButil_Build_GenomeSet with params=')
self.log(console, "\n" + pformat(params))
report = ''
# check params
required_params = ['workspace_name',
'input_refs',
'output_name'
]
self.check_params (params, required_params)
if 'desc' not in params:
params['desc'] = params['output_name']+' Built GenomeSet'
# clean input_refs
clean_input_refs = []
for ref in params['input_refs']:
if ref is not None and ref != '' and ref not in clean_input_refs:
clean_input_refs.append(ref)
params['input_refs'] = clean_input_refs
if len(params['input_refs']) < 1:
self.log(console, "Must provide at least one Genome")
self.log(invalid_msgs, "Must provide at least one Genome")
# Build GenomeSet
#
elements = {}
genome_seen = dict()
for genomeRef in params['input_refs']:
if not genome_seen.get(genomeRef):
genome_seen[genomeRef] = True
(genomeObj,
info,
obj_name,
type_name) = self.get_obj_data(genomeRef, 'genome')
if type_name != 'Genome' and type_name != 'GenomeAnnotation':
errMsg = "Bad Type: Should be Genome or GenomeAnnotation not '{}' for ref: '{}'"
raise ValueError(errMsg.format(type_name, genomeRef))
if type_name == 'Genome':
genome_id = genomeObj['id']
else:
genome_id = genomeObj['genome_annotation_id']
genome_sci_name = genomeObj['scientific_name']
if genomeRef not in list(elements.keys()):
elements[genomeRef] = dict()
elements[genomeRef]['ref'] = genomeRef # the key line
self.log(console, "adding element {} ({}) aka ({}): {}".format(obj_name,
genome_sci_name,
genome_id,
genomeRef))
# set provenance
self.log(console, "SETTING PROVENANCE")
input_ws_obj_refs = params['input_refs']
provenance = self.set_provenance(ctx, input_ws_obj_refs, 'kb_SetUtilities', 'KButil_Build_GenomeSet')
# Store output object
if len(invalid_msgs) == 0:
self.log(console, "SAVING GENOMESET")
output_GenomeSet = {'description': params['desc'],
'elements': elements}
new_obj_info = self.wsClient.save_objects({'workspace': params['workspace_name'],
'objects': [{'type': 'KBaseSearch.GenomeSet',
'data': output_GenomeSet,
'name': params['output_name'],
'meta': {},
'provenance': provenance}]})[0]
# build output report object
#
self.log(console, "BUILDING REPORT")
if len(invalid_msgs) == 0:
self.log(console, "genomes in output set " + params['output_name'] +
": " + str(len(list(elements.keys()))))
report += 'genomes in output set ' + params['output_name'] + ': '
report += str(len(list(elements.keys()))) + "\n"
reportObj = {
'objects_created': [{'ref': params['workspace_name'] + '/' + params['output_name'],
'description':'KButil_Build_GenomeSet'}],
'text_message': report
}
else:
report += "FAILURE:\n\n" + "\n".join(invalid_msgs) + "\n"
reportObj = {'objects_created': [],
'text_message': report}
# Save report
report_info = self.reportClient.create({'report':reportObj, 'workspace_name':params['workspace_name']})
returnVal = { 'report_name': report_info['name'], 'report_ref': report_info['ref'] }
self.log(console, "KButil_Build_GenomeSet DONE")
#END KButil_Build_GenomeSet
# At some point might do deeper type checking...
if not isinstance(returnVal, dict):
raise ValueError('Method KButil_Build_GenomeSet return value ' +
'returnVal is not type dict as required.')
# return the results
return [returnVal]
def KButil_Build_GenomeSet_from_FeatureSet(self, ctx, params):
"""
:param params: instance of type
"KButil_Build_GenomeSet_from_FeatureSet_Params"
(KButil_Build_GenomeSet_from_FeatureSet() ** ** Method for
obtaining a GenomeSet from a FeatureSet) -> structure: parameter
"workspace_name" of type "workspace_name" (** The workspace object
refs are of form: ** ** objects = ws.get_objects([{'ref':
params['workspace_id']+'/'+params['obj_name']}]) ** ** "ref" means
the entire name combining the workspace id and the object name **
"id" is a numerical identifier of the workspace or object, and
should just be used for workspace ** "name" is a string identifier
of a workspace or object. This is received from Narrative.),
parameter "input_ref" of type "data_obj_ref", parameter
"output_name" of type "data_obj_name", parameter "desc" of String
:returns: instance of type
"KButil_Build_GenomeSet_from_FeatureSet_Output" -> structure:
parameter "report_name" of type "data_obj_name", parameter
"report_ref" of type "data_obj_ref"
"""
# ctx is the context object
# return variables are: returnVal
#BEGIN KButil_Build_GenomeSet_from_FeatureSet
console = []
invalid_msgs = []
self.log(console, 'Running KButil_Build_GenomeSet_from_FeatureSet with params=')
self.log(console, "\n" + pformat(params))
report = ''
# check params
required_params = ['workspace_name',
'input_ref',
'output_name'
]
self.check_params (params, required_params)
if 'desc' not in params:
params['desc'] = params['output_name']+' Built GenomeSet'
# Obtain FeatureSet
(featureSet,
info,
obj_name,
type_name) = self.get_obj_data(params['input_ref'], 'featureSet')
if type_name != 'FeatureSet':
raise ValueError("Bad Type: Should be FeatureSet instead of '" + type_name + "'")
# Build GenomeSet
elements = {}
genome_seen = dict()
for fId in list(featureSet['elements'].keys()):
for genomeRef in featureSet['elements'][fId]:
if not genome_seen.get(genomeRef):
genome_seen[genomeRef] = True
(genomeObj,
info,
obj_name,
type_name) = self.get_obj_data(genomeRef, 'genome')
if type_name == 'AnnotatedMetagenomeAssembly':
self.log(console, "SKIPPING AnnotatedMetagenomeAssembly Object "+obj_name)
continue
elif type_name != 'Genome' and type_name != 'GenomeAnnotaton':
errMsg = "Bad Type: Should be Genome or GenomeAnnotation instead"
errMsg += " of '{}' for ref: '{}'"
raise ValueError(errMsg.format(type_name, genomeRef))
if type_name == 'Genome':
genome_id = genomeObj['id']
else:
genome_id = genomeObj['genome_annotation_id']
genome_sci_name = genomeObj['scientific_name']
#if not genome_id in elements.keys():
# elements[genome_id] = dict()
#elements[genome_id]['ref'] = genomeRef # the key line
if genomeRef not in list(elements.keys()):
elements[genomeRef] = dict()
elements[genomeRef]['ref'] = genomeRef # the key line
self.log(console, "adding element {} ({}/{}) : {}".format(obj_name,
genome_sci_name,
genome_id,
genomeRef))
# set provenance
self.log(console, "SETTING PROVENANCE")
input_ws_obj_refs = [params['input_ref']]
provenance = self.set_provenance(ctx, input_ws_obj_refs, 'kb_SetUtilities', 'KButil_Build_GenomeSet_from_FeatureSet')
# Store output object
#
if len(invalid_msgs) == 0:
self.log(console, "SAVING GENOMESET")
output_GenomeSet = {'description': params['desc'],
'elements': elements}
new_obj_info = self.wsClient.save_objects({'workspace': params['workspace_name'],
'objects': [{'type': 'KBaseSearch.GenomeSet',
'data': output_GenomeSet,
'name': params['output_name'],
'meta': {},
'provenance': provenance}]})[0]
# build output report object
#
self.log(console, "BUILDING REPORT")
if len(invalid_msgs) == 0:
self.log(console, "genomes in output set " + params['output_name'] + ": " +
str(len(list(elements.keys()))))
report += 'genomes in output set {}:{}\n'.format(params['output_name'],
len(list(elements.keys())))
ref = "{}/{}".format(params['workspace_name'], params['output_name'])
reportObj = {'objects_created': [{'ref': ref,
'description': 'KButil_Build_GenomeSet_from_FeatureSet'}],
'text_message': report}
else:
report += "FAILURE:\n\n" + "\n".join(invalid_msgs) + "\n"
reportObj = {'objects_created': [],
'text_message': report}
# Save report
report_info = self.reportClient.create({'report':reportObj, 'workspace_name':params['workspace_name']})
returnVal = { 'report_name': report_info['name'], 'report_ref': report_info['ref'] }
self.log(console, "KButil_Build_GenomeSet_from_FeatureSet DONE")
#END KButil_Build_GenomeSet_from_FeatureSet
# At some point might do deeper type checking...
if not isinstance(returnVal, dict):
raise ValueError('Method KButil_Build_GenomeSet_from_FeatureSet return value ' +
'returnVal is not type dict as required.')
# return the results
return [returnVal]
    def KButil_Add_Genomes_to_GenomeSet(self, ctx, params):
        """
        Add Genomes (given singly, via GenomeSets, or via Tree leaf refs) to
        an existing GenomeSet and save the union as a new GenomeSet.

        :param params: instance of type
           "KButil_Add_Genomes_to_GenomeSet_Params"
           (KButil_Add_Genomes_to_GenomeSet() ** ** Method for adding a
           Genome to a GenomeSet) -> structure: parameter "workspace_name"
           of type "workspace_name", parameter "input_genome_refs" of list
           of type "data_obj_ref", parameter "input_genomeset_ref" of type
           "data_obj_ref", parameter "output_name" of type "data_obj_name",
           parameter "desc" of String
        :returns: instance of type "KButil_Add_Genomes_to_GenomeSet_Output"
           -> structure: parameter "report_name" of type "data_obj_name",
           parameter "report_ref" of type "data_obj_ref"
        :raises ValueError: if the base object is not a GenomeSet or an
           added ref is not a recognized genome/genomeSet/tree type
        """
        # ctx is the context object
        # return variables are: returnVal
        #BEGIN KButil_Add_Genomes_to_GenomeSet
        # init
        # symbolic indices into the 11-slot workspace object_info tuple
        [OBJID_I, NAME_I, TYPE_I, SAVE_DATE_I, VERSION_I, SAVED_BY_I, WSID_I, WORKSPACE_I, CHSUM_I, SIZE_I, META_I] = list(range(11)) # object_info tuple
        console = []
        invalid_msgs = []
        self.log(console, 'Running KButil_Add_Genomes_to_GenomeSet with params=')
        self.log(console, "\n" + pformat(params))
        report = ''

        # check params
        required_params = ['workspace_name',
                           'input_genome_refs',
                           'input_genomeset_ref',
                           'output_name'
                           ]
        self.check_params (params, required_params)
        if 'desc' not in params:
            params['desc'] = params['output_name']+' Increased GenomeSet'

        # Build GenomeSet: elements keyed by genome ref (dedupes repeats)
        elements = dict()
        # NOTE(review): query_genome_ref_order is appended to throughout but
        # never read in this method -- confirm whether it can be dropped
        query_genome_ref_order = []

        # add old GenomeSet: carry over every member of the existing set,
        # re-keyed by its ref
        #
        if 'input_genomeset_ref' in params and params['input_genomeset_ref'] is not None:
            (genomeSet,
             info,
             obj_name,
             type_name) = self.get_obj_data(params['input_genomeset_ref'], 'genomeSet')
            if type_name != 'GenomeSet':
                raise ValueError("Bad Type: Should be GenomeSet instead of '" + type_name + "'")

            for gId in list(genomeSet['elements'].keys()):
                genomeRef = genomeSet['elements'][gId]['ref']
                if not elements.get(genomeRef):
                    elements[genomeRef] = dict()
                elements[genomeRef]['ref'] = genomeRef # the key line
                self.log(console, "adding element " + gId + " : " + genomeRef)
                query_genome_ref_order.append(genomeRef)

        # add new genomes: each added ref may be a single genome, a
        # genomeSet (absorb all members), or a species tree (absorb the
        # genome ws refs attached to its leaves)
        #
        genomeSet_obj_types = ["KBaseSearch.GenomeSet", "KBaseSets.GenomeSet"]
        # NOTE(review): other methods in this file accept
        # "KBaseGenomeAnnotations.GenomeAnnotation"; verify that
        # "KBaseGenomeAnnotations.Genome" here is the intended type name
        genome_obj_types = ["KBaseGenomes.Genome", "KBaseGenomeAnnotations.Genome"]
        tree_obj_types = ["KBaseTrees.Tree"]
        for input_ref in params['input_genome_refs']:
            (query_genome_obj_data,
             query_genome_obj_info,
             query_genome_obj_name,
             query_genome_obj_type) = self.get_obj_data(input_ref, 'genome or genomeSet', full_type=True)

            # just a genome
            if query_genome_obj_type in genome_obj_types:
                if input_ref not in elements:
                    elements[input_ref] = dict()
                elements[input_ref]['ref'] = input_ref # the key line
                self.log(console, "adding element " + input_ref)
                query_genome_ref_order.append(input_ref)

            # handle genomeSet: absorb each member genome ref
            elif query_genome_obj_type in genomeSet_obj_types:
                for genome_id in sorted(query_genome_obj_data['elements'].keys()):
                    genome_ref = query_genome_obj_data['elements'][genome_id]['ref']
                    if genome_ref not in elements:
                        elements[genome_ref] = dict()
                    elements[genome_ref]['ref'] = genome_ref # the key line
                    self.log(console, "adding element " + genome_ref)
                    query_genome_ref_order.append(genome_ref)

            # handle tree type: absorb the first 'g' (genome) ws ref per leaf
            elif query_genome_obj_type in tree_obj_types:
                for genome_id in sorted(query_genome_obj_data['ws_refs'].keys()):
                    genome_ref = query_genome_obj_data['ws_refs'][genome_id]['g'][0]
                    if genome_ref not in elements:
                        elements[genome_ref] = dict()
                    elements[genome_ref]['ref'] = genome_ref # the key line
                    self.log(console, "adding element " + genome_ref)
                    query_genome_ref_order.append(genome_ref)
            else:
                raise ValueError ("bad type for input_genome_refs")

        # set provenance: the base set plus every added ref
        self.log(console, "SETTING PROVENANCE")
        input_ws_obj_refs = [params['input_genomeset_ref']]
        input_ws_obj_refs.extend(params['input_genome_refs'])
        provenance = self.set_provenance(ctx, input_ws_obj_refs, 'kb_SetUtilities', 'KButil_Add_Genomes_to_GenomeSet')

        # Store output object
        #
        if len(invalid_msgs) == 0:
            self.log(console, "SAVING GENOMESET")
            output_GenomeSet = {'description': params['desc'],
                                'elements': elements}
            new_obj_info = self.wsClient.save_objects({'workspace': params['workspace_name'],
                                                       'objects': [{
                                                           'type': 'KBaseSearch.GenomeSet',
                                                           'data': output_GenomeSet,
                                                           'name': params['output_name'],
                                                           'meta': {},
                                                           'provenance': provenance}]})[0]

        # build output report object
        self.log(console, "BUILDING REPORT")
        if len(invalid_msgs) == 0:
            self.log(console, "genomes in output set " + params['output_name'] + ": " +
                     str(len(list(elements.keys()))))
            report += 'genomes in output set ' + params['output_name'] + ': '
            report += str(len(list(elements.keys()))) + "\n"
            reportObj = {
                'objects_created': [{'ref': params['workspace_name'] + '/' + params['output_name'],
                                     'description':'KButil_Add_Genomes_to_GenomeSet'}],
                'text_message': report}
        else:
            report += "FAILURE:\n\n" + "\n".join(invalid_msgs) + "\n"
            reportObj = {'objects_created': [],
                         'text_message': report}

        # Save report
        report_info = self.reportClient.create({'report':reportObj, 'workspace_name':params['workspace_name']})
        returnVal = { 'report_name': report_info['name'], 'report_ref': report_info['ref'] }
        self.log(console, "KButil_Add_Genomes_to_GenomeSet DONE")
        #END KButil_Add_Genomes_to_GenomeSet

        # At some point might do deeper type checking...
        if not isinstance(returnVal, dict):
            raise ValueError('Method KButil_Add_Genomes_to_GenomeSet return value ' +
                             'returnVal is not type dict as required.')
        # return the results
        return [returnVal]
def KButil_Remove_Genomes_from_GenomeSet(self, ctx, params):
"""
:param params: instance of type
"KButil_Remove_Genomes_from_GenomeSet_Params"
(KButil_Remove_Genomes_from_GenomeSet() ** ** Method for removing
Genomes from a GenomeSet) -> structure: parameter "workspace_name"
of type "workspace_name" (** The workspace object refs are of
form: ** ** objects = ws.get_objects([{'ref':
params['workspace_id']+'/'+params['obj_name']}]) ** ** "ref" means
the entire name combining the workspace id and the object name **
"id" is a numerical identifier of the workspace or object, and
should just be used for workspace ** "name" is a string identifier
of a workspace or object. This is received from Narrative.),
parameter "input_genome_refs" of list of type "data_obj_ref",
parameter "nonlocal_genome_names" of list of type "data_obj_name",
parameter "input_genomeset_ref" of type "data_obj_ref", parameter
"output_name" of type "data_obj_name", parameter "desc" of String
:returns: instance of type
"KButil_Remove_Genomes_from_GenomeSet_Output" -> structure:
parameter "report_name" of type "data_obj_name", parameter
"report_ref" of type "data_obj_ref"
"""
# ctx is the context object
# return variables are: returnVal
#BEGIN KButil_Remove_Genomes_from_GenomeSet
# init
console = []
invalid_msgs = []
self.log(console, 'Running KButil_Remove_Genomes_from_GenomeSet with params=')
self.log(console, "\n" + pformat(params))
report = ''
[OBJID_I, NAME_I, TYPE_I, SAVE_DATE_I, VERSION_I, SAVED_BY_I, WSID_I, WORKSPACE_I, CHSUM_I, SIZE_I, META_I] = list(range(11)) # object_info tuple
# check params
required_params = ['workspace_name',
'input_genomeset_ref',
'output_name'
]
self.check_params (params, required_params)
if 'desc' not in params:
params['desc'] = params['output_name']+' Reduced GenomeSet'
if not params.get('input_genome_refs') and \
not params.get('nonlocal_genome_names'):
raise ValueError('must define either Local genomes or Non-local genomes to remove')
# read orig GenomeSet
#
genomeSet_workspace = None
if 'input_genomeset_ref' in params and params['input_genomeset_ref'] is not None:
(genomeSet,
info,
obj_name,
type_name) = self.get_obj_data(params['input_genomeset_ref'], 'genomeSet')
if type_name != 'GenomeSet':
raise ValueError("Bad Type: Should be GenomeSet instead of '" + type_name + "'")
genomeSet_workspace = info[WORKSPACE_I]
# Build list of genome refs (without version) to skip.
# Note: standardize to workspace_name and obj_id
skip_genomes_by_ref = dict()
nonlocal_skip_genome_refs = []
if params.get('input_genome_refs'):
for genomeRef in params['input_genome_refs']:
(this_obj_info,
this_obj_name,
this_obj_type) = self.get_obj_info(genomeRef, 'genome')
standardized_genomeRef = self.get_obj_ref_from_obj_info_noVer(this_obj_info)
skip_genomes_by_ref[standardized_genomeRef] = True
if params.get('nonlocal_genome_names'):
for gId in list(genomeSet['elements'].keys()):
genomeRef = genomeSet['elements'][gId]['ref']
(genome_obj_info,
this_genome_objname,
type_name) = self.get_obj_info(genomeRef, 'genome')
this_genome_workspace = genome_obj_info[WORKSPACE_I]
standardized_genomeRef = self.get_obj_ref_from_obj_info_noVer(genome_obj_info)
if this_genome_workspace != genomeSet_workspace \
and this_genome_objname in params['nonlocal_genome_names']:
skip_genomes_by_ref[standardized_genomeRef] = True
nonlocal_skip_genome_refs.append(standardized_genomeRef)
# build new genome set without skip genomes
elements = dict()
for gId in list(genomeSet['elements'].keys()):
genomeRef = genomeSet['elements'][gId]['ref']
(this_obj_info,
this_genome_obj_name,
this_genome_obj_type) = self.get_obj_info(genomeRef, 'genome')
standardized_genomeRef = self.get_obj_ref_from_obj_info_noVer(this_obj_info)
# this is where they are removed
if not skip_genomes_by_ref.get(standardized_genomeRef):
elements[gId] = dict()
elements[gId]['ref'] = genomeRef # the key line
self.log(console, "keeping element " + gId + " : " + genomeRef)
else:
self.log(console, "removing element " + gId + " : " + genomeRef)
# set provenance
self.log(console, "SETTING PROVENANCE")
input_ws_obj_refs = [params['input_genomeset_ref']]
input_ws_obj_refs.extend(params['input_genome_refs'])
provenance = self.set_provenance(ctx, input_ws_obj_refs, 'kb_SetUtilities', 'KButil_Remove_Genomes_from_GenomeSet')
# Store output object
#
if len(invalid_msgs) == 0:
self.log(console, "SAVING GENOMESET")
output_GenomeSet = {'description': params['desc'],
'elements': elements}
new_obj_info = self.wsClient.save_objects({'workspace': params['workspace_name'],
'objects': [{
'type': 'KBaseSearch.GenomeSet',
'data': output_GenomeSet,
'name': params['output_name'],
'meta': {},
'provenance': provenance}]})[0]
# build output report object
self.log(console, "BUILDING REPORT")
if len(invalid_msgs) == 0:
self.log(console, "genomes in output set " + params['output_name'] + ": " +
str(len(list(elements.keys()))))
report += 'genomes in output set ' + params['output_name'] + ': '
report += str(len(list(elements.keys()))) + "\n"
reportObj = {
'objects_created': [{'ref': params['workspace_name'] + '/' + params['output_name'],
'description':'KButil_Remove_Genomes_from_GenomeSet'}],
'text_message': report}
else:
report += "FAILURE:\n\n" + "\n".join(invalid_msgs) + "\n"
reportObj = {'objects_created': [],
'text_message': report}
# Save report
report_info = self.reportClient.create({'report':reportObj, 'workspace_name':params['workspace_name']})
returnVal = { 'report_name': report_info['name'], 'report_ref': report_info['ref'] }
self.log(console, "KButil_Remove_Genomes_from_GenomeSet DONE")
#END KButil_Remove_Genomes_from_GenomeSet
# At some point might do deeper type checking...
if not isinstance(returnVal, dict):
raise ValueError('Method KButil_Remove_Genomes_from_GenomeSet return value ' +
'returnVal is not type dict as required.')
# return the results
return [returnVal]
def KButil_Build_ReadsSet(self, ctx, params):
"""
:param params: instance of type "KButil_Build_ReadsSet_Params"
(KButil_Build_ReadsSet() ** ** Method for creating a ReadsSet) ->
structure: parameter "workspace_name" of type "workspace_name" (**
The workspace object refs are of form: ** ** objects =
ws.get_objects([{'ref':
params['workspace_id']+'/'+params['obj_name']}]) ** ** "ref" means
the entire name combining the workspace id and the object name **
"id" is a numerical identifier of the workspace or object, and
should just be used for workspace ** "name" is a string identifier
of a workspace or object. This is received from Narrative.),
parameter "input_refs" of type "data_obj_ref", parameter
"output_name" of type "data_obj_name", parameter "desc" of String
:returns: instance of type "KButil_Build_ReadsSet_Output" ->
structure: parameter "report_name" of type "data_obj_name",
parameter "report_ref" of type "data_obj_ref"
"""
# ctx is the context object
# return variables are: returnVal
#BEGIN KButil_Build_ReadsSet
console = []
invalid_msgs = []
self.log(console,'Running KButil_Build_ReadsSet with params=')
self.log(console, "\n" + pformat(params))
report = ''
# check params
required_params = ['workspace_name',
'input_refs',
'output_name'
]
self.check_params (params, required_params)
if 'desc' not in params:
params['desc'] = params['output_name']+' Built ReadsSet'
# clean input_refs
clean_input_refs = []
for ref in params['input_refs']:
if ref is not None and ref != '' and ref not in clean_input_refs:
clean_input_refs.append(ref)
params['input_refs'] = clean_input_refs
if len(params['input_refs']) < 1:
self.log(console,"Must provide at least one Reads Lib")
self.log(invalid_msgs,"Must provide at least one Reads Lib")
# Build ReadsSet
#
items = []
lib_seen = dict()
set_type = None
for libRef in params['input_refs']:
if not lib_seen.get(libRef):
lib_seen[libRef] = True
(libObj,
info,
lib_name,
lib_type) = self.get_obj_data(libRef, 'reads library')
if set_type is None:
set_type = lib_type
elif lib_type != set_type:
raise ValueError("Don't currently support heterogeneous ReadsSets"+
" (e.g. PairedEndLibrary and SingleEndLibrary)." +
" You have more than one type in your input")
if lib_type != 'SingleEndLibrary' and lib_type != 'PairedEndLibrary':
errMsg = "Bad Type: Should be SingleEndLibrary or PairedEndLibrary instead of "
errMsg += "'{}' for ref: '{}'"
raise ValueError(errMsg.format(lib_type, libRef))
# add lib
self.log(console, "adding lib " + lib_name + " : " + libRef)
items.append({'ref': libRef, 'label': lib_name})
# Store output object
#
if len(invalid_msgs) == 0:
self.log(console, "SAVING READS_SET")
output_readsSet_obj = {'description': params['desc'],
'items': items}
output_readsSet_name = params['output_name']
try:
rSet_ref = self.setAPI_Client.save_reads_set_v1(
{'workspace_name': params['workspace_name'],
'output_object_name': output_readsSet_name,
'data': output_readsSet_obj})['set_ref']
except Exception as e:
errMsg = 'SetAPI Error: Unable to save read library set obj to workspace: ({})\n{}'
raise ValueError(errMsg.format(params['workspace_name'], str(e)))
# build output report object
#
self.log(console, "SAVING REPORT")
if len(invalid_msgs) == 0:
self.log(console, "reads libs in output set " + params['output_name'] + ": " +
str(len(params['input_refs'])))
report += 'reads libs in output set ' + params['output_name'] + ': ' + str(
len(params['input_refs']))
reportObj = {
'objects_created': [{'ref': params['workspace_name'] + '/' + params['output_name'],
'description': 'KButil_Build_ReadsSet'}],
'text_message': report}
else:
report += "FAILURE:\n\n"+"\n".join(invalid_msgs)+"\n"
reportObj = {'objects_created': [], 'text_message': report}
# Save report
report_info = self.reportClient.create({'report':reportObj, 'workspace_name':params['workspace_name']})
returnVal = { 'report_name': report_info['name'], 'report_ref': report_info['ref'] }
self.log(console, "KButil_Build_ReadsSet DONE")
#END KButil_Build_ReadsSet
# At some point might do deeper type checking...
if not isinstance(returnVal, dict):
raise ValueError('Method KButil_Build_ReadsSet return value ' +
'returnVal is not type dict as required.')
# return the results
return [returnVal]
def KButil_Merge_MultipleReadsSets_to_OneReadsSet(self, ctx, params):
"""
:param params: instance of type
"KButil_Merge_MultipleReadsSets_to_OneReadsSet_Params"
(KButil_Merge_MultipleReadsSets_to_OneReadsSet() ** ** Method for
merging multiple ReadsSets into one ReadsSet) -> structure:
parameter "workspace_name" of type "workspace_name" (** The
workspace object refs are of form: ** ** objects =
ws.get_objects([{'ref':
params['workspace_id']+'/'+params['obj_name']}]) ** ** "ref" means
the entire name combining the workspace id and the object name **
"id" is a numerical identifier of the workspace or object, and
should just be used for workspace ** "name" is a string identifier
of a workspace or object. This is received from Narrative.),
parameter "input_refs" of type "data_obj_ref", parameter
"output_name" of type "data_obj_name", parameter "desc" of String
:returns: instance of type
"KButil_Merge_MultipleReadsSets_to_OneReadsSet_Output" ->
structure: parameter "report_name" of type "data_obj_name",
parameter "report_ref" of type "data_obj_ref"
"""
# ctx is the context object
# return variables are: returnVal
#BEGIN KButil_Merge_MultipleReadsSets_to_OneReadsSet
console = []
report = ''
self.log(console, 'Running KButil_Merge_MultipleReadsSets_to_OneReadsSet with parameters: ')
self.log(console, "\n"+pformat(params))
# check params
required_params = ['workspace_name',
'input_refs',
'output_name'
]
self.check_params (params, required_params)
# clean input_refs
clean_input_refs = []
for ref in params['input_refs']:
if ref is not None and ref != '' and ref not in clean_input_refs:
clean_input_refs.append(ref)
params['input_refs'] = clean_input_refs
if len(params['input_refs']) < 2:
self.log(console,"Must provide at least two ReadsSets")
self.log(invalid_msgs,"Must provide at least two ReadsSets")
# init output object fields and SetAPI
combined_readsSet_ref_list = []
combined_readsSet_name_list = []
combined_readsSet_label_list = []
# Iterate through list of ReadsSets
#
reads_lib_type = None
reads_lib_ref_seen = dict()
accepted_libs = []
repeat_libs = []
for set_i,this_readsSet_ref in enumerate(params['input_refs']):
accepted_libs.append([])
repeat_libs.append([])
(input_reads_obj_info,
input_reads_obj_name,
input_reads_obj_type) = self.get_obj_info(this_readsSet_ref, 'reads set', full_type=True)
acceptable_types = ["KBaseSets.ReadsSet"]
if input_reads_obj_type not in acceptable_types:
raise ValueError("Input reads of type: '" + input_reads_obj_type +
"'. Must be one of " + ", ".join(acceptable_types))
# iterate through read libraries in read set and add new ones to combined ReadsSet
try:
input_readsSet_obj = self.setAPI_Client.get_reads_set_v1({
'ref': this_readsSet_ref,
'include_item_info': 1})
except Exception as e:
raise ValueError('SetAPI Error: Unable to get read library set from workspace: (' +
this_readsSet_ref + ")\n" + str(e))
for readsLibrary_obj in input_readsSet_obj['data']['items']:
this_readsLib_ref = readsLibrary_obj['ref']
this_readsLib_label = readsLibrary_obj['label']
(this_readsLib_name, this_readsLib_type) = self.get_obj_name_and_type_from_obj_info (readsLibrary_obj['info'])
if reads_lib_type is None:
reads_lib_type = this_readsLib_type
elif this_readsLib_type != reads_lib_type:
raise ValueError ("inconsistent reads library types in ReadsSets. " +
"Must all be PairedEndLibrary or SingleEndLibrary to merge")
if this_readsLib_ref not in reads_lib_ref_seen:
reads_lib_ref_seen[this_readsLib_ref] = True
combined_readsSet_ref_list.append(this_readsLib_ref)
combined_readsSet_label_list.append(this_readsLib_label)
combined_readsSet_name_list.append(this_readsLib_name)
accepted_libs[set_i].append(this_readsLib_ref)
else:
repeat_libs[set_i].append(this_readsLib_ref)
# Save Merged ReadsSet
#
items = []
for lib_i,lib_ref in enumerate(combined_readsSet_ref_list):
items.append({'ref': lib_ref,
'label': combined_readsSet_label_list[lib_i]
#'data_attachment': ,
#'info':
})
output_readsSet_obj = { 'description': params['desc'],
'items': items
}
output_readsSet_name = params['output_name']
try:
output_readsSet_ref = self.setAPI_Client.save_reads_set_v1 ({'workspace_name': params['workspace_name'],
'output_object_name': output_readsSet_name,
'data': output_readsSet_obj
})['set_ref']
except Exception as e:
raise ValueError('SetAPI FAILURE: Unable to save read library set object to workspace: (' + params['workspace_name']+")\n" + str(e))
# build report
#
self.log (console, "SAVING REPORT")
report += "TOTAL READS LIBRARIES COMBINED INTO ONE READS SET: "+ str(len(combined_readsSet_ref_list))+"\n"
for set_i,this_readsLib_ref in enumerate(params['input_refs']):
report += "READS LIBRARIES ACCEPTED FROM ReadsSet "+str(set_i)+": "+str(len(accepted_libs[set_i]))+"\n"
report += "READS LIBRARIES REPEAT FROM ReadsSet "+str(set_i)+": "+str(len(repeat_libs[set_i]))+"\n"
report += "\n"
reportObj = {'objects_created':[],
'text_message': report}
reportObj['objects_created'].append({'ref':output_readsSet_ref,
'description':params['desc']})
# save report object
#
report_info = self.reportClient.create({'report':reportObj, 'workspace_name':params['workspace_name']})
returnVal = { 'report_name': report_info['name'], 'report_ref': report_info['ref'] }
self.log(console,"KButil_Merge_MultipleReadsSets_to_OneReadsSet DONE")
#END KButil_Merge_MultipleReadsSets_to_OneReadsSet
# At some point might do deeper type checking...
if not isinstance(returnVal, dict):
raise ValueError('Method KButil_Merge_MultipleReadsSets_to_OneReadsSet return value ' +
'returnVal is not type dict as required.')
# return the results
return [returnVal]
def KButil_Build_AssemblySet(self, ctx, params):
"""
:param params: instance of type "KButil_Build_AssemblySet_Params"
(KButil_Build_AssemblySet() ** ** Method for creating an
AssemblySet) -> structure: parameter "workspace_name" of type
"workspace_name" (** The workspace object refs are of form: ** **
objects = ws.get_objects([{'ref':
params['workspace_id']+'/'+params['obj_name']}]) ** ** "ref" means
the entire name combining the workspace id and the object name **
"id" is a numerical identifier of the workspace or object, and
should just be used for workspace ** "name" is a string identifier
of a workspace or object. This is received from Narrative.),
parameter "input_refs" of type "data_obj_ref", parameter
"output_name" of type "data_obj_name", parameter "desc" of String
:returns: instance of type "KButil_Build_AssemblySet_Output" ->
structure: parameter "report_name" of type "data_obj_name",
parameter "report_ref" of type "data_obj_ref"
"""
# ctx is the context object
# return variables are: returnVal
#BEGIN KButil_Build_AssemblySet
console = []
invalid_msgs = []
self.log(console,'Running KButil_Build_AssemblySet with params=')
self.log(console, "\n"+pformat(params))
report = ''
# check params
required_params = ['workspace_name',
'input_refs',
'output_name'
]
self.check_params (params, required_params)
if 'desc' not in params:
params['desc'] = params['output_name']+' Built AssemblySet'
# clean input_refs
clean_input_refs = []
for ref in params['input_refs']:
if ref is not None and ref != '' and ref not in clean_input_refs:
clean_input_refs.append(ref)
params['input_refs'] = clean_input_refs
if len(params['input_refs']) < 1:
self.log(console,"Must provide at least one Assembly")
self.log(invalid_msgs,"Must provide at least one Assembly")
# Build AssemblySet
#
items = []
ass_seen = dict()
set_type = None
for assRef in params['input_refs']:
if not ass_seen.get(assRef):
ass_seen[assRef] = True
(assObj,
info,
ass_name,
ass_type) = self.get_obj_data(assRef, 'assembly')
if set_type != None:
if ass_type != set_type:
raise ValueError ("Don't currently support heterogeneous AssemblySets. You have more than one type in your input")
set_type = ass_type
# add assembly
self.log(console,"adding assembly "+ass_name+" : "+assRef)
items.append ({'ref': assRef,
'label': ass_name
#'data_attachment': ,
#'info'
})
# Store output object
#
if len(invalid_msgs) == 0:
self.log(console,"SAVING ASSEMBLY_SET")
output_assemblySet_obj = { 'description': params['desc'],
'items': items
}
output_assemblySet_name = params['output_name']
try:
output_assemblySet_ref = self.setAPI_Client.save_assembly_set_v1 ({'workspace_name': params['workspace_name'],
'output_object_name': output_assemblySet_name,
'data': output_assemblySet_obj
})['set_ref']
except Exception as e:
raise ValueError('SetAPI FAILURE: Unable to save assembly set object to workspace: (' + params['workspace_name']+")\n" + str(e))
# build output report object
#
self.log(console,"SAVING REPORT")
if len(invalid_msgs) == 0:
self.log(console,"assembly objs in output set "+params['output_name']+": "+str(len(params['input_refs'])))
report += 'assembly objs in output set '+params['output_name']+': '+str(len(params['input_refs']))
reportObj = {
'objects_created':[{'ref':params['workspace_name']+'/'+params['output_name'], 'description':'KButil_Build_AssemblySet'}],
'text_message':report
}
else:
report += "FAILURE:\n\n"+"\n".join(invalid_msgs)+"\n"
reportObj = {
'objects_created':[],
'text_message':report
}
# Save report
report_info = self.reportClient.create({'report':reportObj, 'workspace_name':params['workspace_name']})
returnVal = { 'report_name': report_info['name'], 'report_ref': report_info['ref'] }
self.log(console,"KButil_Build_AssemblySet DONE")
#END KButil_Build_AssemblySet
# At some point might do deeper type checking...
if not isinstance(returnVal, dict):
raise ValueError('Method KButil_Build_AssemblySet return value ' +
'returnVal is not type dict as required.')
# return the results
return [returnVal]
def KButil_Batch_Create_ReadsSet(self, ctx, params):
"""
:param params: instance of type "KButil_Batch_Create_ReadsSet_Params"
(KButil_Batch_Create_ReadsSet() ** ** Method for creating a
ReadsSet without specifying individual objects) -> structure:
parameter "workspace_name" of type "workspace_name" (** The
workspace object refs are of form: ** ** objects =
ws.get_objects([{'ref':
params['workspace_id']+'/'+params['obj_name']}]) ** ** "ref" means
the entire name combining the workspace id and the object name **
"id" is a numerical identifier of the workspace or object, and
should just be used for workspace ** "name" is a string identifier
of a workspace or object. This is received from Narrative.),
parameter "name_pattern" of String, parameter "output_name" of
type "data_obj_name", parameter "desc" of String
:returns: instance of type "KButil_Batch_Create_ReadsSet_Output" ->
structure: parameter "report_name" of type "data_obj_name",
parameter "report_ref" of type "data_obj_ref"
"""
# ctx is the context object
# return variables are: returnVal
#BEGIN KButil_Batch_Create_ReadsSet
#### STEP 0: standard method init
##
[OBJID_I, NAME_I, TYPE_I, SAVE_DATE_I, VERSION_I, SAVED_BY_I, WSID_I, WORKSPACE_I, CHSUM_I, SIZE_I, META_I] = list(range(11)) # object_info tuple
console = []
invalid_msgs = []
self.log(console,'Running KButil_Batch_Create_ReadsSet with params=')
self.log(console, "\n"+pformat(params))
report = ''
# check params
required_params = ['workspace_name',
'output_name'
]
self.check_params (params, required_params)
if 'desc' not in params:
params['desc'] = params['output_name']+' Batch Created ReadsSet'
#### STEP 3: refine name_pattern
##
name_pattern = params.get('name_pattern')
if name_pattern:
name_pattern = name_pattern.strip()
name_pattern = name_pattern.strip('*')
name_pattern = name_pattern.replace('.','\.')
name_pattern = name_pattern.replace('*','.*')
regexp_name_pattern = re.compile ('^.*'+name_pattern+'.*$')
#### STEP 4: read ws for readslib objects
##
pe_reads_obj_ref_by_name = dict()
se_reads_obj_ref_by_name = dict()
reads_obj_ref_by_name = None
# Paired End
pe_reads_obj_info_list = self.get_obj_info_list_from_ws_name(params['workspace_name'],
'KBaseFile.PairedEndLibrary',
'Paired-End Reads Library')
for info in pe_reads_obj_info_list:
reads_ref = self.get_obj_ref_from_obj_info(info)
(reads_name, type_name) = self.get_obj_name_and_type_from_obj_info (info)
if name_pattern:
self.log(console, "NAME_PATTERN: '"+name_pattern+"' READS_NAME: '"+reads_name+"'")
if not name_pattern or regexp_name_pattern.match(reads_name):
self.log(console, "ADDING "+reads_name+" ("+reads_ref+")")
pe_reads_obj_ref_by_name[reads_name] = reads_ref
# Single End
se_reads_obj_info_list = self.get_obj_info_list_from_ws_name(params['workspace_name'],
'KBaseFile.SingleEndLibrary',
'Single-End Reads Library')
for info in se_reads_obj_info_list:
reads_ref = self.get_obj_ref_from_obj_info(info)
(reads_name, type_name) = self.get_obj_name_and_type_from_obj_info (info)
if name_pattern:
self.log(console, "NAME_PATTERN: '"+name_pattern+"' READS_NAME: '"+reads_name+"'")
if not name_pattern or regexp_name_pattern.match(reads_name):
self.log(console, "ADDING "+reads_name+" ("+reads_ref+")")
se_reads_obj_ref_by_name[reads_name] = reads_ref
# check for no hits
if len(list(pe_reads_obj_ref_by_name.keys())) == 0 \
and len(list(se_reads_obj_ref_by_name.keys())) == 0:
if not name_pattern:
self.log(invalid_msgs, "No Reads Library objects found")
else:
self.log(invalid_msgs, "No Reads Library objects passing name_pattern filter: '"+name_pattern+"'")
#### STEP 5: Build ReadsSet
##
if len(invalid_msgs) == 0:
items = []
reads_ref_list = []
# pick whether to use single end or paired end hits (favor paired end)
if len(list(pe_reads_obj_ref_by_name.keys())) == 0 \
and len(list(se_reads_obj_ref_by_name.keys())) != 0:
reads_obj_ref_by_name = se_reads_obj_ref_by_name
else:
reads_obj_ref_by_name = pe_reads_obj_ref_by_name
# add readslibs
for reads_name in sorted (reads_obj_ref_by_name.keys()):
reads_ref = reads_obj_ref_by_name[reads_name]
reads_ref_list.append (reads_ref)
self.log(console,"adding reads library "+reads_name+" : "+reads_ref)
items.append ({'ref': reads_ref,
'label': reads_name
#'data_attachment': ,
#'info'
})
#### STEP 6: Store output object
##
if len(invalid_msgs) == 0:
self.log(console,"SAVING READS_SET")
# object def
output_readsSet_obj = { 'description': params['desc'],
'items': items
}
output_readsSet_name = params['output_name']
# object save
try:
output_readsSet_ref = self.setAPI_Client.save_reads_set_v1 ({'workspace_name': params['workspace_name'],
'output_object_name': output_readsSet_name,
'data': output_readsSet_obj
})['set_ref']
except Exception as e:
raise ValueError('SetAPI FAILURE: Unable to save reads library set object to workspace: (' + params['workspace_name']+")\n" + str(e))
#### STEP 7: build output report object
##
self.log(console,"SAVING REPORT")
if len(invalid_msgs) != 0:
report += "\n".join(invalid_msgs)
reportObj = {
'objects_created':[],
'text_message':report
}
else:
self.log(console,"reads library objs in output set "+params['output_name']+": "+str(len(items)))
report += 'reads library objs in output set '+params['output_name']+': '+str(len(items))
desc = 'KButil_Batch_Create_ReadsSet'
if name_pattern:
desc += ' with name_pattern: '+name_pattern
reportObj = {
'objects_created':[{'ref':params['workspace_name']+'/'+params['output_name'], 'description':desc}],
'text_message':report
}
# Save report
report_info = self.reportClient.create({'report':reportObj, 'workspace_name':params['workspace_name']})
returnVal = { 'report_name': report_info['name'], 'report_ref': report_info['ref'] }
self.log(console,"KButil_Batch_Create_ReadsSet DONE")
#END KButil_Batch_Create_ReadsSet
# At some point might do deeper type checking...
if not isinstance(returnVal, dict):
raise ValueError('Method KButil_Batch_Create_ReadsSet return value ' +
'returnVal is not type dict as required.')
# return the results
return [returnVal]
def KButil_Batch_Create_AssemblySet(self, ctx, params):
"""
:param params: instance of type
"KButil_Batch_Create_AssemblySet_Params"
(KButil_Batch_Create_AssemblySet() ** ** Method for creating an
AssemblySet without specifying individual objects) -> structure:
parameter "workspace_name" of type "workspace_name" (** The
workspace object refs are of form: ** ** objects =
ws.get_objects([{'ref':
params['workspace_id']+'/'+params['obj_name']}]) ** ** "ref" means
the entire name combining the workspace id and the object name **
"id" is a numerical identifier of the workspace or object, and
should just be used for workspace ** "name" is a string identifier
of a workspace or object. This is received from Narrative.),
parameter "name_pattern" of String, parameter "output_name" of
type "data_obj_name", parameter "desc" of String
:returns: instance of type "KButil_Batch_Create_AssemblySet_Output"
-> structure: parameter "report_name" of type "data_obj_name",
parameter "report_ref" of type "data_obj_ref"
"""
# ctx is the context object
# return variables are: returnVal
#BEGIN KButil_Batch_Create_AssemblySet
#### STEP 0: standard method init
##
[OBJID_I, NAME_I, TYPE_I, SAVE_DATE_I, VERSION_I, SAVED_BY_I, WSID_I, WORKSPACE_I, CHSUM_I, SIZE_I, META_I] = list(range(11)) # object_info tuple
console = []
invalid_msgs = []
self.log(console,'Running KButil_Batch_Create_AssemblySet with params=')
self.log(console, "\n"+pformat(params))
report = ''
# check params
required_params = ['workspace_name',
'output_name'
]
self.check_params (params, required_params)
if 'desc' not in params:
params['desc'] = params['output_name']+' Batch Created AssemblySet'
#### STEP 3: refine name_pattern
##
name_pattern = params.get('name_pattern')
if name_pattern:
name_pattern = name_pattern.strip()
name_pattern = name_pattern.strip('*')
name_pattern = name_pattern.replace('.','\.')
name_pattern = name_pattern.replace('*','.*')
regexp_name_pattern = re.compile ('^.*'+name_pattern+'.*$')
#### STEP 4: read ws for assembly objects
##
assembly_obj_ref_by_name = dict()
assembly_obj_info_list = self.get_obj_info_list_from_ws_name(params['workspace_name'],
'KBaseGenomeAnnotations.Assembly',
'Assembly')
for info in assembly_obj_info_list:
assembly_ref = self.get_obj_ref_from_obj_info(info)
(assembly_name, type_name) = self.get_obj_name_and_type_from_obj_info (info)
if name_pattern:
self.log(console, "NAME_PATTERN: '"+name_pattern+"' ASSEMBLY_NAME: '"+assembly_name+"'")
if not name_pattern or regexp_name_pattern.match(assembly_name):
self.log(console, "ADDING "+assembly_name+" ("+assembly_ref+")")
assembly_obj_ref_by_name[assembly_name] = assembly_ref
if len(list(assembly_obj_ref_by_name.keys())) == 0:
if not name_pattern:
self.log(invalid_msgs, "No Assembly objects found")
else:
self.log(invalid_msgs, "No Assembly objects passing name_pattern filter: '"+name_pattern+"'")
#### STEP 5: Build AssemblySet
##
if len(invalid_msgs) == 0:
items = []
assembly_ref_list = []
for ass_name in sorted (assembly_obj_ref_by_name.keys()):
# add assembly
ass_ref = assembly_obj_ref_by_name[ass_name]
assembly_ref_list.append (ass_ref)
self.log(console,"adding assembly "+ass_name+" : "+ass_ref)
items.append ({'ref': ass_ref,
'label': ass_name
#'data_attachment': ,
#'info'
})
#### STEP 6: Store output object
##
if len(invalid_msgs) == 0:
self.log(console,"SAVING ASSEMBLY_SET")
# object def
output_assemblySet_obj = { 'description': params['desc'],
'items': items
}
output_assemblySet_name = params['output_name']
# object save
try:
output_assemblySet_ref = self.setAPI_Client.save_assembly_set_v1 ({'workspace_name': params['workspace_name'],
'output_object_name': output_assemblySet_name,
'data': output_assemblySet_obj
})['set_ref']
except Exception as e:
raise ValueError('SetAPI FAILURE: Unable to save assembly set object to workspace: (' + params['workspace_name']+")\n" + str(e))
#### STEP 7: build output report object
##
self.log(console,"SAVING REPORT")
if len(invalid_msgs) != 0:
report += "\n".join(invalid_msgs)
reportObj = {
'objects_created':[],
'text_message':report
}
else:
self.log(console,"assembly objs in output set "+params['output_name']+": "+str(len(items)))
report += 'assembly objs in output set '+params['output_name']+': '+str(len(items))
desc = 'KButil_Batch_Create_AssemblySet'
if name_pattern:
desc += ' with name_pattern: '+name_pattern
reportObj = {
'objects_created':[{'ref':params['workspace_name']+'/'+params['output_name'], 'description':desc}],
'text_message':report
}
# Save report
report_info = self.reportClient.create({'report':reportObj, 'workspace_name':params['workspace_name']})
returnVal = { 'report_name': report_info['name'], 'report_ref': report_info['ref'] }
self.log(console,"KButil_Batch_Create_AssemblySet DONE")
#END KButil_Batch_Create_AssemblySet
# At some point might do deeper type checking...
if not isinstance(returnVal, dict):
raise ValueError('Method KButil_Batch_Create_AssemblySet return value ' +
'returnVal is not type dict as required.')
# return the results
return [returnVal]
def KButil_Batch_Create_GenomeSet(self, ctx, params):
    """
    Create a KBaseSearch.GenomeSet from every Genome object in a workspace,
    optionally filtered by a glob-like name pattern, and return a report.

    :param params: instance of type
       "KButil_Batch_Create_GenomeSet_Params"
       (KButil_Batch_Create_GenomeSet() ** ** Method for creating a
       GenomeSet without specifying individual objects) -> structure:
       parameter "workspace_name" of type "workspace_name" (** The
       workspace object refs are of form: ** ** objects =
       ws.get_objects([{'ref':
       params['workspace_id']+'/'+params['obj_name']}]) ** ** "ref" means
       the entire name combining the workspace id and the object name **
       "id" is a numerical identifier of the workspace or object, and
       should just be used for workspace ** "name" is a string identifier
       of a workspace or object. This is received from Narrative.),
       parameter "name_pattern" of String, parameter "output_name" of
       type "data_obj_name", parameter "desc" of String
    :returns: instance of type "KButil_Batch_Create_GenomeSet_Output" ->
       structure: parameter "report_name" of type "data_obj_name",
       parameter "report_ref" of type "data_obj_ref"
    """
    # ctx is the context object
    # return variables are: returnVal
    #BEGIN KButil_Batch_Create_GenomeSet

    #### STEP 0: standard method init
    ##
    # index names into the Workspace object_info tuple
    [OBJID_I, NAME_I, TYPE_I, SAVE_DATE_I, VERSION_I, SAVED_BY_I, WSID_I, WORKSPACE_I, CHSUM_I, SIZE_I, META_I] = list(range(11))  # object_info tuple
    console = []       # collected log lines
    invalid_msgs = []  # validation failures; non-empty suppresses the object save
    self.log(console,'Running KButil_Batch_Create_GenomeSet with params=')
    self.log(console, "\n"+pformat(params))
    report = ''

    # check params
    required_params = ['workspace_name',
                       'output_name'
                       ]
    self.check_params (params, required_params)
    # default description when the caller supplied none
    if 'desc' not in params:
        params['desc'] = params['output_name']+' Batch Created GenomeSet'

    #### STEP 3: refine name_pattern
    ##
    # turn the user-supplied glob-ish pattern into a substring regex:
    # trim whitespace and surrounding '*', escape '.', map '*' -> '.*'
    name_pattern = params.get('name_pattern')
    if name_pattern:
        name_pattern = name_pattern.strip()
        name_pattern = name_pattern.strip('*')
        name_pattern = name_pattern.replace('.','\.')
        name_pattern = name_pattern.replace('*','.*')
        regexp_name_pattern = re.compile ('^.*'+name_pattern+'.*$')

    #### STEP 4: read ws for genome objects
    ##
    genome_obj_ref_by_name = dict()
    genome_obj_info_list = self.get_obj_info_list_from_ws_name(params['workspace_name'],
                                                               'KBaseGenomes.Genome',
                                                               'Genome')
    for info in genome_obj_info_list:
        genome_ref = self.get_obj_ref_from_obj_info(info)
        (genome_name, type_name) = self.get_obj_name_and_type_from_obj_info (info)
        if name_pattern:
            self.log(console, "NAME_PATTERN: '"+name_pattern+"' GENOME_NAME: '"+genome_name+"'")
        # keep every genome when no pattern was given, else only matches
        if not name_pattern or regexp_name_pattern.match(genome_name):
            self.log(console, "ADDING "+genome_name+" ("+genome_ref+")")
            genome_obj_ref_by_name[genome_name] = genome_ref
    # nothing matched: record why, which skips the save below
    if len(list(genome_obj_ref_by_name.keys())) == 0:
        if not name_pattern:
            self.log(invalid_msgs, "No Genome objects found")
        else:
            self.log(invalid_msgs, "No Genome objects passing name_pattern filter: '"+name_pattern+"'")

    #### STEP 5: Build GenomeSet
    ##
    if len(invalid_msgs) == 0:
        #items = []
        elements = dict()
        genome_ref_list = []
        # deterministic ordering by genome name
        for gen_name in sorted (genome_obj_ref_by_name.keys()):
            # add genome
            gen_ref = genome_obj_ref_by_name[gen_name]
            genome_ref_list.append (gen_ref)
            self.log(console,"adding genome "+gen_name+" : "+gen_ref)
            #items.append ({'ref': gen_ref,
            #               'label': gen_name
            #               #'data_attachment': ,
            #               #'info'
            #              })
            elements[gen_name] = dict()
            elements[gen_name]['ref'] = gen_ref

    #### STEP 6: Store output object
    ##
    if len(invalid_msgs) == 0:
        self.log(console,"SAVING GENOME_SET")

        # set provenance: inputs are the genome refs placed in the set
        self.log(console, "SETTING PROVENANCE")
        input_ws_obj_refs = genome_ref_list
        provenance = self.set_provenance(ctx, input_ws_obj_refs, 'kb_SetUtilities', 'KButil_Batch_Create_GenomeSet')

        # object def (KBaseSearch.GenomeSet uses 'elements', not 'items')
        output_genomeSet_obj = { 'description': params['desc'],
                                 #'items': items
                                 'elements': elements
                               }
        output_genomeSet_name = params['output_name']

        # object save
        try:
            new_obj_info = self.wsClient.save_objects({'workspace': params['workspace_name'],
                                                       'objects': [{'type': 'KBaseSearch.GenomeSet',
                                                                    'data': output_genomeSet_obj,
                                                                    'name': output_genomeSet_name,
                                                                    'meta': {},
                                                                    'provenance': provenance
                                                                    }]
                                                       })[0]
        except Exception as e:
            raise ValueError('SetAPI FAILURE: Unable to save genome set object to workspace: (' + params['workspace_name']+")\n" + str(e))

    #### STEP 7: build output report object
    ##
    self.log(console,"SAVING REPORT")
    if len(invalid_msgs) != 0:
        # validation failed: report the problems; no objects were created
        report += "\n".join(invalid_msgs)
        reportObj = {
            'objects_created':[],
            'text_message':report
        }
    else:
        self.log(console,"genome objs in output set "+params['output_name']+": "+str(len(list(elements.keys()))))
        report += 'genome objs in output set '+params['output_name']+': '+str(len(list(elements.keys())))
        desc = 'KButil_Batch_Create_GenomeSet'
        if name_pattern:
            desc += ' with name_pattern: '+name_pattern
        reportObj = {
            'objects_created':[{'ref':params['workspace_name']+'/'+params['output_name'], 'description':desc}],
            'text_message':report
        }

    # Save report
    report_info = self.reportClient.create({'report':reportObj, 'workspace_name':params['workspace_name']})

    returnVal = { 'report_name': report_info['name'], 'report_ref': report_info['ref'] }
    self.log(console,"KButil_Batch_Create_GenomeSet DONE")
    #END KButil_Batch_Create_GenomeSet

    # At some point might do deeper type checking...
    if not isinstance(returnVal, dict):
        raise ValueError('Method KButil_Batch_Create_GenomeSet return value ' +
                         'returnVal is not type dict as required.')
    # return the results
    return [returnVal]
def status(self, ctx):
    """Standard KBase SDK status endpoint: report server state and version.

    The #BEGIN_STATUS/#END_STATUS markers are managed by the KBase SDK
    code generator — do not remove them.
    """
    #BEGIN_STATUS
    returnVal = {'state': "OK", 'message': "", 'version': self.VERSION,
                 'git_url': self.GIT_URL, 'git_commit_hash': self.GIT_COMMIT_HASH}
    #END_STATUS
    return [returnVal]
| 47.406301
| 202
| 0.56107
| 15,408
| 145,964
| 5.023884
| 0.033035
| 0.016458
| 0.030384
| 0.012764
| 0.802591
| 0.758836
| 0.721838
| 0.699798
| 0.683573
| 0.65855
| 0
| 0.00191
| 0.34366
| 145,964
| 3,078
| 203
| 47.421702
| 0.80609
| 0.189773
| 0
| 0.572699
| 0
| 0
| 0.177276
| 0.030023
| 0.00523
| 0
| 0
| 0
| 0
| 1
| 0.016736
| false
| 0.002615
| 0.004184
| 0
| 0.040795
| 0.001046
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
35cb2c89087bbacddf3ebd726b509119055415cb
| 169
|
py
|
Python
|
deui/html/view/strong_element.py
|
urushiyama/DeUI
|
14530d2dae7d96a3dee30759f85e02239fb433c5
|
[
"MIT"
] | 1
|
2021-10-17T01:54:18.000Z
|
2021-10-17T01:54:18.000Z
|
deui/html/view/strong_element.py
|
urushiyama/DeUI
|
14530d2dae7d96a3dee30759f85e02239fb433c5
|
[
"MIT"
] | null | null | null |
deui/html/view/strong_element.py
|
urushiyama/DeUI
|
14530d2dae7d96a3dee30759f85e02239fb433c5
|
[
"MIT"
] | null | null | null |
from .element import Element
class Strong(Element):
    """HTML <strong> element: content that has strong importance."""

    # The string conversion yields the fixed tag name.
    def __str__(self):
        tag_name = "strong"
        return tag_name
| 15.363636
| 50
| 0.639053
| 18
| 169
| 5.777778
| 0.777778
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.266272
| 169
| 10
| 51
| 16.9
| 0.83871
| 0.272189
| 0
| 0
| 0
| 0
| 0.056075
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| false
| 0
| 0.25
| 0.25
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 4
|
35d6adc68889ed358b010a0cd2ed31f599f10743
| 154
|
py
|
Python
|
World/Chat/Constants/ChatTag.py
|
sergio-ivanuzzo/idewave-core
|
31b2b2ec4ac222e02af57d8b2d7a3277e4a444ae
|
[
"Apache-2.0"
] | 10
|
2019-06-29T19:24:52.000Z
|
2021-02-21T22:45:57.000Z
|
World/Chat/Constants/ChatTag.py
|
sergio-ivanuzzo/wowcore
|
31b2b2ec4ac222e02af57d8b2d7a3277e4a444ae
|
[
"Apache-2.0"
] | 4
|
2019-08-15T07:03:36.000Z
|
2021-06-02T13:01:25.000Z
|
World/Chat/Constants/ChatTag.py
|
sergio-ivanuzzo/idewave-core
|
31b2b2ec4ac222e02af57d8b2d7a3277e4a444ae
|
[
"Apache-2.0"
] | 8
|
2019-06-30T22:47:48.000Z
|
2021-02-20T19:21:30.000Z
|
from enum import Enum
class ChatTag(Enum):
    """Chat status tags.

    Values are powers of two — presumably combinable as a bitmask in the
    wire protocol (TODO confirm against protocol docs).
    """
    NONE = 0x00
    AFK = 0x01
    DND = 0x02
    GM = 0x04
| 15.4
| 26
| 0.402597
| 15
| 154
| 4.133333
| 0.866667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.173913
| 0.551948
| 154
| 9
| 27
| 17.111111
| 0.724638
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.103896
| 0
| 0
| 1
| 0
| false
| 0
| 0.166667
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 4
|
35e82d4ef4d49785aa34c3af635ba146f1224411
| 188
|
py
|
Python
|
lib/python3.4/site-packages/flask/testsuite/test_apps/moduleapp/__init__.py
|
LChristakis/chalice-hunter
|
6bffea4620e23ce9ff12ac30526ebafcb9c10058
|
[
"MIT"
] | 21,684
|
2015-01-01T03:42:20.000Z
|
2022-03-30T13:32:44.000Z
|
lib/python3.4/site-packages/flask/testsuite/test_apps/moduleapp/__init__.py
|
LChristakis/chalice-hunter
|
6bffea4620e23ce9ff12ac30526ebafcb9c10058
|
[
"MIT"
] | 4,067
|
2015-01-01T00:04:51.000Z
|
2022-03-30T13:42:56.000Z
|
lib/python3.4/site-packages/flask/testsuite/test_apps/moduleapp/__init__.py
|
LChristakis/chalice-hunter
|
6bffea4620e23ce9ff12ac30526ebafcb9c10058
|
[
"MIT"
] | 1,901
|
2015-01-01T21:05:59.000Z
|
2022-03-21T08:14:25.000Z
|
from flask import Flask

app = Flask(__name__)

# NOTE(review): register_module is the pre-blueprint Flask API (removed in
# modern Flask). This is a test-app fixture inside Flask's own test suite,
# so it is kept as-is.
from moduleapp.apps.admin import admin
from moduleapp.apps.frontend import frontend

app.register_module(admin)
app.register_module(frontend)
| 23.5
| 44
| 0.835106
| 27
| 188
| 5.592593
| 0.407407
| 0.172185
| 0.225166
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.095745
| 188
| 7
| 45
| 26.857143
| 0.888235
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 4
|
ea05a0b0b7b3805abfea825f39a9266acff20b20
| 301
|
py
|
Python
|
torchvision/models/segmentation/segmentation.py
|
willfrey/vision
|
56fb0bf5796ac374d4e353032e418236cd73c554
|
[
"BSD-3-Clause"
] | 1
|
2022-03-08T14:11:12.000Z
|
2022-03-08T14:11:12.000Z
|
torchvision/models/segmentation/segmentation.py
|
willfrey/vision
|
56fb0bf5796ac374d4e353032e418236cd73c554
|
[
"BSD-3-Clause"
] | null | null | null |
torchvision/models/segmentation/segmentation.py
|
willfrey/vision
|
56fb0bf5796ac374d4e353032e418236cd73c554
|
[
"BSD-3-Clause"
] | null | null | null |
import warnings

# Import all methods/classes for BC:
from . import *  # noqa: F401, F403

# Back-compat shim: emit a deprecation warning once, at import time.
warnings.warn(
    "The 'torchvision.models.segmentation.segmentation' module is deprecated since 0.12 and will be removed in "
    "0.14. Please use the 'torchvision.models.segmentation' directly instead."
)
| 27.363636
| 112
| 0.740864
| 41
| 301
| 5.439024
| 0.780488
| 0.125561
| 0.179372
| 0.286996
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.047809
| 0.166113
| 301
| 10
| 113
| 30.1
| 0.840637
| 0.169435
| 0
| 0
| 0
| 0.166667
| 0.720648
| 0.319838
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.333333
| 0
| 0.333333
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 4
|
ea06975e629b07547da6955b29533a5ebf27238d
| 231
|
py
|
Python
|
models/taster_exceptions.py
|
awesome-archive/MusicTaster
|
5833fc1802c0182598053a35e04a5c192895638b
|
[
"MIT"
] | 45
|
2017-02-26T12:24:47.000Z
|
2021-12-13T07:41:32.000Z
|
models/taster_exceptions.py
|
awesome-archive/MusicTaster
|
5833fc1802c0182598053a35e04a5c192895638b
|
[
"MIT"
] | null | null | null |
models/taster_exceptions.py
|
awesome-archive/MusicTaster
|
5833fc1802c0182598053a35e04a5c192895638b
|
[
"MIT"
] | 17
|
2017-05-12T10:17:12.000Z
|
2021-04-17T14:27:41.000Z
|
# coding=utf-8
"""
Created by jayvee on 16/12/22.
"""
class NonDataException(IOError):
    """
    Exception raised when no data could be obtained.
    """

    def __init__(self, msg):
        # Human-readable reason; rendered by __str__ below.
        # NOTE(review): super().__init__ is not called, so .args stays empty.
        self.message = msg

    def __str__(self):
        return self.message
| 12.833333
| 32
| 0.584416
| 27
| 231
| 4.703704
| 0.777778
| 0.173228
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.042169
| 0.281385
| 231
| 17
| 33
| 13.588235
| 0.722892
| 0.242424
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.4
| false
| 0
| 0
| 0.2
| 0.8
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 4
|
ea099d5689ea4563e3d4442ca0df54d1f3e695d7
| 743
|
py
|
Python
|
numba/tests/builtins/test_builtin_pow.py
|
liuzhenhai/numba
|
855a2b262ae3d82bd6ac1c3e1c0acb36ee2e2acf
|
[
"BSD-2-Clause"
] | null | null | null |
numba/tests/builtins/test_builtin_pow.py
|
liuzhenhai/numba
|
855a2b262ae3d82bd6ac1c3e1c0acb36ee2e2acf
|
[
"BSD-2-Clause"
] | null | null | null |
numba/tests/builtins/test_builtin_pow.py
|
liuzhenhai/numba
|
855a2b262ae3d82bd6ac1c3e1c0acb36ee2e2acf
|
[
"BSD-2-Clause"
] | null | null | null |
"""
>>> pow3(2,3,5) == 3
True
>>> pow3(3,3,5) == 2
True
>>> pow3_const() == 3
True
>>> pow2(2,3) == 8
True
>>> pow2(3,3) == 27
True
>>> pow2_const() == 8
True
>>> c1, c2 = 1.2 + 4.1j, 0.6 + 0.5j
>>> allclose(pow2(c1, c2), pow(c1, c2))
True
>>> d1, d2 = 4.2, 5.1
>>> allclose(pow2(d1, d2), pow(d1, d2))
True
"""
from numpy import allclose
from numba import *
@autojit(backend='ast')
def pow3(a,b,c):
return pow(a,b,c)
@autojit(backend='ast')
def pow3_const():
return pow(2,3,5)
@autojit(backend='ast')
def pow2(a,b):
return pow(a,b)
@autojit(backend='ast')
def pow2_const():
return pow(2,3)
if __name__ == '__main__':
# import logging; logging.getLogger().setLevel(0)
import numba
numba.testing.testmod()
| 14.568627
| 52
| 0.58681
| 128
| 743
| 3.3125
| 0.320313
| 0.018868
| 0.160377
| 0.188679
| 0.301887
| 0
| 0
| 0
| 0
| 0
| 0
| 0.097521
| 0.185734
| 743
| 50
| 53
| 14.86
| 0.603306
| 0.483176
| 0
| 0.235294
| 0
| 0
| 0.053333
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.235294
| false
| 0
| 0.176471
| 0.235294
| 0.647059
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 4
|
ea10c33b2ae5ce0dfe51d6bc0f5038dc514b1cf8
| 334
|
py
|
Python
|
json_parser.py
|
ROpdebee/CAA-Auditor
|
2fb33f792dcc9ee711ebaf6532e67b85ff79cd40
|
[
"MIT"
] | null | null | null |
json_parser.py
|
ROpdebee/CAA-Auditor
|
2fb33f792dcc9ee711ebaf6532e67b85ff79cd40
|
[
"MIT"
] | null | null | null |
json_parser.py
|
ROpdebee/CAA-Auditor
|
2fb33f792dcc9ee711ebaf6532e67b85ff79cd40
|
[
"MIT"
] | null | null | null |
from typing import Any

# Prefer the fast cysimdjson-backed parser; fall back to the pure-Python
# implementation (same public names) when cysimdjson is not installed.
try:
    from json_parser_cysimdjson import JSONArray, JSONObject, parse, parse_str, safe_get, to_native
except ImportError:
    print('Using slow built-in json parsing, install cysimdjson')
    from json_parser_builtin import JSONArray, JSONObject, parse, parse_str, safe_get, to_native  # type: ignore[misc]
| 41.75
| 118
| 0.787425
| 47
| 334
| 5.382979
| 0.617021
| 0.063241
| 0.110672
| 0.237154
| 0.418972
| 0.418972
| 0.418972
| 0.418972
| 0.418972
| 0.418972
| 0
| 0
| 0.146707
| 334
| 7
| 119
| 47.714286
| 0.887719
| 0.053892
| 0
| 0
| 0
| 0
| 0.165605
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.666667
| 0
| 0.666667
| 0.166667
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 4
|
ea1d5a72a33ed813ed8f86e0ab2a8e0690d3321c
| 180
|
py
|
Python
|
post/admin.py
|
rezaul99/Sample-Django-Project
|
d087d3fd419139ec7f3c934ae89eaca4e75b3f8e
|
[
"Apache-2.0"
] | 1
|
2019-02-17T18:31:07.000Z
|
2019-02-17T18:31:07.000Z
|
post/admin.py
|
rezaul99/Sample-Django-Project
|
d087d3fd419139ec7f3c934ae89eaca4e75b3f8e
|
[
"Apache-2.0"
] | null | null | null |
post/admin.py
|
rezaul99/Sample-Django-Project
|
d087d3fd419139ec7f3c934ae89eaca4e75b3f8e
|
[
"Apache-2.0"
] | null | null | null |
from django.contrib import admin

from .models import *


@admin.register(BlogPost)
class BlogPostAdmin(admin.ModelAdmin):
    # Show every model field as a column in the admin change list.
    list_display = [f.name for f in BlogPost._meta.fields]
| 25.714286
| 58
| 0.777778
| 25
| 180
| 5.52
| 0.76
| 0.15942
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.127778
| 180
| 6
| 59
| 30
| 0.878981
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.4
| 0
| 0.8
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 4
|
ea4291465aee10dab088c323e29dfc5454f0286b
| 182
|
py
|
Python
|
pylas/lasdatas/las12.py
|
weyerhaeuser/pylas
|
8b0e266bf65e40906128979546de97093aaeadeb
|
[
"BSD-3-Clause"
] | 2
|
2021-03-11T20:19:39.000Z
|
2021-08-18T08:31:49.000Z
|
pylas/lasdatas/las12.py
|
weyerhaeuser/pylas
|
8b0e266bf65e40906128979546de97093aaeadeb
|
[
"BSD-3-Clause"
] | null | null | null |
pylas/lasdatas/las12.py
|
weyerhaeuser/pylas
|
8b0e266bf65e40906128979546de97093aaeadeb
|
[
"BSD-3-Clause"
] | null | null | null |
from .base import LasBase
class LasData(LasBase):
    """LAS 1.2 data container; all behavior lives in LasBase."""

    def __init__(self, *, header=None, vlrs=None, points=None):
        # No version-specific state here: forward everything to the base.
        parts = {"header": header, "vlrs": vlrs, "points": points}
        super().__init__(**parts)
| 26
| 65
| 0.703297
| 24
| 182
| 5
| 0.583333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.159341
| 182
| 6
| 66
| 30.333333
| 0.784314
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| false
| 0
| 0.25
| 0
| 0.75
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 4
|
579eb50459cde106f96279f3256b79641cfa9927
| 201
|
py
|
Python
|
testcgi2.py
|
tyrell81/webuipi
|
c0bb320cd27cebfcf36019faaac443d777fd1717
|
[
"MIT"
] | null | null | null |
testcgi2.py
|
tyrell81/webuipi
|
c0bb320cd27cebfcf36019faaac443d777fd1717
|
[
"MIT"
] | null | null | null |
testcgi2.py
|
tyrell81/webuipi
|
c0bb320cd27cebfcf36019faaac443d777fd1717
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# NOTE(review): Python 2 script (imports urllib2); not runnable on Python 3.

import sys, os, re, shutil, json, urllib, urllib2, cgi

# Fix issues with decoding HTTP responses
#reload(sys)
#sys.setdefaultencoding('utf8')

# Render the standard CGI diagnostic page (echoes environment and form data).
cgi.test()
| 20.1
| 54
| 0.691542
| 29
| 201
| 4.793103
| 0.896552
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.017341
| 0.139303
| 201
| 9
| 55
| 22.333333
| 0.786127
| 0.61194
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 4
|
57af9df4dbfe6db94716caa772ab5f4f6c761876
| 34
|
py
|
Python
|
homeassistant/components/crimereports/__init__.py
|
domwillcode/home-assistant
|
f170c80bea70c939c098b5c88320a1c789858958
|
[
"Apache-2.0"
] | 23
|
2017-11-15T21:03:53.000Z
|
2021-03-29T21:33:48.000Z
|
homeassistant/components/crimereports/__init__.py
|
domwillcode/home-assistant
|
f170c80bea70c939c098b5c88320a1c789858958
|
[
"Apache-2.0"
] | 57
|
2020-10-15T06:47:00.000Z
|
2022-03-31T06:11:18.000Z
|
homeassistant/components/crimereports/__init__.py
|
klauern/home-assistant-core
|
c18ba6aec0627e6afb6442c678edb5ff2bb17db6
|
[
"Apache-2.0"
] | 14
|
2018-08-19T16:28:26.000Z
|
2021-09-02T18:26:53.000Z
|
"""The crimereports component."""
| 17
| 33
| 0.705882
| 3
| 34
| 8
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.088235
| 34
| 1
| 34
| 34
| 0.774194
| 0.794118
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
57c6c9bbe3c9715d0b69e477e1ef3a84b0ad6574
| 58
|
py
|
Python
|
unittest/loader_simple/tests/__init__.py
|
kumarstack55/python-playground
|
f152ce42cf663804ef217261402058cead0b726c
|
[
"MIT"
] | null | null | null |
unittest/loader_simple/tests/__init__.py
|
kumarstack55/python-playground
|
f152ce42cf663804ef217261402058cead0b726c
|
[
"MIT"
] | null | null | null |
unittest/loader_simple/tests/__init__.py
|
kumarstack55/python-playground
|
f152ce42cf663804ef217261402058cead0b726c
|
[
"MIT"
] | null | null | null |
#import unittest
#from tests.test_add import MyTestCase
| 11.6
| 38
| 0.810345
| 8
| 58
| 5.75
| 0.875
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.137931
| 58
| 4
| 39
| 14.5
| 0.92
| 0.896552
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
17bf75caa629fffdb87a54d97d216827f269ada4
| 930
|
py
|
Python
|
src/pycropml/transpiler/antlr_py/grammars/CommentsVisitor.py
|
AgriculturalModelExchangeInitiative/PyCropML
|
2eb330297d0765771c3b240e1936eb1110d47972
|
[
"MIT"
] | 3
|
2018-01-22T15:33:44.000Z
|
2018-08-02T20:40:40.000Z
|
src/pycropml/transpiler/antlr_py/grammars/CommentsVisitor.py
|
AgriculturalModelExchangeInitiative/PyCropML
|
2eb330297d0765771c3b240e1936eb1110d47972
|
[
"MIT"
] | 24
|
2018-01-22T12:18:15.000Z
|
2018-11-19T12:55:18.000Z
|
src/pycropml/transpiler/antlr_py/grammars/CommentsVisitor.py
|
AgriculturalModelExchangeInitiative/PyCropML
|
2eb330297d0765771c3b240e1936eb1110d47972
|
[
"MIT"
] | 1
|
2018-01-25T07:07:09.000Z
|
2018-01-25T07:07:09.000Z
|
# Generated from Comments.g4 by ANTLR 4.8
from antlr4 import *

# Support running both as a package module and as a loose script.
if __name__ is not None and "." in __name__:
    from .CommentsParser import CommentsParser
else:
    from CommentsParser import CommentsParser

# This class defines a complete generic visitor for a parse tree produced by CommentsParser.
# Each method simply descends into the children; subclasses override to act on nodes.
class CommentsVisitor(ParseTreeVisitor):

    # Visit a parse tree produced by CommentsParser#documentation.
    def visitDocumentation(self, ctx:CommentsParser.DocumentationContext):
        return self.visitChildren(ctx)

    # Visit a parse tree produced by CommentsParser#documentationContent.
    def visitDocumentationContent(self, ctx:CommentsParser.DocumentationContentContext):
        return self.visitChildren(ctx)

    # Visit a parse tree produced by CommentsParser#comment_line.
    def visitComment_line(self, ctx:CommentsParser.Comment_lineContext):
        return self.visitChildren(ctx)

# Only needed for the annotations above; drop it from the module namespace.
del CommentsParser
| 33.214286
| 92
| 0.78172
| 104
| 930
| 6.884615
| 0.471154
| 0.03352
| 0.055866
| 0.100559
| 0.28352
| 0.28352
| 0.236034
| 0.181564
| 0.181564
| 0.181564
| 0
| 0.005141
| 0.163441
| 930
| 28
| 93
| 33.214286
| 0.915167
| 0.339785
| 0
| 0.230769
| 1
| 0
| 0.00165
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.230769
| false
| 0
| 0.230769
| 0.230769
| 0.769231
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 4
|
17d620c5be9a8568267b2476a1ee8b1771cf7ab6
| 237
|
py
|
Python
|
utils/__init__.py
|
koukyo1994/kaggle-rfcx
|
c3573d014d99312b58882e7b939de6c1055129b1
|
[
"MIT"
] | 6
|
2021-02-18T05:18:17.000Z
|
2022-02-19T02:49:32.000Z
|
utils/__init__.py
|
koukyo1994/kaggle-rfcx
|
c3573d014d99312b58882e7b939de6c1055129b1
|
[
"MIT"
] | null | null | null |
utils/__init__.py
|
koukyo1994/kaggle-rfcx
|
c3573d014d99312b58882e7b939de6c1055129b1
|
[
"MIT"
] | 2
|
2021-02-18T11:31:50.000Z
|
2022-02-19T02:49:07.000Z
|
from .checkpoint import save_best_model, save_model
from .config import load_config
from .jsonutil import save_json
from .logger import get_logger
from .meter import AverageMeter
from .parser import get_parser
from .seed import set_seed
| 29.625
| 51
| 0.843882
| 37
| 237
| 5.189189
| 0.459459
| 0.104167
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.122363
| 237
| 7
| 52
| 33.857143
| 0.923077
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 4
|
aa06c3fb1be100d7a8deac3d0902a412c4a754ad
| 166
|
py
|
Python
|
templateaddons/settings.py
|
austinbrown34/django-templateaddons3
|
c18dd25914a74f129ea5ebf8dd2a09336458fd02
|
[
"BSD-3-Clause"
] | null | null | null |
templateaddons/settings.py
|
austinbrown34/django-templateaddons3
|
c18dd25914a74f129ea5ebf8dd2a09336458fd02
|
[
"BSD-3-Clause"
] | null | null | null |
templateaddons/settings.py
|
austinbrown34/django-templateaddons3
|
c18dd25914a74f129ea5ebf8dd2a09336458fd02
|
[
"BSD-3-Clause"
] | 3
|
2018-07-16T09:18:47.000Z
|
2022-03-16T02:46:33.000Z
|
from django.conf import settings

# Name of the template-context variable used to hold counter state.
# NOTE(review): the Django setting looked up ('TEMPLATEADDONS_COUNTER_GLOBAL_VARIABLE')
# differs from this constant's name ('..._COUNTERS_VARIABLE') — confirm which
# spelling is the documented setting name.
TEMPLATEADDONS_COUNTERS_VARIABLE = getattr(settings, 'TEMPLATEADDONS_COUNTER_GLOBAL_VARIABLE', '_templateaddons_counters')  # NOQA
| 33.2
| 130
| 0.849398
| 17
| 166
| 7.882353
| 0.705882
| 0.328358
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.084337
| 166
| 4
| 131
| 41.5
| 0.881579
| 0.024096
| 0
| 0
| 0
| 0
| 0.3875
| 0.3875
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 4
|
aa29ef91d7e92a5eacba8d19a696681100ac5fca
| 254
|
py
|
Python
|
Py93/main.py
|
xhexe/Py8R
|
44238c5403e7f76988760a040bf5c292824c22e7
|
[
"WTFPL"
] | null | null | null |
Py93/main.py
|
xhexe/Py8R
|
44238c5403e7f76988760a040bf5c292824c22e7
|
[
"WTFPL"
] | null | null | null |
Py93/main.py
|
xhexe/Py8R
|
44238c5403e7f76988760a040bf5c292824c22e7
|
[
"WTFPL"
] | null | null | null |
def words_numbers_counter():
    """Read a line from stdin and report its letter and digit counts.

    Despite the name, counting is per character: "Words" is the number of
    alphabetic characters, "Numbers" the number of digit characters.
    """
    entered_value = input("Enter string:")
    alpha_total = sum(1 for ch in entered_value if ch.isalpha())
    digit_total = sum(1 for ch in entered_value if ch.isdigit())
    print("Words:", alpha_total)
    print("Numbers:", digit_total)


words_numbers_counter()
| 31.75
| 78
| 0.700787
| 34
| 254
| 5.029412
| 0.5
| 0.210526
| 0.222222
| 0.222222
| 0.245614
| 0.245614
| 0
| 0
| 0
| 0
| 0
| 0
| 0.122047
| 254
| 7
| 79
| 36.285714
| 0.766816
| 0
| 0
| 0
| 0
| 0
| 0.106299
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.2
| false
| 0
| 0
| 0
| 0.2
| 0.4
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
aa2ada62f40a26280d5b86bbdb07fb2896c662fc
| 43
|
pyde
|
Python
|
processing/Mod. 9/sketch_9_1_l61/sketch_9_1_l61.pyde
|
nanam0rgana/2019-fall-polytech-cs
|
1a31acb3cf22edc930318dec17324b05dd7788d5
|
[
"MIT"
] | null | null | null |
processing/Mod. 9/sketch_9_1_l61/sketch_9_1_l61.pyde
|
nanam0rgana/2019-fall-polytech-cs
|
1a31acb3cf22edc930318dec17324b05dd7788d5
|
[
"MIT"
] | null | null | null |
processing/Mod. 9/sketch_9_1_l61/sketch_9_1_l61.pyde
|
nanam0rgana/2019-fall-polytech-cs
|
1a31acb3cf22edc930318dec17324b05dd7788d5
|
[
"MIT"
] | null | null | null |
# Processing (Python mode, .pyde) sketch: random() comes from the Processing
# runtime, and `print k` is Python 2 syntax — not runnable under plain Python 3.
k = random(5,15)
# seed the x-coordinate list with one random value in [5, 15)
xCoordinate = [k]
print k
| 10.75
| 17
| 0.651163
| 8
| 43
| 3.5
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.085714
| 0.186047
| 43
| 3
| 18
| 14.333333
| 0.714286
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0
| null | null | 0.333333
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
a4a9c690a7b67fe1f7939870e81c759c5ddfaa1d
| 2,978
|
py
|
Python
|
mparticle/__init__.py
|
juviasuisei/mparticle-python-sdk
|
2f412da6fb765736c252888b1f3f57c99c4de3b9
|
[
"Apache-2.0"
] | 3
|
2020-09-24T19:37:19.000Z
|
2021-04-21T15:58:55.000Z
|
mparticle/__init__.py
|
juviasuisei/mparticle-python-sdk
|
2f412da6fb765736c252888b1f3f57c99c4de3b9
|
[
"Apache-2.0"
] | 5
|
2016-12-10T19:24:35.000Z
|
2022-03-10T14:45:55.000Z
|
mparticle/__init__.py
|
juviasuisei/mparticle-python-sdk
|
2f412da6fb765736c252888b1f3f57c99c4de3b9
|
[
"Apache-2.0"
] | 6
|
2016-12-20T23:28:36.000Z
|
2020-10-14T03:11:41.000Z
|
# coding: utf-8
"""
mParticle
mParticle Event API
OpenAPI spec version: 1.0.1
Contact: support@mparticle.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import absolute_import
# import models into sdk package
from .models.api_response import ApiResponse
from .models.api_response_errors import ApiResponseErrors
from .models.app_event import AppEvent
from .models.application_information import ApplicationInformation
from .models.application_state_transition_event import ApplicationStateTransitionEvent
from .models.attribution_info import AttributionInfo
from .models.batch import Batch
from .models.breadcrumb_event import BreadcrumbEvent
from .models.ccpa_consent_state import CCPAConsentState
from .models.commerce_event import CommerceEvent
from .models.consent_state import ConsentState
from .models.crash_report_event import CrashReportEvent
from .models.device_current_state import DeviceCurrentState
from .models.device_information import DeviceInformation
from .models.event_base import EventBase
from .models.event_data import EventData
from .models.first_run_event import FirstRunEvent
from .models.gdpr_consent_state import GDPRConsentState
from .models.geo_location import GeoLocation
from .models.media_info import MediaInfo
from .models.network_performance_event import NetworkPerformanceEvent
from .models.opt_out_event import OptOutEvent
from .models.product import Product
from .models.product_action import ProductAction
from .models.product_impression import ProductImpression
from .models.profile_event import ProfileEvent
from .models.promotion import Promotion
from .models.promotion_action import PromotionAction
from .models.push_message_event import PushMessageEvent
from .models.push_registration_event import PushRegistrationEvent
from .models.screen_view_event import ScreenViewEvent
from .models.session_end_event import SessionEndEvent
from .models.session_start_event import SessionStartEvent
from .models.shopping_cart import ShoppingCart
from .models.source_information import SourceInformation
from .models.user_identities import UserIdentities
from .models.batch_context import BatchContext
from .models.data_plan_context import DataPlanContext
# import apis into sdk package
from .apis.events_api import EventsApi
# import ApiClient
from .api_client import ApiClient
from .configuration import Configuration
| 40.243243
| 86
| 0.838146
| 386
| 2,978
| 6.316062
| 0.455959
| 0.155865
| 0.022149
| 0.013126
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.003044
| 0.117529
| 2,978
| 73
| 87
| 40.794521
| 0.924658
| 0.259234
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 4
|
a4b8c4d8021c72da47828a33f48a0b9cf00abb4e
| 43
|
py
|
Python
|
tempCodeRunnerFile.py
|
tobby-lie/Russion-Disinformation-Project
|
387c283942380c4cf86c24a9278577897f29b2bf
|
[
"CC-BY-4.0"
] | null | null | null |
tempCodeRunnerFile.py
|
tobby-lie/Russion-Disinformation-Project
|
387c283942380c4cf86c24a9278577897f29b2bf
|
[
"CC-BY-4.0"
] | null | null | null |
tempCodeRunnerFile.py
|
tobby-lie/Russion-Disinformation-Project
|
387c283942380c4cf86c24a9278577897f29b2bf
|
[
"CC-BY-4.0"
] | null | null | null |
_file:
data_svm = json.load(json_file)
| 14.333333
| 35
| 0.697674
| 7
| 43
| 3.857143
| 0.714286
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.186047
| 43
| 2
| 36
| 21.5
| 0.771429
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0
| null | null | 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
a4bad98b106da41c2e3d7d213245bad114ad447a
| 294
|
py
|
Python
|
src/metrics.py
|
szymonmaszke/AiCorExample
|
e453e6922bddb612fb5a3feefa62ecf40a2ea719
|
[
"MIT"
] | 4
|
2021-02-12T16:30:53.000Z
|
2021-08-30T02:48:19.000Z
|
src/metrics.py
|
AI-Core/AiCorExample
|
e453e6922bddb612fb5a3feefa62ecf40a2ea719
|
[
"MIT"
] | null | null | null |
src/metrics.py
|
AI-Core/AiCorExample
|
e453e6922bddb612fb5a3feefa62ecf40a2ea719
|
[
"MIT"
] | 2
|
2021-01-17T16:13:03.000Z
|
2021-01-18T11:09:10.000Z
|
import torch
def accuracy(logits, y):
    """Fraction of rows whose argmax over the last dim equals the label.

    :param logits: class scores; last dimension indexes classes
    :param y: integer class labels, one per row
    :return: scalar float tensor in [0, 1]
    """
    predictions = torch.argmax(logits, dim=-1)
    hits = (predictions == y).float()
    return torch.mean(hits)
def loss(logits, y):
    """Mean cross-entropy between raw (unnormalized) logits and int labels.

    :param logits: class scores; last dimension indexes classes
    :param y: integer class labels, one per row
    :return: scalar loss tensor
    """
    cross_entropy = torch.nn.functional.cross_entropy
    return cross_entropy(logits, y)
def print_metrics(metrics):
    """Print each metric as '<name>: <value>', one per line, in dict order."""
    for metric in metrics:
        value = metrics[metric]
        print(f"{metric}: {value}")
| 19.6
| 66
| 0.666667
| 42
| 294
| 4.619048
| 0.595238
| 0.108247
| 0.134021
| 0.185567
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.004115
| 0.173469
| 294
| 14
| 67
| 21
| 0.794239
| 0
| 0
| 0
| 0
| 0
| 0.057823
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.375
| false
| 0
| 0.125
| 0.25
| 0.75
| 0.25
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 4
|
a4e03ee734e5025ac24d7874bb3702ff16083b79
| 273
|
py
|
Python
|
pavo_cristatus/interaction_sequence_generators/utilities.py
|
MATTHEWFRAZER/pavo_cristatus
|
a4b96c0eb6c454fbe38d2092e29f63457a4ee955
|
[
"MIT"
] | null | null | null |
pavo_cristatus/interaction_sequence_generators/utilities.py
|
MATTHEWFRAZER/pavo_cristatus
|
a4b96c0eb6c454fbe38d2092e29f63457a4ee955
|
[
"MIT"
] | null | null | null |
pavo_cristatus/interaction_sequence_generators/utilities.py
|
MATTHEWFRAZER/pavo_cristatus
|
a4b96c0eb6c454fbe38d2092e29f63457a4ee955
|
[
"MIT"
] | null | null | null |
def get_type_check(expected_type):
"""
Any -> (Any -> bool)
:param expected_type: type that will be used in the generated boolean check
:return: a function that will do a boolean check against new types
"""
return lambda x: type(x) is expected_type
| 34.125
| 79
| 0.688645
| 42
| 273
| 4.357143
| 0.619048
| 0.196721
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.230769
| 273
| 7
| 80
| 39
| 0.871429
| 0.59707
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| false
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 4
|
a4eaa048ec52ee4ca7626e5af261917e261102f5
| 169
|
py
|
Python
|
run_tests.py
|
roedoejet/wordweaver-legacy
|
caadb2edf0c848337f14dc56513c0b7cf14ffb67
|
[
"MIT"
] | 4
|
2019-10-03T16:22:41.000Z
|
2020-09-14T07:01:52.000Z
|
run_tests.py
|
nrc-cnrc/wordweaver
|
24789191e5a92d0b77fe46f0c2e0a251baa7e06c
|
[
"MIT"
] | null | null | null |
run_tests.py
|
nrc-cnrc/wordweaver
|
24789191e5a92d0b77fe46f0c2e0a251baa7e06c
|
[
"MIT"
] | null | null | null |
from wordweaver.tests import run
import sys
try:
run.run_tests(sys.argv[1])
except IndexError:
print("Please specify a test suite to run: i.e. 'dev' or 'prod'")
| 24.142857
| 69
| 0.710059
| 29
| 169
| 4.103448
| 0.793103
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.007143
| 0.171598
| 169
| 7
| 69
| 24.142857
| 0.842857
| 0
| 0
| 0
| 0
| 0
| 0.329412
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.333333
| 0
| 0.333333
| 0.166667
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 4
|
a4f7276a042ec88a0127e8c158b97f83292c6751
| 1,303
|
py
|
Python
|
py_typescript_generator/typescript_model_compiler/ts_type.py
|
Latios96/py-typescript-generator
|
803f1fee30ceab31228db59e0408576b503b42ef
|
[
"MIT"
] | 1
|
2022-03-11T14:19:12.000Z
|
2022-03-11T14:19:12.000Z
|
py_typescript_generator/typescript_model_compiler/ts_type.py
|
Latios96/py-typescript-generator
|
803f1fee30ceab31228db59e0408576b503b42ef
|
[
"MIT"
] | null | null | null |
py_typescript_generator/typescript_model_compiler/ts_type.py
|
Latios96/py-typescript-generator
|
803f1fee30ceab31228db59e0408576b503b42ef
|
[
"MIT"
] | null | null | null |
class TsType:
def __init__(self, name: str, is_optional: bool = False):
self._name = name
self._is_optional = is_optional
@property
def name(self) -> str:
return self._name
@property
def is_optional(self) -> bool:
return self._is_optional
def as_optional_type(self):
# type: ()->TsType
return TsType(name=self.name, is_optional=True)
def as_non_optional_type(self):
# type: ()->TsType
return TsType(name=self.name, is_optional=False)
def with_is_optional(self, is_optional):
# type: (bool)->TsType
return TsType(name=self.name, is_optional=is_optional)
def __repr__(self):
return self.__str__()
def __str__(self):
return f"TsType(name='{self.name}', is_optional='{self.is_optional}')"
def __hash__(self):
return hash((self.name, self.is_optional))
def __eq__(self, other):
return (
other and self.name == other.name and self.is_optional == other.is_optional
)
def format_as_type_reference(self) -> str:
return self._format_as_optional(self.name)
def _format_as_optional(self, the_str: str) -> str:
if self.is_optional:
return f"({the_str} | undefined)"
return the_str
| 27.723404
| 87
| 0.626247
| 170
| 1,303
| 4.429412
| 0.170588
| 0.225764
| 0.130146
| 0.095618
| 0.300133
| 0.249668
| 0.212483
| 0.212483
| 0.159363
| 0.159363
| 0
| 0
| 0.260169
| 1,303
| 46
| 88
| 28.326087
| 0.78112
| 0.041443
| 0
| 0.0625
| 0
| 0
| 0.066667
| 0.04739
| 0
| 0
| 0
| 0
| 0
| 1
| 0.375
| false
| 0
| 0
| 0.3125
| 0.78125
| 0
| 0
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 4
|
351733708c6fc24fe4f882aca0b245d91867a702
| 17,952
|
py
|
Python
|
test/orm/test_update_delete.py
|
ContextLogic/sqlalchemy
|
b7adfe5e4d9baa61169ba79aa5ba8f64f0ff7645
|
[
"MIT"
] | 8
|
2017-07-18T18:35:10.000Z
|
2022-02-01T19:52:57.000Z
|
test/orm/test_update_delete.py
|
RetailArchitects/sqlalchemy
|
399a5c96b2fd0e0f2f0cdda7766b31e37454eb2e
|
[
"MIT"
] | null | null | null |
test/orm/test_update_delete.py
|
RetailArchitects/sqlalchemy
|
399a5c96b2fd0e0f2f0cdda7766b31e37454eb2e
|
[
"MIT"
] | 6
|
2017-07-26T08:51:10.000Z
|
2021-03-04T10:16:37.000Z
|
from test.lib.testing import eq_, assert_raises, assert_raises_message
from test.lib import fixtures, testing
from sqlalchemy import Integer, String, ForeignKey, or_, and_, exc, select, func
from sqlalchemy.orm import mapper, relationship, backref, Session, joinedload
from test.lib.schema import Table, Column
class UpdateDeleteTest(fixtures.MappedTest):
@classmethod
def define_tables(cls, metadata):
Table('users', metadata,
Column('id', Integer, primary_key=True,
test_needs_autoincrement=True),
Column('name', String(32)),
Column('age', Integer))
@classmethod
def setup_classes(cls):
class User(cls.Comparable):
pass
@classmethod
def insert_data(cls):
users = cls.tables.users
users.insert().execute([
dict(id=1, name='john', age=25),
dict(id=2, name='jack', age=47),
dict(id=3, name='jill', age=29),
dict(id=4, name='jane', age=37),
])
@classmethod
def setup_mappers(cls):
User = cls.classes.User
users = cls.tables.users
mapper(User, users)
def test_illegal_operations(self):
User = self.classes.User
s = Session()
for q, mname in (
(s.query(User).limit(2), "limit"),
(s.query(User).offset(2), "offset"),
(s.query(User).limit(2).offset(2), "limit"),
(s.query(User).order_by(User.id), "order_by"),
(s.query(User).group_by(User.id), "group_by"),
(s.query(User).distinct(), "distinct")
):
assert_raises_message(
exc.InvalidRequestError,
r"Can't call Query.update\(\) when %s\(\) has been called" % mname,
q.update,
{'name':'ed'})
assert_raises_message(
exc.InvalidRequestError,
r"Can't call Query.delete\(\) when %s\(\) has been called" % mname,
q.delete)
def test_delete(self):
User = self.classes.User
sess = Session()
john,jack,jill,jane = sess.query(User).order_by(User.id).all()
sess.query(User).filter(or_(User.name == 'john', User.name == 'jill')).delete()
assert john not in sess and jill not in sess
eq_(sess.query(User).order_by(User.id).all(), [jack,jane])
def test_delete_with_bindparams(self):
User = self.classes.User
sess = Session()
john,jack,jill,jane = sess.query(User).order_by(User.id).all()
sess.query(User).filter('name = :name').params(name='john').delete('fetch')
assert john not in sess
eq_(sess.query(User).order_by(User.id).all(), [jack,jill,jane])
def test_delete_rollback(self):
User = self.classes.User
sess = Session()
john,jack,jill,jane = sess.query(User).order_by(User.id).all()
sess.query(User).filter(or_(User.name == 'john', User.name == 'jill')).\
delete(synchronize_session='evaluate')
assert john not in sess and jill not in sess
sess.rollback()
assert john in sess and jill in sess
def test_delete_rollback_with_fetch(self):
User = self.classes.User
sess = Session()
john,jack,jill,jane = sess.query(User).order_by(User.id).all()
sess.query(User).filter(or_(User.name == 'john', User.name == 'jill')).\
delete(synchronize_session='fetch')
assert john not in sess and jill not in sess
sess.rollback()
assert john in sess and jill in sess
def test_delete_without_session_sync(self):
User = self.classes.User
sess = Session()
john,jack,jill,jane = sess.query(User).order_by(User.id).all()
sess.query(User).filter(or_(User.name == 'john', User.name == 'jill')).\
delete(synchronize_session=False)
assert john in sess and jill in sess
eq_(sess.query(User).order_by(User.id).all(), [jack,jane])
def test_delete_with_fetch_strategy(self):
User = self.classes.User
sess = Session()
john,jack,jill,jane = sess.query(User).order_by(User.id).all()
sess.query(User).filter(or_(User.name == 'john', User.name == 'jill')).\
delete(synchronize_session='fetch')
assert john not in sess and jill not in sess
eq_(sess.query(User).order_by(User.id).all(), [jack,jane])
@testing.fails_on('mysql', 'FIXME: unknown')
def test_delete_invalid_evaluation(self):
User = self.classes.User
sess = Session()
john,jack,jill,jane = sess.query(User).order_by(User.id).all()
assert_raises(exc.InvalidRequestError,
sess.query(User).
filter(User.name == select([func.max(User.name)])).delete,
synchronize_session='evaluate'
)
sess.query(User).filter(User.name == select([func.max(User.name)])).\
delete(synchronize_session='fetch')
assert john not in sess
eq_(sess.query(User).order_by(User.id).all(), [jack,jill,jane])
def test_update(self):
User, users = self.classes.User, self.tables.users
sess = Session()
john,jack,jill,jane = sess.query(User).order_by(User.id).all()
sess.query(User).filter(User.age > 29).\
update({'age': User.age - 10}, synchronize_session='evaluate')
eq_([john.age, jack.age, jill.age, jane.age], [25,37,29,27])
eq_(sess.query(User.age).order_by(User.id).all(), zip([25,37,29,27]))
sess.query(User).filter(User.age > 29).\
update({User.age: User.age - 10}, synchronize_session='evaluate')
eq_([john.age, jack.age, jill.age, jane.age], [25,27,29,27])
eq_(sess.query(User.age).order_by(User.id).all(), zip([25,27,29,27]))
sess.query(User).filter(User.age > 27).\
update({users.c.age: User.age - 10}, synchronize_session='evaluate')
eq_([john.age, jack.age, jill.age, jane.age], [25,27,19,27])
eq_(sess.query(User.age).order_by(User.id).all(), zip([25,27,19,27]))
sess.query(User).filter(User.age == 25).\
update({User.age: User.age - 10}, synchronize_session='fetch')
eq_([john.age, jack.age, jill.age, jane.age], [15,27,19,27])
eq_(sess.query(User.age).order_by(User.id).all(), zip([15,27,19,27]))
def test_update_with_bindparams(self):
User = self.classes.User
sess = Session()
john,jack,jill,jane = sess.query(User).order_by(User.id).all()
sess.query(User).filter('age > :x').params(x=29).\
update({'age': User.age - 10}, synchronize_session='fetch')
eq_([john.age, jack.age, jill.age, jane.age], [25,37,29,27])
eq_(sess.query(User.age).order_by(User.id).all(), zip([25,37,29,27]))
def test_update_changes_resets_dirty(self):
User = self.classes.User
sess = Session(autoflush=False)
john,jack,jill,jane = sess.query(User).order_by(User.id).all()
john.age = 50
jack.age = 37
# autoflush is false. therefore our '50' and '37' are getting
# blown away by this operation.
sess.query(User).filter(User.age > 29).\
update({'age': User.age - 10}, synchronize_session='evaluate')
for x in (john, jack, jill, jane):
assert not sess.is_modified(x)
eq_([john.age, jack.age, jill.age, jane.age], [25,37,29,27])
john.age = 25
assert john in sess.dirty
assert jack in sess.dirty
assert jill not in sess.dirty
assert not sess.is_modified(john)
assert not sess.is_modified(jack)
def test_update_changes_with_autoflush(self):
User = self.classes.User
sess = Session()
john,jack,jill,jane = sess.query(User).order_by(User.id).all()
john.age = 50
jack.age = 37
sess.query(User).filter(User.age > 29).\
update({'age': User.age - 10}, synchronize_session='evaluate')
for x in (john, jack, jill, jane):
assert not sess.is_modified(x)
eq_([john.age, jack.age, jill.age, jane.age], [40, 27, 29, 27])
john.age = 25
assert john in sess.dirty
assert jack not in sess.dirty
assert jill not in sess.dirty
assert sess.is_modified(john)
assert not sess.is_modified(jack)
def test_update_with_expire_strategy(self):
User = self.classes.User
sess = Session()
john,jack,jill,jane = sess.query(User).order_by(User.id).all()
sess.query(User).filter(User.age > 29).\
update({'age': User.age - 10}, synchronize_session='fetch')
eq_([john.age, jack.age, jill.age, jane.age], [25,37,29,27])
eq_(sess.query(User.age).order_by(User.id).all(), zip([25,37,29,27]))
@testing.fails_if(lambda: not testing.db.dialect.supports_sane_rowcount)
def test_update_returns_rowcount(self):
User = self.classes.User
sess = Session()
rowcount = sess.query(User).filter(User.age > 29).update({'age': User.age + 0})
eq_(rowcount, 2)
rowcount = sess.query(User).filter(User.age > 29).update({'age': User.age - 10})
eq_(rowcount, 2)
@testing.fails_if(lambda: not testing.db.dialect.supports_sane_rowcount)
def test_delete_returns_rowcount(self):
User = self.classes.User
sess = Session()
rowcount = sess.query(User).filter(User.age > 26).\
delete(synchronize_session=False)
eq_(rowcount, 3)
def test_update_all(self):
User = self.classes.User
sess = Session()
john,jack,jill,jane = sess.query(User).order_by(User.id).all()
sess.query(User).update({'age': 42}, synchronize_session='evaluate')
eq_([john.age, jack.age, jill.age, jane.age], [42,42,42,42])
eq_(sess.query(User.age).order_by(User.id).all(), zip([42,42,42,42]))
def test_delete_all(self):
User = self.classes.User
sess = Session()
john,jack,jill,jane = sess.query(User).order_by(User.id).all()
sess.query(User).delete(synchronize_session='evaluate')
assert not (john in sess or jack in sess or jill in sess or jane in sess)
eq_(sess.query(User).count(), 0)
def test_autoflush_before_evaluate_update(self):
User = self.classes.User
sess = Session()
john = sess.query(User).filter_by(name='john').one()
john.name = 'j2'
sess.query(User).filter_by(name='j2').\
update({'age':42},
synchronize_session='evaluate')
eq_(john.age, 42)
def test_autoflush_before_fetch_update(self):
User = self.classes.User
sess = Session()
john = sess.query(User).filter_by(name='john').one()
john.name = 'j2'
sess.query(User).filter_by(name='j2').\
update({'age':42},
synchronize_session='fetch')
eq_(john.age, 42)
def test_autoflush_before_evaluate_delete(self):
User = self.classes.User
sess = Session()
john = sess.query(User).filter_by(name='john').one()
john.name = 'j2'
sess.query(User).filter_by(name='j2').\
delete(
synchronize_session='evaluate')
assert john not in sess
def test_autoflush_before_fetch_delete(self):
User = self.classes.User
sess = Session()
john = sess.query(User).filter_by(name='john').one()
john.name = 'j2'
sess.query(User).filter_by(name='j2').\
delete(
synchronize_session='fetch')
assert john not in sess
def test_evaluate_before_update(self):
User = self.classes.User
sess = Session()
john = sess.query(User).filter_by(name='john').one()
sess.expire(john, ['age'])
# eval must be before the update. otherwise
# we eval john, age has been expired and doesn't
# match the new value coming in
sess.query(User).filter_by(name='john').filter_by(age=25).\
update({'name':'j2', 'age':40},
synchronize_session='evaluate')
eq_(john.name, 'j2')
eq_(john.age, 40)
def test_fetch_before_update(self):
User = self.classes.User
sess = Session()
john = sess.query(User).filter_by(name='john').one()
sess.expire(john, ['age'])
sess.query(User).filter_by(name='john').filter_by(age=25).\
update({'name':'j2', 'age':40},
synchronize_session='fetch')
eq_(john.name, 'j2')
eq_(john.age, 40)
def test_evaluate_before_delete(self):
User = self.classes.User
sess = Session()
john = sess.query(User).filter_by(name='john').one()
sess.expire(john, ['age'])
sess.query(User).filter_by(name='john').\
filter_by(age=25).\
delete(
synchronize_session='evaluate')
assert john not in sess
def test_fetch_before_delete(self):
User = self.classes.User
sess = Session()
john = sess.query(User).filter_by(name='john').one()
sess.expire(john, ['age'])
sess.query(User).filter_by(name='john').\
filter_by(age=25).\
delete(
synchronize_session='fetch')
assert john not in sess
class UpdateDeleteRelatedTest(fixtures.MappedTest):
@classmethod
def define_tables(cls, metadata):
Table('users', metadata,
Column('id', Integer, primary_key=True,
test_needs_autoincrement=True),
Column('name', String(32)),
Column('age', Integer))
Table('documents', metadata,
Column('id', Integer, primary_key=True,
test_needs_autoincrement=True),
Column('user_id', None, ForeignKey('users.id')),
Column('title', String(32)))
@classmethod
def setup_classes(cls):
class User(cls.Comparable):
pass
class Document(cls.Comparable):
pass
@classmethod
def insert_data(cls):
users = cls.tables.users
users.insert().execute([
dict(id=1, name='john', age=25),
dict(id=2, name='jack', age=47),
dict(id=3, name='jill', age=29),
dict(id=4, name='jane', age=37),
])
documents = cls.tables.documents
documents.insert().execute([
dict(id=1, user_id=1, title='foo'),
dict(id=2, user_id=1, title='bar'),
dict(id=3, user_id=2, title='baz'),
])
@classmethod
def setup_mappers(cls):
documents, Document, User, users = (cls.tables.documents,
cls.classes.Document,
cls.classes.User,
cls.tables.users)
mapper(User, users)
mapper(Document, documents, properties={
'user': relationship(User, lazy='joined',
backref=backref('documents', lazy='select'))
})
def test_update_with_eager_relationships(self):
Document = self.classes.Document
sess = Session()
foo,bar,baz = sess.query(Document).order_by(Document.id).all()
sess.query(Document).filter(Document.user_id == 1).\
update({'title': Document.title+Document.title}, synchronize_session='fetch')
eq_([foo.title, bar.title, baz.title], ['foofoo','barbar', 'baz'])
eq_(sess.query(Document.title).order_by(Document.id).all(),
zip(['foofoo','barbar', 'baz']))
def test_update_with_explicit_joinedload(self):
User = self.classes.User
sess = Session()
john,jack,jill,jane = sess.query(User).order_by(User.id).all()
sess.query(User).options(joinedload(User.documents)).filter(User.age > 29).\
update({'age': User.age - 10}, synchronize_session='fetch')
eq_([john.age, jack.age, jill.age, jane.age], [25,37,29,27])
eq_(sess.query(User.age).order_by(User.id).all(), zip([25,37,29,27]))
def test_delete_with_eager_relationships(self):
Document = self.classes.Document
sess = Session()
sess.query(Document).filter(Document.user_id == 1).\
delete(synchronize_session=False)
eq_(sess.query(Document.title).all(), zip(['baz']))
class ExpressionUpdateTest(fixtures.MappedTest):
@classmethod
def define_tables(cls, metadata):
data = Table('data', metadata,
Column('id', Integer, primary_key=True,
test_needs_autoincrement=True),
Column('counter', Integer, nullable=False, default=0)
)
@classmethod
def setup_classes(cls):
class Data(cls.Comparable):
pass
@classmethod
def setup_mappers(cls):
data = cls.tables.data
mapper(cls.classes.Data, data, properties={'cnt':data.c.counter})
@testing.provide_metadata
def test_update_attr_names(self):
Data = self.classes.Data
d1 = Data()
sess = Session()
sess.add(d1)
sess.commit()
eq_(d1.cnt, 0)
sess.query(Data).update({Data.cnt:Data.cnt + 1})
sess.flush()
eq_(d1.cnt, 1)
sess.query(Data).update({Data.cnt:Data.cnt + 1}, 'fetch')
sess.flush()
eq_(d1.cnt, 2)
sess.close()
| 32.93945
| 93
| 0.572527
| 2,282
| 17,952
| 4.38475
| 0.087642
| 0.06656
| 0.087048
| 0.06646
| 0.798121
| 0.748051
| 0.73406
| 0.722367
| 0.693484
| 0.677693
| 0
| 0.02331
| 0.283088
| 17,952
| 544
| 94
| 33
| 0.754157
| 0.011754
| 0
| 0.658915
| 0
| 0
| 0.040771
| 0
| 0
| 0
| 0
| 0
| 0.077519
| 1
| 0.103359
| false
| 0.010336
| 0.01292
| 0
| 0.134367
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
35349b8577f3085c779179bf350678a96b5840bf
| 64
|
py
|
Python
|
pitstop/backends/__init__.py
|
darvid/pitstop
|
517fd3631b437bc5591cea10aacbecde7d21e9ac
|
[
"MIT"
] | 4
|
2018-12-13T12:15:58.000Z
|
2021-01-17T09:26:09.000Z
|
pitstop/backends/__init__.py
|
darvid/pitstop
|
517fd3631b437bc5591cea10aacbecde7d21e9ac
|
[
"MIT"
] | null | null | null |
pitstop/backends/__init__.py
|
darvid/pitstop
|
517fd3631b437bc5591cea10aacbecde7d21e9ac
|
[
"MIT"
] | 1
|
2019-05-03T09:29:02.000Z
|
2019-05-03T09:29:02.000Z
|
"""Provides read-only interfaces to configuration backends."""
| 32
| 63
| 0.765625
| 7
| 64
| 7
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.109375
| 64
| 1
| 64
| 64
| 0.859649
| 0.875
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
353671a03b7f92abe65b3c7cefe7954accd870de
| 99
|
py
|
Python
|
actionlab/experiment/balance.py
|
danjgale/action-lab
|
ab4016b46dcd10d4c75712f7a5eef6e88ca69b04
|
[
"MIT"
] | null | null | null |
actionlab/experiment/balance.py
|
danjgale/action-lab
|
ab4016b46dcd10d4c75712f7a5eef6e88ca69b04
|
[
"MIT"
] | null | null | null |
actionlab/experiment/balance.py
|
danjgale/action-lab
|
ab4016b46dcd10d4c75712f7a5eef6e88ca69b04
|
[
"MIT"
] | null | null | null |
"""Module for counterbalancing and shuffling trial types, conditions,
experimental blocks, etc.
"""
| 33
| 69
| 0.787879
| 11
| 99
| 7.090909
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.111111
| 99
| 3
| 70
| 33
| 0.886364
| 0.929293
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
104c839c97d6583b09dc59183a4e031f70802da2
| 120
|
py
|
Python
|
enthought/contexts/adapter/unit_converter_functions.py
|
enthought/etsproxy
|
4aafd628611ebf7fe8311c9d1a0abcf7f7bb5347
|
[
"BSD-3-Clause"
] | 3
|
2016-12-09T06:05:18.000Z
|
2018-03-01T13:00:29.000Z
|
enthought/contexts/adapter/unit_converter_functions.py
|
enthought/etsproxy
|
4aafd628611ebf7fe8311c9d1a0abcf7f7bb5347
|
[
"BSD-3-Clause"
] | 1
|
2020-12-02T00:51:32.000Z
|
2020-12-02T08:48:55.000Z
|
enthought/contexts/adapter/unit_converter_functions.py
|
enthought/etsproxy
|
4aafd628611ebf7fe8311c9d1a0abcf7f7bb5347
|
[
"BSD-3-Clause"
] | null | null | null |
# proxy module
from __future__ import absolute_import
from codetools.contexts.adapter.unit_converter_functions import *
| 30
| 65
| 0.866667
| 15
| 120
| 6.466667
| 0.8
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.091667
| 120
| 3
| 66
| 40
| 0.889908
| 0.1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 4
|
106f7adf84e4436de3b852034b2d2bf770a9ece7
| 159
|
py
|
Python
|
flask_stormpath/errors.py
|
lavyaKoli/stormpath-flask
|
ca79162302e34085bef774751288d2ac95479c0d
|
[
"Apache-2.0"
] | 99
|
2015-01-04T06:27:10.000Z
|
2021-07-27T11:06:15.000Z
|
flask_stormpath/errors.py
|
lavyaKoli/stormpath-flask
|
ca79162302e34085bef774751288d2ac95479c0d
|
[
"Apache-2.0"
] | 65
|
2015-01-05T17:34:27.000Z
|
2019-01-21T09:59:01.000Z
|
flask_stormpath/errors.py
|
lavyaKoli/stormpath-flask
|
ca79162302e34085bef774751288d2ac95479c0d
|
[
"Apache-2.0"
] | 37
|
2015-03-20T16:24:44.000Z
|
2020-10-01T16:12:30.000Z
|
"""Custom errors."""
class ConfigurationError(Exception):
"""
This exception is raised if a user has misconfigured Flask-Stormpath.
"""
pass
| 17.666667
| 73
| 0.666667
| 17
| 159
| 6.235294
| 0.941176
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.220126
| 159
| 8
| 74
| 19.875
| 0.854839
| 0.528302
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.5
| 0
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
|
0
| 4
|
107164b332bac9308752bb0a131dd912a81a7081
| 35
|
py
|
Python
|
ticdat/testing/__init__.py
|
adampkehoe/ticdat
|
bc9c9f6f407de0312f87e0e8aa8b247caa92ef3e
|
[
"BSD-2-Clause"
] | 15
|
2019-05-16T13:22:50.000Z
|
2022-02-18T08:07:10.000Z
|
ticdat/testing/__init__.py
|
adampkehoe/ticdat
|
bc9c9f6f407de0312f87e0e8aa8b247caa92ef3e
|
[
"BSD-2-Clause"
] | 86
|
2019-03-13T16:18:07.000Z
|
2022-02-07T22:13:15.000Z
|
ticdat/testing/__init__.py
|
adampkehoe/ticdat
|
bc9c9f6f407de0312f87e0e8aa8b247caa92ef3e
|
[
"BSD-2-Clause"
] | 9
|
2020-05-06T15:13:32.000Z
|
2022-01-26T15:30:44.000Z
|
"""
holds testing code utilites
"""
| 11.666667
| 27
| 0.685714
| 4
| 35
| 6
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.142857
| 35
| 3
| 28
| 11.666667
| 0.8
| 0.771429
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
109beed58b8f90a0572aa828e13489fc3e178bc9
| 80
|
py
|
Python
|
Animal.py
|
juliaviolet/Google_IT_Python_Crash_Course
|
e48f37f41000bb7fa6cfca197a964b792125067f
|
[
"MIT"
] | null | null | null |
Animal.py
|
juliaviolet/Google_IT_Python_Crash_Course
|
e48f37f41000bb7fa6cfca197a964b792125067f
|
[
"MIT"
] | null | null | null |
Animal.py
|
juliaviolet/Google_IT_Python_Crash_Course
|
e48f37f41000bb7fa6cfca197a964b792125067f
|
[
"MIT"
] | null | null | null |
animal = "Hippopotamus"
print(animal[3:6])
print(animal[-5])
print(animal[10:])
| 16
| 23
| 0.7
| 12
| 80
| 4.666667
| 0.583333
| 0.589286
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.067568
| 0.075
| 80
| 4
| 24
| 20
| 0.689189
| 0
| 0
| 0
| 0
| 0
| 0.15
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0.75
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
|
0
| 4
|
109c7fd3c15716c3ecee3cbdb28786d31cebd401
| 179
|
py
|
Python
|
encrypt_decrypt_fields/__init__.py
|
barseghyanartur/encrypt-decrypt-fields
|
4737883d7eea8baf3e96e982f8d7a17f0d4e4a6b
|
[
"MIT"
] | 9
|
2021-06-26T01:26:27.000Z
|
2022-01-01T13:56:56.000Z
|
encrypt_decrypt_fields/__init__.py
|
barseghyanartur/encrypt-decrypt-fields
|
4737883d7eea8baf3e96e982f8d7a17f0d4e4a6b
|
[
"MIT"
] | null | null | null |
encrypt_decrypt_fields/__init__.py
|
barseghyanartur/encrypt-decrypt-fields
|
4737883d7eea8baf3e96e982f8d7a17f0d4e4a6b
|
[
"MIT"
] | 3
|
2021-06-26T01:26:28.000Z
|
2021-12-14T23:11:03.000Z
|
from .crypto import Crypto # noqa
from .alchemy_fields import EncryptedAlchemyBinaryField # noqa
from .django_fields import EncryptedBinaryField # noqa
__version__ = '1.1.3'
| 29.833333
| 63
| 0.793296
| 21
| 179
| 6.47619
| 0.571429
| 0.117647
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.019608
| 0.145251
| 179
| 5
| 64
| 35.8
| 0.869281
| 0.078212
| 0
| 0
| 0
| 0
| 0.031056
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.75
| 0
| 0.75
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 4
|
52c578d89fbb94731b16407075d37caf257a20ea
| 288
|
py
|
Python
|
rpcparse.py
|
eternal-flame-AD/zjzk_rpcrelay
|
dd3a16b330efd858f0d95049966ecd151230ef99
|
[
"Apache-2.0"
] | null | null | null |
rpcparse.py
|
eternal-flame-AD/zjzk_rpcrelay
|
dd3a16b330efd858f0d95049966ecd151230ef99
|
[
"Apache-2.0"
] | null | null | null |
rpcparse.py
|
eternal-flame-AD/zjzk_rpcrelay
|
dd3a16b330efd858f0d95049966ecd151230ef99
|
[
"Apache-2.0"
] | null | null | null |
def parsenum(s,num):
result=0
#num*=2
for i in range(0,num):
result=result*16+s[i]
return result
def getint(s):
return parsenum(s,4)
def getshort(s):
return parsenum(s,2)
def getstring(s):
return str(s)
def getbool(s):
return (s==1) or (s==b"\x01")
| 20.571429
| 33
| 0.597222
| 51
| 288
| 3.372549
| 0.45098
| 0.162791
| 0.174419
| 0.186047
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.045455
| 0.236111
| 288
| 14
| 33
| 20.571429
| 0.736364
| 0.020833
| 0
| 0
| 0
| 0
| 0.014184
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.384615
| false
| 0
| 0
| 0.307692
| 0.769231
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 4
|
52ef0285866c3140b641cb100abab29cfded65cc
| 945
|
py
|
Python
|
horizon/test/django_pyscss_fix/__init__.py
|
hemantsonawane95/horizon-apelby
|
01a5e72219aeca8c1451701ee85e232ed0618751
|
[
"Apache-2.0"
] | 930
|
2015-01-04T08:06:03.000Z
|
2022-03-13T18:47:13.000Z
|
horizon/test/django_pyscss_fix/__init__.py
|
hemantsonawane95/horizon-apelby
|
01a5e72219aeca8c1451701ee85e232ed0618751
|
[
"Apache-2.0"
] | 26
|
2015-02-23T16:37:31.000Z
|
2020-07-02T08:37:41.000Z
|
horizon/test/django_pyscss_fix/__init__.py
|
hemantsonawane95/horizon-apelby
|
01a5e72219aeca8c1451701ee85e232ed0618751
|
[
"Apache-2.0"
] | 1,040
|
2015-01-01T18:48:28.000Z
|
2022-03-19T08:35:18.000Z
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import sys
import django
import six
import six.moves
# Temporary workaround for a situation that django-pyscss depends on
# a vendored version of six, django.utils.six which was dropped in Django 3.0.
# TODO(amotoki): Drop the workaround once django-pyscss supports Django 3.0+.
if django.VERSION[0] >= 3:
sys.modules['django.utils.six'] = six
sys.modules['django.utils.six.moves'] = six.moves
| 37.8
| 78
| 0.759788
| 151
| 945
| 4.754967
| 0.549669
| 0.083565
| 0.058496
| 0.044568
| 0.066852
| 0
| 0
| 0
| 0
| 0
| 0
| 0.012594
| 0.159788
| 945
| 24
| 79
| 39.375
| 0.891688
| 0.779894
| 0
| 0
| 0
| 0
| 0.196891
| 0.11399
| 0
| 0
| 0
| 0.041667
| 0
| 1
| 0
| true
| 0
| 0.571429
| 0
| 0.571429
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 4
|
5e04c6696eb0ec53959d177633f07e3b6893b1be
| 542
|
py
|
Python
|
dateutil.py
|
chenjianlong/my-gist
|
7c6444c7d118fb6a1fec1c2fa1fe80451e94e509
|
[
"Unlicense"
] | null | null | null |
dateutil.py
|
chenjianlong/my-gist
|
7c6444c7d118fb6a1fec1c2fa1fe80451e94e509
|
[
"Unlicense"
] | null | null | null |
dateutil.py
|
chenjianlong/my-gist
|
7c6444c7d118fb6a1fec1c2fa1fe80451e94e509
|
[
"Unlicense"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# vim:fenc=utf-8:et
"""Date and Time util
"""
__author__ = ["Jianlong Chen <jianlong99@gmail.com>"]
__date__ = "2013-07-17"
import datetime
def year():
return datetime.datetime.strftime(datetime.datetime.now(), '%Y')
def date_time():
return datetime.datetime.strftime(datetime.datetime.now(), '%Y-%m-%d %H:%M:%S')
def date():
return datetime.datetime.strftime(datetime.datetime.now(), '%Y-%m-%d')
def hour():
return datetime.datetime.strftime(datetime.datetime.now(), '%H')
| 24.636364
| 83
| 0.669742
| 76
| 542
| 4.657895
| 0.473684
| 0.361582
| 0.248588
| 0.338983
| 0.573446
| 0.573446
| 0.573446
| 0.435028
| 0.293785
| 0.293785
| 0
| 0.025263
| 0.123616
| 542
| 21
| 84
| 25.809524
| 0.72
| 0.110701
| 0
| 0
| 0
| 0
| 0.165198
| 0.048458
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0.090909
| null | null | 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
5e1023e9abaef5d844dacf010c876f58b9057518
| 255
|
py
|
Python
|
src/custom/test.py
|
CheshireCaat/BAS.Python
|
a15f67c847733cbe88ae4b4850e6939b8686ae88
|
[
"MIT"
] | 4
|
2019-08-27T12:24:27.000Z
|
2021-06-01T05:24:06.000Z
|
src/custom/test.py
|
CheshireCaat/BAS.Python
|
a15f67c847733cbe88ae4b4850e6939b8686ae88
|
[
"MIT"
] | null | null | null |
src/custom/test.py
|
CheshireCaat/BAS.Python
|
a15f67c847733cbe88ae4b4850e6939b8686ae88
|
[
"MIT"
] | null | null | null |
def invoke(_bas_vars, _bas_api, _bas_print):
    """Entry point called by the BAS host with its variable store, API
    object, and print callback.

    The vars/api handles are bound locally but not used by this test
    script; they are kept to mirror the host's calling contract.
    """
    emit = _bas_print
    bas_vars = _bas_vars  # unused here
    bas_api = _bas_api    # unused here
    # Imports happen at call time, as in the original script, so the src
    # package is only resolved when invoke() actually runs.
    from src.testa import testa as test_a
    from src.testb import testb as test_b
    emit(test_a())
    emit(test_b())
| 21.25
| 44
| 0.701961
| 44
| 255
| 3.590909
| 0.340909
| 0.253165
| 0.189873
| 0.164557
| 0.35443
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.227451
| 255
| 11
| 45
| 23.181818
| 0.80203
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.125
| false
| 0
| 0.25
| 0
| 0.375
| 0.5
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
|
0
| 4
|
eab3a1d3d3b0d1d798ccf43a0f589d5650ba29f4
| 165
|
py
|
Python
|
untitled5.py
|
rahulneal/InterctiveMap
|
c81bc15dbe2b4aaedf46c0698f9772e38a60e843
|
[
"MIT"
] | null | null | null |
untitled5.py
|
rahulneal/InterctiveMap
|
c81bc15dbe2b4aaedf46c0698f9772e38a60e843
|
[
"MIT"
] | null | null | null |
untitled5.py
|
rahulneal/InterctiveMap
|
c81bc15dbe2b4aaedf46c0698f9772e38a60e843
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Created on Tue Jan 28 01:14:13 2020
@author: ghanta
"""
import webbrowser

# Open the local CrimeReport "add to db" page in the user's default browser.
# NOTE(review): assumes a dev server is listening on localhost:1000 — the
# call silently does nothing useful if it is not running; confirm the port.
webbrowser.open('http://localhost:1000/CrimeReport/addtodb')
| 18.333333
| 61
| 0.690909
| 23
| 165
| 4.956522
| 0.956522
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.118056
| 0.127273
| 165
| 9
| 61
| 18.333333
| 0.673611
| 0.454545
| 0
| 0
| 0
| 0
| 0.493976
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 4
|
eab4b11429cd8affbb33aef88fe8199668b7107a
| 67
|
py
|
Python
|
rime/plugins/judge_system/__init__.py
|
matsu7874/rime
|
f6fd81a65dd4128d0385b0e1a11b7f7f03b342eb
|
[
"MIT"
] | 31
|
2017-02-20T05:04:06.000Z
|
2022-01-21T09:05:17.000Z
|
rime/plugins/judge_system/__init__.py
|
matsu7874/rime
|
f6fd81a65dd4128d0385b0e1a11b7f7f03b342eb
|
[
"MIT"
] | 62
|
2017-02-14T10:10:06.000Z
|
2021-05-17T00:00:01.000Z
|
rime/plugins/judge_system/__init__.py
|
matsu7874/rime
|
f6fd81a65dd4128d0385b0e1a11b7f7f03b342eb
|
[
"MIT"
] | 21
|
2017-02-13T16:41:42.000Z
|
2021-08-19T00:34:49.000Z
|
#!/usr/bin/python
"""Submodules for supporting judge systems.
"""
| 13.4
| 43
| 0.701493
| 8
| 67
| 5.875
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.119403
| 67
| 4
| 44
| 16.75
| 0.79661
| 0.850746
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
ead6b50c31b97afb22d854e061d42a9aa6c2fa49
| 120
|
py
|
Python
|
practice0/pattern_match.py
|
DeercoderPractice/python
|
4a32cc8922f47baea390e8167e34f185f67ae0fd
|
[
"MIT"
] | null | null | null |
practice0/pattern_match.py
|
DeercoderPractice/python
|
4a32cc8922f47baea390e8167e34f185f67ae0fd
|
[
"MIT"
] | null | null | null |
practice0/pattern_match.py
|
DeercoderPractice/python
|
4a32cc8922f47baea390e8167e34f185f67ae0fd
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
"""Demonstrate re.match with a captured group.

Pattern walk-through for 'Hello Python world': 'Hello' matches literally,
'[ \\t]' consumes the space, '.' consumes 'P', and the greedy group
captures 'ython ' before the literal 'world'.
"""
import re

match = re.match('Hello[ \t].(.*)world', 'Hello Python world')
# Fix: the original used the Python 2 print statement, a SyntaxError under
# Python 3. Also guard against a failed match, which would otherwise raise
# AttributeError on None.
if match:
    print(match.group(1))
| 20
| 65
| 0.65
| 19
| 120
| 4.105263
| 0.684211
| 0.179487
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.009709
| 0.141667
| 120
| 5
| 66
| 24
| 0.747573
| 0.166667
| 0
| 0
| 0
| 0
| 0.414141
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0.333333
| null | null | 0.333333
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 4
|
d808d35b692a866728c232df07f9479109a6e1c0
| 5,043
|
py
|
Python
|
tests/defxmlschema/test_chapter10.py
|
nimish/xsdata
|
7afe2781b66982428cc1731f53c065086acd35c1
|
[
"MIT"
] | null | null | null |
tests/defxmlschema/test_chapter10.py
|
nimish/xsdata
|
7afe2781b66982428cc1731f53c065086acd35c1
|
[
"MIT"
] | null | null | null |
tests/defxmlschema/test_chapter10.py
|
nimish/xsdata
|
7afe2781b66982428cc1731f53c065086acd35c1
|
[
"MIT"
] | null | null | null |
import os
from pathlib import Path
from click.testing import CliRunner
from tests.conftest import load_class
from tests.conftest import validate_bindings
from xsdata import cli
# Change into the repository root (three levels above this test file) so the
# relative fixture paths used below resolve no matter where pytest started.
os.chdir(Path(__file__).parent.parent.parent)
def _invoke_cli(schema, package):
    """Run the xsdata CLI on *schema* with --package *package*.

    Click's CliRunner captures exceptions instead of letting them
    propagate, so any stored exception is re-raised to make pytest report
    the real failure. Returns the click Result for further inspection.
    """
    runner = CliRunner()
    result = runner.invoke(cli, [schema, "--package", package])
    if result.exception:
        raise result.exception
    return result


def _run_example(name):
    """Generate bindings for one chapter-10 example schema, by file stem."""
    schema = "tests/fixtures/defxmlschema/chapter10/{}.xsd".format(name)
    return _invoke_cli(schema, "tests.fixtures.defxmlschema.chapter10")


def test_integration():
    """End-to-end: generate bindings for chapter10.xsd and validate the
    generated `Sizes` class against the schema."""
    schema = Path("tests/fixtures/defxmlschema/chapter10/chapter10.xsd")
    result = _invoke_cli(str(schema), "tests.fixtures.defxmlschema.chapter10")
    clazz = load_class(result.output, "Sizes")
    validate_bindings(schema, clazz)


# Each example test only checks that code generation completes without
# raising; the shared helper removes fifteen copies of the same boilerplate.

def test_example1001():
    _run_example("example1001")


def test_example1002():
    _run_example("example1002")


def test_example1003():
    _run_example("example1003")


def test_example1004():
    _run_example("example1004")


def test_example1005():
    _run_example("example1005")


def test_example1007():
    _run_example("example1007")


def test_example1009():
    _run_example("example1009")


def test_example1010():
    _run_example("example1010")


def test_example1012():
    _run_example("example1012")


def test_example1013():
    _run_example("example1013")


def test_example1014():
    _run_example("example1014")


def test_example1015():
    _run_example("example1015")


def test_example1016():
    _run_example("example1016")


def test_example1018():
    _run_example("example1018")


def test_example10211():
    _run_example("example10211")
| 28.817143
| 72
| 0.707515
| 526
| 5,043
| 6.737643
| 0.10076
| 0.117381
| 0.225734
| 0.306998
| 0.815463
| 0.646163
| 0.646163
| 0.646163
| 0.646163
| 0.630079
| 0
| 0.045095
| 0.17331
| 5,043
| 174
| 73
| 28.982759
| 0.804989
| 0
| 0
| 0.652893
| 0
| 0
| 0.314892
| 0.285346
| 0
| 0
| 0
| 0
| 0
| 1
| 0.132231
| false
| 0
| 0.049587
| 0
| 0.181818
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
d82a8e0b5b960274f285fc74a6fb713f8dff3127
| 241
|
py
|
Python
|
tests/test.py
|
palewire/nws-wwa
|
0c9f65443a3b98febbb2d3ec212d5b83beb7cfe5
|
[
"MIT"
] | null | null | null |
tests/test.py
|
palewire/nws-wwa
|
0c9f65443a3b98febbb2d3ec212d5b83beb7cfe5
|
[
"MIT"
] | 4
|
2021-10-04T23:30:31.000Z
|
2021-10-04T23:32:07.000Z
|
tests/test.py
|
palewire/nws-wwa
|
0c9f65443a3b98febbb2d3ec212d5b83beb7cfe5
|
[
"MIT"
] | null | null | null |
import pytest
from nws_wwa import get_all, get_hazards, get_warnings
@pytest.mark.vcr()
def test_all():
    # Smoke test: passes if get_all() completes without raising. The vcr
    # marker presumably replays a recorded HTTP cassette rather than hitting
    # the live NWS service — confirm the pytest-vcr/pytest-recording setup.
    get_all()
@pytest.mark.vcr()
def test_hazards():
    # Smoke test: passes if get_hazards() completes without raising; HTTP
    # traffic is presumably served from a recorded cassette via the vcr marker.
    get_hazards()
@pytest.mark.vcr()
def test_warnings():
    # Smoke test: passes if get_warnings() completes without raising; HTTP
    # traffic is presumably served from a recorded cassette via the vcr marker.
    get_warnings()
| 12.684211
| 54
| 0.705394
| 36
| 241
| 4.444444
| 0.361111
| 0.1875
| 0.24375
| 0.3
| 0.375
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.161826
| 241
| 18
| 55
| 13.388889
| 0.792079
| 0
| 0
| 0.272727
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.272727
| true
| 0
| 0.181818
| 0
| 0.454545
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
dc1e08ac28e02d91118ddf166da29b27f9487b68
| 13,031
|
py
|
Python
|
custom/icds_reports/tests/agg_tests/reports/test_bihar_api_mother_details.py
|
tobiasmcnulty/commcare-hq
|
234aa1fba98a96de1b625bbd70b2066fc877eed1
|
[
"BSD-3-Clause"
] | 1
|
2020-07-14T13:00:23.000Z
|
2020-07-14T13:00:23.000Z
|
custom/icds_reports/tests/agg_tests/reports/test_bihar_api_mother_details.py
|
tobiasmcnulty/commcare-hq
|
234aa1fba98a96de1b625bbd70b2066fc877eed1
|
[
"BSD-3-Clause"
] | null | null | null |
custom/icds_reports/tests/agg_tests/reports/test_bihar_api_mother_details.py
|
tobiasmcnulty/commcare-hq
|
234aa1fba98a96de1b625bbd70b2066fc877eed1
|
[
"BSD-3-Clause"
] | null | null | null |
import datetime
from datetime import date
from django.test import TestCase
from mock import patch
from custom.icds_reports.reports.bihar_api import get_mother_details
from custom.icds_reports.tasks import update_bihar_api_table
@patch(
    'custom.icds_reports.utils.aggregation_helpers.distributed.bihar_api_demographics.BiharApiDemographicsHelper.bihar_state_id',
    'st1')
class BiharAPIMotherTest(TestCase):
    """Checks the Bihar API mother-details payload for a fixed month
    (May 2017) and state ('st1'), one known ccs case per test.

    Every test previously repeated the same fetch-and-scan boilerplate and
    relied on an unbound local (NameError) when the case id was absent;
    the shared helper below removes the duplication and fails with a clear
    message instead.
    """

    def _mother_details_for(self, ccs_case_id):
        """Return the API record for *ccs_case_id*, or fail the test.

        Fetches the full May-2017 payload for state 'st1' and scans it for
        the requested case; calls self.fail() if the id is not present so
        the failure reads as an assertion, not a NameError.
        """
        data, count = get_mother_details(
            month=date(2017, 5, 1).strftime("%Y-%m-%d"),
            state_id='st1',
            last_ccs_case_id=''
        )
        for case in data:
            if case['ccs_case_id'] == ccs_case_id:
                return case
        self.fail('ccs_case_id %s not found in API response' % ccs_case_id)

    def test_pregnant_mother_current_month_positive_ifa(self):
        """Pregnant, delivered in the current month, positive IFA count."""
        self.assertEqual(
            {
                'add': datetime.date(2017, 6, 1),
                'anc_1': None,
                'anc_2': None,
                'anc_3': None,
                'anc_4': None,
                'causes_for_ifa': None,
                'ccs_case_id': 'c7c368d4-e554-4e12-a62f-87403453a515',
                'edd': datetime.date(2017, 8, 31),
                'hb': None,
                'household_id': None,
                'husband_id': None,
                'husband_name': None,
                'ifa_consumed_7_days': 4,
                'is_pregnant': 1,
                'last_preg_tt': None,
                'last_preg_year': None,
                'lmp': datetime.date(2016, 11, 24),
                'married': None,
                'maternal_complications': None,
                'person_id': '3c9658d5-e522-4f2c-b636-3ed194d89cb1',
                'preg_reg_date': datetime.date(2017, 4, 12),
                'total_ifa_tablets_received': 10,
                'tt_1': None,
                'tt_2': None,
                'tt_booster': None
            },
            self._mother_details_for('c7c368d4-e554-4e12-a62f-87403453a515')
        )

    def test_pregnant_mother_current_month_negative_ifa(self):
        """Pregnant, current month, negative IFA tablets received."""
        self.assertEqual(
            {
                'add': datetime.date(2017, 6, 1),
                'anc_1': None,
                'anc_2': None,
                'anc_3': None,
                'anc_4': None,
                'causes_for_ifa': None,
                'ccs_case_id': 'f3d9aeb7-d3f5-49d1-aa78-4f33aa5cee77',
                'edd': datetime.date(2017, 7, 9),
                'hb': None,
                'household_id': None,
                'husband_id': None,
                'husband_name': None,
                'ifa_consumed_7_days': 4,
                'is_pregnant': 1,
                'last_preg_tt': None,
                'last_preg_year': None,
                'lmp': datetime.date(2016, 10, 2),
                'married': None,
                'maternal_complications': None,
                'person_id': 'e94393fd-5007-48f3-95fb-fc1c7f6d3bce',
                'preg_reg_date': datetime.date(2017, 4, 12),
                'total_ifa_tablets_received': -10,
                'tt_1': None,
                'tt_2': None,
                'tt_booster': None
            },
            self._mother_details_for('f3d9aeb7-d3f5-49d1-aa78-4f33aa5cee77')
        )

    def test_non_pregnant_mother_current_month_negative_ifa(self):
        """Not pregnant, current month, negative IFA tablets received."""
        self.assertEqual(
            {
                'add': datetime.date(2017, 3, 20),
                'anc_1': None,
                'anc_2': None,
                'anc_3': None,
                'anc_4': None,
                'causes_for_ifa': None,
                'ccs_case_id': '10a53900-f65e-46b7-ae0c-f32a208c0677',
                'edd': datetime.date(2017, 8, 8),
                'hb': None,
                'household_id': None,
                'husband_id': None,
                'husband_name': None,
                'ifa_consumed_7_days': 2,
                'is_pregnant': None,
                'last_preg_tt': None,
                'last_preg_year': None,
                'lmp': datetime.date(2016, 11, 1),
                'married': None,
                'maternal_complications': None,
                'person_id': '177bdbb3-d4db-4077-9720-86a99b12ba25',
                'preg_reg_date': None,
                'total_ifa_tablets_received': -90,
                'tt_1': None,
                'tt_2': None,
                'tt_booster': None
            },
            self._mother_details_for('10a53900-f65e-46b7-ae0c-f32a208c0677')
        )

    def test_pregnant_mother_past_month_negative_ifa(self):
        """Pregnant, past month, negative IFA tablets received."""
        self.assertEqual(
            {
                'add': datetime.date(2017, 6, 1),
                'anc_1': None,
                'anc_2': None,
                'anc_3': None,
                'anc_4': None,
                'causes_for_ifa': None,
                'ccs_case_id': '9bd35459-6a3c-43ad-a144-0c0013b4272e',
                'edd': datetime.date(2017, 8, 31),
                'hb': 2,
                'household_id': None,
                'husband_id': None,
                'husband_name': None,
                'ifa_consumed_7_days': 4,
                'is_pregnant': 1,
                'last_preg_tt': None,
                'last_preg_year': None,
                'lmp': datetime.date(2016, 11, 24),
                'married': None,
                'maternal_complications': None,
                'person_id': '8c5f8d3f-c6cd-4737-a994-2e494aeda05a',
                'preg_reg_date': datetime.date(2017, 4, 12),
                'total_ifa_tablets_received': -98,
                'tt_1': None,
                'tt_2': None,
                'tt_booster': None
            },
            self._mother_details_for('9bd35459-6a3c-43ad-a144-0c0013b4272e')
        )

    def test_non_pregnant_mother_past_month_negative_ifa(self):
        """Not pregnant, past month, negative IFA tablets received."""
        self.assertEqual(
            {
                'add': datetime.date(2017, 5, 2),
                'anc_1': None,
                'anc_2': None,
                'anc_3': None,
                'anc_4': None,
                'causes_for_ifa': None,
                'ccs_case_id': 'a87c92ec-d5a6-4dae-a8a1-14e7abf31441',
                'edd': datetime.date(2017, 7, 12),
                'hb': None,
                'household_id': None,
                'husband_id': None,
                'husband_name': None,
                'ifa_consumed_7_days': 2,
                'is_pregnant': None,
                'last_preg_tt': None,
                'last_preg_year': None,
                'lmp': datetime.date(2016, 10, 5),
                'married': None,
                'maternal_complications': None,
                'person_id': 'c013cab2-e924-43f6-9a2d-cff137774229',
                'preg_reg_date': None,
                'total_ifa_tablets_received': -99,
                'tt_1': None,
                'tt_2': None,
                'tt_booster': None
            },
            self._mother_details_for('a87c92ec-d5a6-4dae-a8a1-14e7abf31441')
        )

    def test_pregnant_mother_past_month_positive_ifa(self):
        """Pregnant, past month, fully populated record (ANC/TT dates etc.)."""
        self.assertEqual(
            {
                "household_id": 'b6a55583-e07d-4367-ae5c-f3ff22f85271',
                "person_id": "cc75916b-a71e-4c4d-a537-5c7bef95b12f",
                "ccs_case_id": "08d215e7-81c7-4ad3-9c7d-1b27f0ed4bb5",
                "married": 1,
                "husband_name": "test_husband_name",
                "husband_id": "b1e7f7d8-149e-4ffc-a876-2a70a469edbc",
                "last_preg_year": 12,
                "is_pregnant": 1,
                "preg_reg_date": datetime.date(2017, 4, 12),
                "tt_1": datetime.date(2017, 5, 1),
                'tt_2': datetime.date(2017, 5, 2),
                "tt_booster": datetime.date(2017, 5, 3),
                "hb": 2,
                "add": datetime.date(2017, 6, 1),
                "last_preg_tt": None,
                "lmp": datetime.date(2016, 10, 2),
                "anc_1": datetime.date(2016, 10, 8),
                "anc_2": datetime.date(2016, 11, 7),
                "anc_3": datetime.date(2016, 12, 7),
                "anc_4": datetime.date(2017, 1, 6),
                "edd": datetime.date(2017, 7, 9),
                "total_ifa_tablets_received": 10,
                "ifa_consumed_7_days": 4,
                "causes_for_ifa": "side_effects",
                "maternal_complications": 'Discharge'
            },
            self._mother_details_for('08d215e7-81c7-4ad3-9c7d-1b27f0ed4bb5')
        )

    def test_non_pregnant_mother_past_month_positive_ifa(self):
        """Past month, IFA total reported as None."""
        self.assertEqual(
            {
                'add': datetime.date(2017, 6, 1),
                'anc_1': None,
                'anc_2': None,
                'anc_3': None,
                'anc_4': None,
                'causes_for_ifa': None,
                'ccs_case_id': 'f77d2181-5850-4675-9abe-2276acca4198',
                'edd': datetime.date(2017, 7, 9),
                'hb': None,
                'household_id': None,
                'husband_id': None,
                'husband_name': None,
                'ifa_consumed_7_days': 4,
                'is_pregnant': 1,
                'last_preg_tt': None,
                'last_preg_year': None,
                'lmp': datetime.date(2016, 10, 2),
                'married': None,
                'maternal_complications': None,
                'person_id': 'ef12d6ce-a9af-4cf5-8459-0286432c5465',
                'preg_reg_date': datetime.date(2017, 4, 12),
                'total_ifa_tablets_received': None,
                'tt_1': None,
                'tt_2': None,
                'tt_booster': None
            },
            self._mother_details_for('f77d2181-5850-4675-9abe-2276acca4198')
        )

    def test_non_pregnant_mother_current_month_positive_ifa(self):
        """Not pregnant, current month, fully populated ANC/TT record."""
        self.assertEqual(
            {
                "household_id": None,
                "person_id": "1d3e45d5-bd58-487c-9f93-7da2cf67c8d7",
                "ccs_case_id": "f491263a-4846-4ffd-a64d-a6653c03dd03",
                "married": None,
                "husband_name": None,
                "husband_id": None,
                "last_preg_year": 14,
                "is_pregnant": None,
                "preg_reg_date": None,
                "tt_1": datetime.date(2017, 5, 31),
                'tt_2': datetime.date(2017, 6, 1),
                "tt_booster": datetime.date(2017, 6, 2),
                "hb": None,
                "add": datetime.date(2017, 3, 20),
                "last_preg_tt": None,
                "lmp": datetime.date(2016, 11, 1),
                "anc_1": datetime.date(2016, 11, 7),
                "anc_2": datetime.date(2016, 12, 7),
                "anc_3": datetime.date(2017, 1, 6),
                "anc_4": datetime.date(2017, 2, 5),
                "edd": datetime.date(2017, 8, 8),
                "total_ifa_tablets_received": 100,
                "ifa_consumed_7_days": 2,
                "causes_for_ifa": "dont_remember",
                "maternal_complications": "Discharge"
            },
            self._mother_details_for('f491263a-4846-4ffd-a64d-a6653c03dd03')
        )
| 36.29805
| 129
| 0.476479
| 1,371
| 13,031
| 4.238512
| 0.143691
| 0.088797
| 0.082602
| 0.015488
| 0.838582
| 0.793667
| 0.738943
| 0.714679
| 0.593874
| 0.577009
| 0
| 0.122909
| 0.40373
| 13,031
| 358
| 130
| 36.399441
| 0.624968
| 0
| 0
| 0.60303
| 0
| 0
| 0.247487
| 0.110659
| 0
| 0
| 0
| 0
| 0.024242
| 1
| 0.024242
| false
| 0
| 0.018182
| 0
| 0.045455
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
dc1ec478a120a2478735ace34d0d4d1cf70d39ea
| 998
|
py
|
Python
|
bronze/libs/slack/__init__.py
|
HumanCellAtlas/bronze-chime
|
4915813035b79f3d4f3add087d122880cd71b6ad
|
[
"BSD-3-Clause"
] | null | null | null |
bronze/libs/slack/__init__.py
|
HumanCellAtlas/bronze-chime
|
4915813035b79f3d4f3add087d122880cd71b6ad
|
[
"BSD-3-Clause"
] | null | null | null |
bronze/libs/slack/__init__.py
|
HumanCellAtlas/bronze-chime
|
4915813035b79f3d4f3add087d122880cd71b6ad
|
[
"BSD-3-Clause"
] | null | null | null |
class User:
    """Lightweight representation of a Slack user.

    TODO: potentially switch to slack official api client:
    https://github.com/slackapi/python-slackclient
    """

    def __init__(self, slack_id: str, slack_name: str = None, **kwargs):
        """Store the id/name; any extra keyword args become attributes."""
        self.slack_id, self.slack_name = slack_id, slack_name
        for k, v in kwargs.items():
            setattr(self, k, v)

    def valid_name(self) -> bool:
        # NOTE(review): a None name also compares unequal to '' and is thus
        # treated as "valid" — confirm that is intended.
        return self.slack_name != ''

    def valid_id(self) -> bool:
        # A well-formed id is 9 characters starting with 'U' (e.g. 'U12345678').
        return len(self.slack_id) == 9 and self.slack_id.startswith('U')

    @property
    def tag(self) -> str:
        """Mention string that pings this user in a Slack message."""
        return f"<@{self.slack_id}>"

    def __str__(self):
        return f"{self.slack_id}"

    def __repr__(self):
        return self.__str__()

    def __hash__(self):
        # Fix: hash on slack_id only, matching __eq__. The original hashed
        # (slack_id, slack_name), so two users that compared equal (same id,
        # different names) could land in different hash buckets, breaking
        # set/dict membership invariants.
        return hash(self.slack_id)

    def __eq__(self, other) -> bool:
        # Equality is by slack_id. Returning NotImplemented (rather than a
        # hard False) for non-User operands follows the data-model protocol
        # and lets Python try the reflected comparison; the observable result
        # against unrelated types is still False.
        if isinstance(other, User):
            return self.slack_id == other.slack_id
        return NotImplemented

    def __json__(self):
        """Serializable dict form consumed by the project's JSON encoder."""
        return {'slack_name': self.slack_name, 'slack_id': self.slack_id}
| 30.242424
| 111
| 0.630261
| 140
| 998
| 4.142857
| 0.357143
| 0.201724
| 0.17069
| 0.082759
| 0.201724
| 0.155172
| 0
| 0
| 0
| 0
| 0
| 0.001314
| 0.237475
| 998
| 32
| 112
| 31.1875
| 0.760841
| 0.101202
| 0
| 0
| 0
| 0
| 0.058361
| 0
| 0
| 0
| 0
| 0.03125
| 0
| 1
| 0.409091
| false
| 0
| 0
| 0.363636
| 0.818182
| 0
| 0
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
|
0
| 4
|
dc20c843551381a8f6b8f6ecdf0f5e3bb26254b5
| 452
|
py
|
Python
|
two_point_clustering/_nbdev.py
|
saitosmst/hetdex_cosmology
|
e0dc86f48e048f1ce2b046b6513a24a8809e55e3
|
[
"Apache-2.0"
] | null | null | null |
two_point_clustering/_nbdev.py
|
saitosmst/hetdex_cosmology
|
e0dc86f48e048f1ce2b046b6513a24a8809e55e3
|
[
"Apache-2.0"
] | null | null | null |
two_point_clustering/_nbdev.py
|
saitosmst/hetdex_cosmology
|
e0dc86f48e048f1ce2b046b6513a24a8809e55e3
|
[
"Apache-2.0"
] | null | null | null |
# AUTOGENERATED BY NBDEV! DO NOT EDIT!

# Public API of this generated module index.
__all__ = ["index", "modules", "custom_doc_links", "git_url"]

# Maps each exported symbol to the notebook it was defined in.
index = {"Field_on_grid": "field_on_grid.ipynb",
         "NTHREAD": "field_on_grid.ipynb",
         "EPS": "field_on_grid.ipynb"}

# Generated Python modules belonging to this package.
modules = ["field_on_grid.py"]

# NOTE(review): the {saitosmst}/{branch} placeholders look unsubstituted, and
# doc_url is defined but absent from __all__ — presumably raw nbdev template
# output; confirm against the project's settings.ini.
doc_url = "https://{saitosmst}.github.io/two_point_clustering/"
git_url = "https://github.com/{saitosmst}/two_point_clustering/tree/{branch}/"

# nbdev hook: return a custom documentation URL for *name*, or None to fall
# back to the default location.
def custom_doc_links(name): return None
| 28.25
| 78
| 0.701327
| 64
| 452
| 4.5625
| 0.546875
| 0.119863
| 0.188356
| 0.164384
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.128319
| 452
| 15
| 79
| 30.133333
| 0.741117
| 0.079646
| 0
| 0
| 1
| 0
| 0.599034
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.125
| false
| 0
| 0
| 0.125
| 0.125
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
|
0
| 4
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.