hexsha
string | size
int64 | ext
string | lang
string | max_stars_repo_path
string | max_stars_repo_name
string | max_stars_repo_head_hexsha
string | max_stars_repo_licenses
list | max_stars_count
int64 | max_stars_repo_stars_event_min_datetime
string | max_stars_repo_stars_event_max_datetime
string | max_issues_repo_path
string | max_issues_repo_name
string | max_issues_repo_head_hexsha
string | max_issues_repo_licenses
list | max_issues_count
int64 | max_issues_repo_issues_event_min_datetime
string | max_issues_repo_issues_event_max_datetime
string | max_forks_repo_path
string | max_forks_repo_name
string | max_forks_repo_head_hexsha
string | max_forks_repo_licenses
list | max_forks_count
int64 | max_forks_repo_forks_event_min_datetime
string | max_forks_repo_forks_event_max_datetime
string | content
string | avg_line_length
float64 | max_line_length
int64 | alphanum_fraction
float64 | qsc_code_num_words_quality_signal
int64 | qsc_code_num_chars_quality_signal
float64 | qsc_code_mean_word_length_quality_signal
float64 | qsc_code_frac_words_unique_quality_signal
float64 | qsc_code_frac_chars_top_2grams_quality_signal
float64 | qsc_code_frac_chars_top_3grams_quality_signal
float64 | qsc_code_frac_chars_top_4grams_quality_signal
float64 | qsc_code_frac_chars_dupe_5grams_quality_signal
float64 | qsc_code_frac_chars_dupe_6grams_quality_signal
float64 | qsc_code_frac_chars_dupe_7grams_quality_signal
float64 | qsc_code_frac_chars_dupe_8grams_quality_signal
float64 | qsc_code_frac_chars_dupe_9grams_quality_signal
float64 | qsc_code_frac_chars_dupe_10grams_quality_signal
float64 | qsc_code_frac_chars_replacement_symbols_quality_signal
float64 | qsc_code_frac_chars_digital_quality_signal
float64 | qsc_code_frac_chars_whitespace_quality_signal
float64 | qsc_code_size_file_byte_quality_signal
float64 | qsc_code_num_lines_quality_signal
float64 | qsc_code_num_chars_line_max_quality_signal
float64 | qsc_code_num_chars_line_mean_quality_signal
float64 | qsc_code_frac_chars_alphabet_quality_signal
float64 | qsc_code_frac_chars_comments_quality_signal
float64 | qsc_code_cate_xml_start_quality_signal
float64 | qsc_code_frac_lines_dupe_lines_quality_signal
float64 | qsc_code_cate_autogen_quality_signal
float64 | qsc_code_frac_lines_long_string_quality_signal
float64 | qsc_code_frac_chars_string_length_quality_signal
float64 | qsc_code_frac_chars_long_word_length_quality_signal
float64 | qsc_code_frac_lines_string_concat_quality_signal
float64 | qsc_code_cate_encoded_data_quality_signal
float64 | qsc_code_frac_chars_hex_words_quality_signal
float64 | qsc_code_frac_lines_prompt_comments_quality_signal
float64 | qsc_code_frac_lines_assert_quality_signal
float64 | qsc_codepython_cate_ast_quality_signal
float64 | qsc_codepython_frac_lines_func_ratio_quality_signal
float64 | qsc_codepython_cate_var_zero_quality_signal
bool | qsc_codepython_frac_lines_pass_quality_signal
float64 | qsc_codepython_frac_lines_import_quality_signal
float64 | qsc_codepython_frac_lines_simplefunc_quality_signal
float64 | qsc_codepython_score_lines_no_logic_quality_signal
float64 | qsc_codepython_frac_lines_print_quality_signal
float64 | qsc_code_num_words
int64 | qsc_code_num_chars
int64 | qsc_code_mean_word_length
int64 | qsc_code_frac_words_unique
null | qsc_code_frac_chars_top_2grams
int64 | qsc_code_frac_chars_top_3grams
int64 | qsc_code_frac_chars_top_4grams
int64 | qsc_code_frac_chars_dupe_5grams
int64 | qsc_code_frac_chars_dupe_6grams
int64 | qsc_code_frac_chars_dupe_7grams
int64 | qsc_code_frac_chars_dupe_8grams
int64 | qsc_code_frac_chars_dupe_9grams
int64 | qsc_code_frac_chars_dupe_10grams
int64 | qsc_code_frac_chars_replacement_symbols
int64 | qsc_code_frac_chars_digital
int64 | qsc_code_frac_chars_whitespace
int64 | qsc_code_size_file_byte
int64 | qsc_code_num_lines
int64 | qsc_code_num_chars_line_max
int64 | qsc_code_num_chars_line_mean
int64 | qsc_code_frac_chars_alphabet
int64 | qsc_code_frac_chars_comments
int64 | qsc_code_cate_xml_start
int64 | qsc_code_frac_lines_dupe_lines
int64 | qsc_code_cate_autogen
int64 | qsc_code_frac_lines_long_string
int64 | qsc_code_frac_chars_string_length
int64 | qsc_code_frac_chars_long_word_length
int64 | qsc_code_frac_lines_string_concat
null | qsc_code_cate_encoded_data
int64 | qsc_code_frac_chars_hex_words
int64 | qsc_code_frac_lines_prompt_comments
int64 | qsc_code_frac_lines_assert
int64 | qsc_codepython_cate_ast
int64 | qsc_codepython_frac_lines_func_ratio
int64 | qsc_codepython_cate_var_zero
int64 | qsc_codepython_frac_lines_pass
int64 | qsc_codepython_frac_lines_import
int64 | qsc_codepython_frac_lines_simplefunc
int64 | qsc_codepython_score_lines_no_logic
int64 | qsc_codepython_frac_lines_print
int64 | effective
string | hits
int64 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
0c726a91b132ee60f058b25c0fba09ddeb748fb2
| 5,324
|
py
|
Python
|
tests/rlax_dqn/test_experience_buffer.py
|
vsois/hanabi-agents
|
86a5bc16a07631ef8307f117b8831a681da1cc71
|
[
"MIT"
] | 1
|
2021-02-18T13:59:45.000Z
|
2021-02-18T13:59:45.000Z
|
tests/rlax_dqn/test_experience_buffer.py
|
vsois/hanabi-agents
|
86a5bc16a07631ef8307f117b8831a681da1cc71
|
[
"MIT"
] | null | null | null |
tests/rlax_dqn/test_experience_buffer.py
|
vsois/hanabi-agents
|
86a5bc16a07631ef8307f117b8831a681da1cc71
|
[
"MIT"
] | null | null | null |
import numpy as onp
from hanabi_agents.rlax_dqn.experience_buffer import ExperienceBuffer
def test_ctor():
obs_len = 5
lm_len = 3
reward_len = 1
capacity = 7
exp_buf = ExperienceBuffer(obs_len, lm_len, reward_len, capacity)
assert exp_buf._obs_tm1_buf.shape == (capacity, obs_len)
assert exp_buf._obs_t_buf.shape == (capacity, obs_len)
assert exp_buf._act_tm1_buf.shape == (capacity, 1)
assert exp_buf._lms_t_buf.shape == (capacity, lm_len)
assert exp_buf._rew_t_buf.shape == (capacity, 1)
assert exp_buf.capacity == capacity
assert exp_buf.size == 0
assert exp_buf.cur_idx == 0
def test_add_transition():
obs_len = 5
lm_len = 3
reward_len = 1
capacity = 7
exp_buf = ExperienceBuffer(obs_len, lm_len, reward_len, capacity)
trns_size = 4
obs1 = onp.random.randint(0, 2, (trns_size, obs_len))
obs2 = onp.random.randint(0, 2, (trns_size, obs_len))
while onp.all(obs1 == obs2):
obs2 = onp.random.randint(0, 2, (trns_size, obs_len))
assert not onp.all(obs1 == obs2)
acts = onp.random.randint(0, lm_len + 1, (trns_size, 1))
rew = onp.random.random((trns_size, 1))
lms = onp.random.randint(0, 2, (trns_size, lm_len))
term = onp.random.randint(0, 2, (trns_size, 1))
exp_buf.add_transitions(
obs1,
acts,
rew,
obs2,
lms,
term)
assert exp_buf.size == trns_size
assert exp_buf.cur_idx == trns_size
assert onp.all(exp_buf._obs_tm1_buf[:trns_size] == obs1)
assert onp.all(exp_buf._act_tm1_buf[:trns_size] == acts)
assert onp.all(exp_buf._rew_t_buf[:trns_size] == rew)
assert onp.all(exp_buf._obs_t_buf[:trns_size] == obs2)
assert onp.all(exp_buf._lms_t_buf[:trns_size] == lms)
assert onp.all(exp_buf._terminal_t_buf[:trns_size] == term)
def test_add_transition_fill_capacity():
obs_len = 5
lm_len = 3
reward_len = 1
capacity = 7
exp_buf = ExperienceBuffer(obs_len, lm_len, reward_len, capacity)
trns_size = capacity
obs1 = onp.random.randint(0, 2, (trns_size, obs_len))
obs2 = onp.random.randint(0, 2, (trns_size, obs_len))
while onp.all(obs1 == obs2):
obs2 = onp.random.randint(0, 2, (trns_size, obs_len))
assert not onp.all(obs1 == obs2)
acts = onp.random.randint(0, lm_len + 1, (trns_size, 1))
rew = onp.random.random((trns_size, 1))
lms = onp.random.randint(0, 2, (trns_size, lm_len))
term = onp.random.randint(0, 2, (trns_size, 1))
exp_buf.add_transitions(
obs1,
acts,
rew,
obs2,
lms,
term)
assert exp_buf.size == capacity
assert exp_buf.cur_idx == 0
assert onp.all(exp_buf._obs_tm1_buf == obs1)
assert onp.all(exp_buf._act_tm1_buf == acts)
assert onp.all(exp_buf._rew_t_buf == rew)
assert onp.all(exp_buf._obs_t_buf == obs2)
assert onp.all(exp_buf._lms_t_buf == lms)
assert onp.all(exp_buf._terminal_t_buf == term)
def test_add_transition_capacity_overflow():
obs_len = 5
lm_len = 3
reward_len = 1
capacity = 7
exp_buf = ExperienceBuffer(obs_len, lm_len, reward_len, capacity)
trns_size = capacity + 1
obs1 = onp.random.randint(0, 2, (trns_size, obs_len))
obs2 = onp.random.randint(0, 2, (trns_size, obs_len))
while onp.all(obs1 == obs2):
obs2 = onp.random.randint(0, 2, (trns_size, obs_len))
assert not onp.all(obs1 == obs2)
acts = onp.random.randint(0, lm_len + 1, (trns_size, 1))
rew = onp.random.random((trns_size, 1))
lms = onp.random.randint(0, 2, (trns_size, lm_len))
term = onp.random.randint(0, 2, (trns_size, 1))
exp_buf.add_transitions(
obs1,
acts,
rew,
obs2,
lms,
term)
assert exp_buf.size == capacity
assert exp_buf.cur_idx == 1
obs1[:1] = obs1[-1:]
acts[:1] = acts[-1:]
rew[:1] = rew[-1:]
obs2[:1] = obs2[-1:]
lms[:1] = lms[-1:]
term[:1] = term[-1:]
assert onp.all(exp_buf._obs_tm1_buf == obs1[:-1])
assert onp.all(exp_buf._act_tm1_buf == acts[:-1])
assert onp.all(exp_buf._rew_t_buf == rew[:-1])
assert onp.all(exp_buf._obs_t_buf == obs2[:-1])
assert onp.all(exp_buf._lms_t_buf == lms[:-1])
assert onp.all(exp_buf._terminal_t_buf == term[:-1])
def test_getter():
obs_len = 5
lm_len = 3
reward_len = 1
capacity = 7
exp_buf = ExperienceBuffer(obs_len, lm_len, reward_len, capacity)
trns_size = capacity
obs1 = onp.random.randint(0, 2, (trns_size, obs_len))
obs2 = onp.random.randint(0, 2, (trns_size, obs_len))
acts = onp.random.randint(0, lm_len + 1, (trns_size, 1))
rew = onp.random.random((trns_size, 1))
lms = onp.random.randint(0, 2, (trns_size, lm_len))
term = onp.random.randint(0, 2, (trns_size, 1))
exp_buf.add_transitions(
obs1,
acts,
rew,
obs2,
lms,
term)
indices = [1, 3, 6]
samples = exp_buf[indices]
assert onp.all(samples.observation_tm1 == obs1[indices])
assert onp.all(samples.action_tm1 == acts[indices])
assert onp.all(samples.reward_t == rew[indices])
assert onp.all(samples.observation_t == obs2[indices])
assert onp.all(samples.legal_moves_t == lms[indices])
assert onp.all(samples.terminal_t == term[indices])
| 29.910112
| 69
| 0.638054
| 844
| 5,324
| 3.75
| 0.074645
| 0.079621
| 0.090995
| 0.123539
| 0.865087
| 0.800316
| 0.769352
| 0.750395
| 0.720379
| 0.55861
| 0
| 0.038873
| 0.226897
| 5,324
| 177
| 70
| 30.079096
| 0.730078
| 0
| 0
| 0.643357
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.286713
| 1
| 0.034965
| false
| 0
| 0.013986
| 0
| 0.048951
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
0c81391a3826d75032ffdf87e3da72da8c79c88e
| 21
|
py
|
Python
|
plugins/snowflake/dbt/adapters/snowflake/__version__.py
|
alexells/dbt
|
9c58f3465bf9907a2b62942de548f80650cd6288
|
[
"Apache-2.0"
] | null | null | null |
plugins/snowflake/dbt/adapters/snowflake/__version__.py
|
alexells/dbt
|
9c58f3465bf9907a2b62942de548f80650cd6288
|
[
"Apache-2.0"
] | null | null | null |
plugins/snowflake/dbt/adapters/snowflake/__version__.py
|
alexells/dbt
|
9c58f3465bf9907a2b62942de548f80650cd6288
|
[
"Apache-2.0"
] | null | null | null |
version = '0.21.0a1'
| 10.5
| 20
| 0.619048
| 4
| 21
| 3.25
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.277778
| 0.142857
| 21
| 1
| 21
| 21
| 0.444444
| 0
| 0
| 0
| 0
| 0
| 0.380952
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
0c848e7f982cd0f9f73ed4e8efa82df71c1e8867
| 223
|
py
|
Python
|
gym_simplifiedtetris/helpers/__init__.py
|
OliverOverend/gym-simplifiedtetristemp
|
832a0e99b52ec0c13bad1badc0dc1ba6453a6981
|
[
"MIT"
] | 3
|
2021-10-04T19:38:14.000Z
|
2022-03-15T09:15:09.000Z
|
gym_simplifiedtetris/helpers/__init__.py
|
OliverOverend/gym-simplifiedtetristemp
|
832a0e99b52ec0c13bad1badc0dc1ba6453a6981
|
[
"MIT"
] | 2
|
2021-10-05T18:19:29.000Z
|
2021-10-05T18:29:37.000Z
|
gym_simplifiedtetris/helpers/__init__.py
|
OliverOverend/gym-simplifiedtetristemp
|
832a0e99b52ec0c13bad1badc0dc1ba6453a6981
|
[
"MIT"
] | 3
|
2021-11-19T20:50:07.000Z
|
2022-03-24T16:37:37.000Z
|
"""Initialise the helpers package."""
from gym_simplifiedtetris.helpers.eval_agent import eval_agent
from gym_simplifiedtetris.helpers.train_q_learning import train_q_learning
__all__ = ["eval_agent", "train_q_learning"]
| 31.857143
| 74
| 0.834081
| 30
| 223
| 5.7
| 0.466667
| 0.157895
| 0.245614
| 0.350877
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.080717
| 223
| 6
| 75
| 37.166667
| 0.834146
| 0.139013
| 0
| 0
| 0
| 0
| 0.139785
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.666667
| 0
| 0.666667
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
0c9a3427cadbc1e2c9a59aa25623e098eb4c2d58
| 1,052
|
py
|
Python
|
python/anyascii/_data/_08a.py
|
casept/anyascii
|
d4f426b91751254b68eaa84c6cd23099edd668e6
|
[
"ISC"
] | null | null | null |
python/anyascii/_data/_08a.py
|
casept/anyascii
|
d4f426b91751254b68eaa84c6cd23099edd668e6
|
[
"ISC"
] | null | null | null |
python/anyascii/_data/_08a.py
|
casept/anyascii
|
d4f426b91751254b68eaa84c6cd23099edd668e6
|
[
"ISC"
] | null | null | null |
b='Yan Yan Ding Fu Qiu Qiu Jiao Hong Ji Fan Xun Diao Hong Chai Tao Xu Jie Yi Ren Xun Yin Shan Qi Tuo Ji Xun Yin E Fen Ya Yao Song Shen Yin Xin Jue Xiao Ne Chen You Zhi Xiong Fang Xin Chao She Yan Sa Zhun Xu Yi Yi Su Chi He Shen He Xu Zhen Zhu Zheng Gou Zi Zi Zhan Gu Fu Jian Die Ling Di Yang Li Nao Pan Zhou Gan Yi Ju Yao Zha Yi Yi Qu Zhao Ping Bi Xiong Qu Ba Da Zu Tao Zhu Ci Zhe Yong Xu Xun Yi Huang He Shi Cha Xiao Shi Hen Cha Gou Gui Quan Hui Jie Hua Gai Xiang Wei Shen Zhou Tong Mi Zhan Ming E Hui Yan Xiong Gua Er Bing Tiao Yi Lei Zhu Kuang Kua Wu Yu Teng Ji Zhi Ren Cu Lang E Kuang Ei Shi Ting Dan Bei Chan You Keng Qiao Qin Shua An Yu Xiao Cheng Jie Xian Wu Wu Gao Song Bu Hui Jing Shuo Zhen Shuo Du Hua Chang Shui Jie Ke Qu Cong Xiao Sui Wang Xian Fei Chi Ta Yi Ni Yin Diao Pi Zhuo Chan Chen Zhun Ji Qi Tan Zhui Wei Ju Qing Dong Zheng Ze Zou Qian Zhuo Liang Jian Chu Hao Lun Shen Biao Hua Pian Yu Die Xu Pian Shi Xuan Shi Hun Hua E Zhong Di Xie Fu Pu Ting Jian Qi Yu Zi Zhuan Xi Hui Yin An Xian Nan Chen Feng Zhu Yang Yan Huang Xuan Ge Nuo Qi'
| 1,052
| 1,052
| 0.754753
| 257
| 1,052
| 3.089494
| 0.583658
| 0.015113
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.242395
| 1,052
| 1
| 1,052
| 1,052
| 0.996236
| 0
| 0
| 0
| 0
| 1
| 0.995252
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
4903f1f7f8bc6a5a6ba86d3c7f73f0a9b0e7735c
| 126
|
py
|
Python
|
server/test_api.py
|
SOLARMA/hackafake-backend
|
d2f2a7c144cde0446649cdde776cd1e05ccb4f85
|
[
"BSD-3-Clause"
] | 1
|
2021-11-05T11:52:43.000Z
|
2021-11-05T11:52:43.000Z
|
server/test_api.py
|
hackafake/hackafake-backend
|
d2f2a7c144cde0446649cdde776cd1e05ccb4f85
|
[
"BSD-3-Clause"
] | 30
|
2018-04-18T07:14:40.000Z
|
2022-01-10T07:39:24.000Z
|
server/test_api.py
|
SOLARMA/hackafake-backend
|
d2f2a7c144cde0446649cdde776cd1e05ccb4f85
|
[
"BSD-3-Clause"
] | null | null | null |
from flask import Blueprint
from flask_json import as_json
from random import randint
test_bp = Blueprint('test', __name__)
| 18
| 37
| 0.809524
| 19
| 126
| 5
| 0.578947
| 0.189474
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.142857
| 126
| 6
| 38
| 21
| 0.87963
| 0
| 0
| 0
| 0
| 0
| 0.032
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.75
| 0
| 0.75
| 0.5
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 1
|
0
| 5
|
4905d32c4c045508a4d92e9eb3c3db8ee456c441
| 43
|
py
|
Python
|
stats/models/__init__.py
|
48ix/stats
|
4b7ae032c4db3d7e01ee48e4af071d793753da1a
|
[
"MIT"
] | null | null | null |
stats/models/__init__.py
|
48ix/stats
|
4b7ae032c4db3d7e01ee48e4af071d793753da1a
|
[
"MIT"
] | null | null | null |
stats/models/__init__.py
|
48ix/stats
|
4b7ae032c4db3d7e01ee48e4af071d793753da1a
|
[
"MIT"
] | 1
|
2020-10-22T00:00:42.000Z
|
2020-10-22T00:00:42.000Z
|
"""Configuration & API Response Models."""
| 21.5
| 42
| 0.697674
| 4
| 43
| 7.5
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.116279
| 43
| 1
| 43
| 43
| 0.789474
| 0.837209
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
0b41288edef54f18a42c88fce43cba58a022ebb0
| 97
|
py
|
Python
|
orgdynamic/insertdatetime.py
|
Sinamore/orgextended
|
60f6ae8e347697a4ffad3f0b89889c31031de9d1
|
[
"MIT"
] | 120
|
2020-11-24T16:22:18.000Z
|
2022-03-26T08:25:52.000Z
|
orgdynamic/insertdatetime.py
|
Sinamore/orgextended
|
60f6ae8e347697a4ffad3f0b89889c31031de9d1
|
[
"MIT"
] | 50
|
2021-01-11T11:10:19.000Z
|
2022-03-14T13:33:10.000Z
|
orgdynamic/insertdatetime.py
|
Sinamore/orgextended
|
60f6ae8e347697a4ffad3f0b89889c31031de9d1
|
[
"MIT"
] | 8
|
2021-02-16T08:03:22.000Z
|
2022-02-11T12:22:24.000Z
|
import sublime
import datetime
def Execute(view, params):
return [str(datetime.datetime.now())]
| 19.4
| 38
| 0.773196
| 13
| 97
| 5.769231
| 0.769231
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.103093
| 97
| 5
| 38
| 19.4
| 0.862069
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| false
| 0
| 0.5
| 0.25
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 1
| 0
| 0
|
0
| 5
|
0bb68df80cb81cbceb4e47f5eebe0fd577ddc966
| 110
|
py
|
Python
|
ptgaze/head_pose_estimation/__init__.py
|
YW-Ma/pytorch_mpiigaze_demo
|
9d097b34c92ea53b1510d830b0d3c535fa42f20b
|
[
"MIT"
] | 1
|
2021-11-04T01:54:56.000Z
|
2021-11-04T01:54:56.000Z
|
ptgaze/head_pose_estimation/__init__.py
|
YW-Ma/pytorch_mpiigaze_demo
|
9d097b34c92ea53b1510d830b0d3c535fa42f20b
|
[
"MIT"
] | null | null | null |
ptgaze/head_pose_estimation/__init__.py
|
YW-Ma/pytorch_mpiigaze_demo
|
9d097b34c92ea53b1510d830b0d3c535fa42f20b
|
[
"MIT"
] | null | null | null |
from .face_landmark_estimator import LandmarkEstimator
from .head_pose_normalizer import HeadPoseNormalizer
| 36.666667
| 55
| 0.890909
| 12
| 110
| 7.833333
| 0.833333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.090909
| 110
| 2
| 56
| 55
| 0.94
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
f02d8dac5175536cf705ad744a8dd4c087f13d4c
| 33
|
py
|
Python
|
coworks/middleware/__init__.py
|
sidneyarcidiacono/coworks
|
7f51b83e8699ced991d16a5a43ad19e569b6e814
|
[
"MIT"
] | null | null | null |
coworks/middleware/__init__.py
|
sidneyarcidiacono/coworks
|
7f51b83e8699ced991d16a5a43ad19e569b6e814
|
[
"MIT"
] | null | null | null |
coworks/middleware/__init__.py
|
sidneyarcidiacono/coworks
|
7f51b83e8699ced991d16a5a43ad19e569b6e814
|
[
"MIT"
] | null | null | null |
from .xray import XRayMiddleware
| 16.5
| 32
| 0.848485
| 4
| 33
| 7
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.121212
| 33
| 1
| 33
| 33
| 0.965517
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
6518355ad2d6d19bb408959eb84d0ae6aba795dd
| 229
|
py
|
Python
|
src/python/serif/model/base_model.py
|
BBN-E/text-open
|
c508f6caeaa51a43cdb0bc27d8ed77e5750fdda9
|
[
"Apache-2.0"
] | 2
|
2022-03-24T14:37:51.000Z
|
2022-03-24T19:56:45.000Z
|
src/python/serif/model/base_model.py
|
BBN-E/text-open
|
c508f6caeaa51a43cdb0bc27d8ed77e5750fdda9
|
[
"Apache-2.0"
] | null | null | null |
src/python/serif/model/base_model.py
|
BBN-E/text-open
|
c508f6caeaa51a43cdb0bc27d8ed77e5750fdda9
|
[
"Apache-2.0"
] | null | null | null |
from abc import ABC
from abc import abstractmethod
class BaseModel(ABC):
def __init__(self,**kwargs):
pass
@abstractmethod
def process(self, serif_doc):
pass
def reload_model(self):
pass
| 17.615385
| 33
| 0.650655
| 28
| 229
| 5.107143
| 0.571429
| 0.097902
| 0.181818
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.275109
| 229
| 13
| 34
| 17.615385
| 0.861446
| 0
| 0
| 0.3
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.3
| false
| 0.3
| 0.2
| 0
| 0.6
| 0
| 1
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 1
| 0
|
0
| 5
|
6518fd0bb2c5ab7c54f5be577d99c4e0b951785d
| 41,988
|
py
|
Python
|
tests/test_all.py
|
biosimulations/biosimulations-modeldb
|
85b211c799d82e47a9d0424fd425958bab6add4c
|
[
"MIT"
] | null | null | null |
tests/test_all.py
|
biosimulations/biosimulations-modeldb
|
85b211c799d82e47a9d0424fd425958bab6add4c
|
[
"MIT"
] | 3
|
2022-02-28T14:15:04.000Z
|
2022-03-21T10:34:04.000Z
|
tests/test_all.py
|
biosimulations/biosimulations-modeldb
|
85b211c799d82e47a9d0424fd425958bab6add4c
|
[
"MIT"
] | null | null | null |
from biosimulations_modeldb import __main__
from biosimulations_modeldb._version import __version__
from biosimulations_modeldb.core import (
get_project_ids, get_project, get_paper_metadata,
get_metadata_for_project,
export_project_metadata_for_project_to_omex_metadata,
init_combine_archive_from_dir,
create_sedml_for_xpp_file,
build_combine_archive_for_project,
make_directories,
import_project, import_projects,
TAXA,
ARTICLE_FIGURES_COMBINE_ARCHIVE_SUBDIRECTORY,
)
from biosimulations_modeldb.config import get_config
from biosimulators_utils.combine.data_model import CombineArchiveContent, CombineArchiveContentFormat
from biosimulators_utils.combine.io import CombineArchiveReader
from biosimulators_utils.omex_meta.io import BiosimulationsOmexMetaReader
from biosimulators_utils.ref.data_model import JournalArticle
from biosimulators_utils.sedml.io import SedmlSimulationWriter, SedmlSimulationReader
from biosimulators_utils.sedml.validation import validate_doc
from unittest import mock
import Bio.Entrez
import biosimulations_modeldb.__main__
import capturer
import git
import os
import requests
import requests_cache
import shutil
import tempfile
import unittest
Bio.Entrez.email = 'biosimulations.daemon@gmail.com'
class MockCrossRefSessionResponse:
def raise_for_status(self):
pass
def json(self):
return {
'message': {
'title': [''],
'container-title': [''],
'volume': '',
'published': {
'date-parts': [
[
2021,
12,
31,
]
]
}
}
}
class MockCrossRefSession:
def get(self, url):
return MockCrossRefSessionResponse()
class MockS3Bucket:
def __init__(self, name):
pass
def upload_file(self, *args, **kwargs):
pass
class TestCase(unittest.TestCase):
def setUp(self):
self.case_dirname = tempfile.mkdtemp()
def tearDown(self):
shutil.rmtree(self.case_dirname)
@classmethod
def setUpClass(cls):
cls.dirname = tempfile.mkdtemp()
git.Repo.init(cls.dirname)
cls.pkg_dirname = os.path.join(cls.dirname, 'biosimulations_modeldb')
os.mkdir(cls.pkg_dirname)
@classmethod
def tearDownClass(cls):
shutil.rmtree(cls.dirname)
def test_get_project_ids(self):
config = get_config(
sessions_dirname=os.path.join(self.pkg_dirname, 'source'),
)
ids = get_project_ids(config, 'XPP')
self.assertIn(35358, ids)
self.assertGreater(len(ids), 100)
def test_get_paper_metadata(self):
config = get_config(
sessions_dirname=os.path.join(self.pkg_dirname, 'source'),
)
paper = {
'object_id': 5225,
}
get_paper_metadata(paper, config)
self.assertEqual(paper, {
'object_id': 5225,
'uris': {
'doi': None,
'pubmed': '8792224',
'url': None,
},
'citation': paper['citation'],
})
self.assertEqual(paper['citation'].authors, ['Pinsky PF', 'Rinzel J'])
self.assertEqual(paper['citation'].title, 'Intrinsic and network rhythmogenesis in a reduced Traub model for CA3 neurons')
self.assertEqual(paper['citation'].journal, 'J Comput Neurosci')
self.assertEqual(paper['citation'].volume, '1')
self.assertEqual(paper['citation'].issue, None)
self.assertEqual(paper['citation'].pages, '39-60')
self.assertEqual(paper['citation'].year, 1994)
def test_get_paper_metadata_without_pages(self):
config = get_config(
sessions_dirname=os.path.join(self.pkg_dirname, 'source'),
)
def json():
return {
'title': {'value': 'XYZ'},
'authors': {'value': [{'object_name': 'abc'}]},
'journal': {'value': 'def'},
'year': {'value': 2022},
}
def get(url):
return mock.Mock(
raise_for_status=lambda: None,
json=json,
)
config['source_session'] = mock.Mock(
get=get
)
paper = {
'object_id': 186225,
}
get_paper_metadata(paper, config)
self.assertEqual(paper, {
'object_id': 186225,
'uris': {
'doi': None,
'pubmed': None,
'url': None,
},
'citation': paper['citation'],
})
self.assertEqual(paper['citation'].authors, ['abc'])
self.assertEqual(paper['citation'].title, 'XYZ')
self.assertEqual(paper['citation'].journal, 'def')
self.assertEqual(paper['citation'].volume, None)
self.assertEqual(paper['citation'].issue, None)
self.assertEqual(paper['citation'].pages, None)
self.assertEqual(paper['citation'].year, 2022)
def test_get_paper_metadata_with_pages(self):
config = get_config(
sessions_dirname=os.path.join(self.pkg_dirname, 'source'),
)
def json():
return {
'title': {'value': 'XYZ'},
'authors': {'value': [{'object_name': 'abc'}]},
'journal': {'value': 'def'},
'volume': {'value': 'ghi'},
'first_page': {'value': '1'},
'last_page': {'value': '2'},
'year': {'value': 2022},
'doi': {'value': 'jkl'},
'pubmed_id': {'value': 'mno'},
'url': {'value': 'pqr'},
}
def get(url):
return mock.Mock(
raise_for_status=lambda: None,
json=json,
)
config['source_session'] = mock.Mock(
get=get
)
paper = {
'object_id': 186225,
}
get_paper_metadata(paper, config)
self.assertEqual(paper, {
'object_id': 186225,
'uris': {
'doi': 'jkl',
'pubmed': 'mno',
'url': 'pqr',
},
'citation': paper['citation'],
})
self.assertEqual(paper['citation'].authors, ['abc'])
self.assertEqual(paper['citation'].title, 'XYZ')
self.assertEqual(paper['citation'].journal, 'def')
self.assertEqual(paper['citation'].volume, 'ghi')
self.assertEqual(paper['citation'].issue, None)
self.assertEqual(paper['citation'].pages, '1-2')
self.assertEqual(paper['citation'].year, 2022)
def test_get_project(self):
config = get_config(
sessions_dirname=os.path.join(self.pkg_dirname, 'source'),
)
project = get_project(35358, config)
self.assertEqual(project['id'], 35358)
self.assertEqual(project['name'], 'CA3 pyramidal cell: rhythmogenesis in a reduced Traub model (Pinsky, Rinzel 1994)')
self.assertEqual(project['created'], '2004-02-09T17:12:24')
def test_get_metadata_for_project(self):
base_config = get_config()
config = get_config(
base_dirname=self.pkg_dirname,
sessions_dirname=os.path.join(self.pkg_dirname, 'source'),
final_dirname=os.path.join(self.pkg_dirname, 'final'),
curators_filename=base_config['curators_filename'],
)
config['source_projects_dirname'] = base_config['source_projects_dirname']
if not os.path.isdir(config['source_thumbnails_dirname']):
os.makedirs(config['source_thumbnails_dirname'])
if not os.path.isdir(os.path.join(config['final_metadata_dirname'])):
os.makedirs(os.path.join(config['final_metadata_dirname']))
project = {
'id': 136097,
'species': {
'value': [{'object_name': 'Drosophila'}],
},
'region': {
'value': [{'object_name': 'Auditory brainstem'}],
},
'model_paper': {
'value': [{
'object_id': 136105,
'uris': {
'doi': '10.1093/chemse/bjp032',
'pubmed': None,
'url': None,
},
'citation': JournalArticle(),
}],
},
}
description, taxa, references, thumbnails = get_metadata_for_project(
project, os.path.join(config['source_projects_dirname'], '136097'), config)
self.assertTrue(description.startswith('This is the readme for the model associated with the AChems abstract'))
self.assertEqual(taxa, [{'uri': 'http://identifiers.org/taxonomy:7215', 'label': 'Drosophila'}])
self.assertEqual(len(references), 1)
self.assertEqual(references[0]['uri'], 'http://identifiers.org/doi:10.1093/chemse/bjp032')
self.assertTrue(references[0]['label'].startswith('Abstracts from the Thirty-first Annual Meeting'))
self.assertEqual(thumbnails, [])
# read from cache
description, taxa, references, thumbnails = get_metadata_for_project(
project, os.path.join(config['source_projects_dirname'], '136097'), config)
self.assertTrue(description.startswith('This is the readme for the model associated with the AChems abstract'))
self.assertEqual(taxa, [{'uri': 'http://identifiers.org/taxonomy:7215', 'label': 'Drosophila'}])
self.assertEqual(len(references), 1)
self.assertEqual(references[0]['uri'], 'http://identifiers.org/doi:10.1093/chemse/bjp032')
self.assertTrue(references[0]['label'].startswith('Abstracts from the Thirty-first Annual Meeting'))
self.assertEqual(thumbnails, [])
def test_get_metadata_for_project_no_description(self):
base_config = get_config()
config = get_config(
base_dirname=self.pkg_dirname,
sessions_dirname=os.path.join(self.pkg_dirname, 'source'),
final_dirname=os.path.join(self.pkg_dirname, 'final'),
curators_filename=base_config['curators_filename'],
)
config['source_projects_dirname'] = base_config['source_projects_dirname']
if not os.path.isdir(config['source_thumbnails_dirname']):
os.makedirs(config['source_thumbnails_dirname'])
if not os.path.isdir(os.path.join(config['final_metadata_dirname'])):
os.makedirs(os.path.join(config['final_metadata_dirname']))
project = {
'id': 58712,
'region': {
'value': [{'object_name': 'Drosophila'}],
}
}
description, taxa, references, thumbnails = get_metadata_for_project(
project, os.path.join(config['source_projects_dirname'], '58712'), config)
self.assertEqual(description, None)
self.assertEqual(taxa, [{'uri': 'http://identifiers.org/taxonomy:7215', 'label': 'Drosophila'}])
description, taxa, references, thumbnails = get_metadata_for_project(
project, os.path.join(config['source_projects_dirname'], '58712'), config)
self.assertEqual(description, None)
self.assertEqual(taxa, [{'uri': 'http://identifiers.org/taxonomy:7215', 'label': 'Drosophila'}])
def test_get_metadata_for_project_no_annotated_taxonomy(self):
base_config = get_config()
config = get_config(
base_dirname=self.pkg_dirname,
sessions_dirname=os.path.join(self.pkg_dirname, 'source'),
final_dirname=os.path.join(self.pkg_dirname, 'final'),
curators_filename=base_config['curators_filename'],
)
config['source_projects_dirname'] = base_config['source_projects_dirname']
if not os.path.isdir(config['source_thumbnails_dirname']):
os.makedirs(config['source_thumbnails_dirname'])
if not os.path.isdir(os.path.join(config['final_metadata_dirname'])):
os.makedirs(os.path.join(config['final_metadata_dirname']))
project = {
'id': 57910,
'species': {
'value': [{'object_name': 'Escherichia coli'}],
}
}
with self.assertRaisesRegex(ValueError, 'Taxonomy must be annotated'):
description, taxa, references, thumbnails = get_metadata_for_project(
project, os.path.join(config['source_projects_dirname'], '57910'), config)
def test_get_metadata_for_project_with_images(self):
    """Metadata for a project with a PubMed citation and a local image thumbnail."""
    base_config = get_config()
    config = get_config(
        base_dirname=self.pkg_dirname,
        sessions_dirname=os.path.join(self.pkg_dirname, 'source'),
        final_dirname=os.path.join(self.pkg_dirname, 'final'),
        curators_filename=base_config['curators_filename'],
    )
    config['source_projects_dirname'] = base_config['source_projects_dirname']
    if not os.path.isdir(config['source_thumbnails_dirname']):
        os.makedirs(config['source_thumbnails_dirname'])
    if not os.path.isdir(os.path.join(config['final_metadata_dirname'])):
        os.makedirs(os.path.join(config['final_metadata_dirname']))
    project = {
        'id': 57910,
        'model_paper': {
            'value': [{
                'object_id': 57915,
                'uris': {
                    'doi': None,
                    'pubmed': '9192303',
                    'url': None,
                },
                'citation': JournalArticle(),
            }],
        },
    }
    description, taxa, references, thumbnails = get_metadata_for_project(
        project, os.path.join(config['source_projects_dirname'], '57910'), config)
    self.assertTrue(description.startswith('This is the readme.txt for the model associated with the paper'))
    self.assertEqual(taxa, [])
    self.assertEqual(len(references), 1)
    # the PubMed id is resolved to a DOI-based identifiers.org URI
    self.assertEqual(references[0]['uri'], 'http://identifiers.org/doi:10.1111/j.1469-7793.1997.313bn.x')
    self.assertTrue(references[0]['label'].startswith('Nicoletta Chiesa,'))
    self.assertEqual(thumbnails, [{
        'local_filename': os.path.join(config['source_projects_dirname'], '57910', 'samplerun.jpg'),
        'archive_filename': 'samplerun.jpg',
        'format': 'jpeg',
    }])
    # second call with identical inputs — presumably exercises a cached
    # metadata path; TODO confirm. Note the second thumbnail check wraps the
    # path in os.path.normpath, unlike the first — verify this is deliberate.
    description, taxa, references, thumbnails = get_metadata_for_project(
        project, os.path.join(config['source_projects_dirname'], '57910'), config)
    self.assertTrue(description.startswith('This is the readme.txt for the model associated with the paper'))
    self.assertEqual(taxa, [])
    self.assertEqual(len(references), 1)
    self.assertEqual(references[0]['uri'], 'http://identifiers.org/doi:10.1111/j.1469-7793.1997.313bn.x')
    self.assertTrue(references[0]['label'].startswith('Nicoletta Chiesa,'))
    self.assertEqual(thumbnails, [{
        'local_filename': os.path.normpath(os.path.join(config['source_projects_dirname'], '57910', 'samplerun.jpg')),
        'archive_filename': 'samplerun.jpg',
        'format': 'jpeg',
    }])
def test_get_metadata_for_project_no_doi_pubmed(self):
    """When a citation has neither DOI nor PubMed id, its plain URL is used as the reference URI."""
    base_config = get_config()
    config = get_config(
        base_dirname=self.pkg_dirname,
        sessions_dirname=os.path.join(self.pkg_dirname, 'source'),
        final_dirname=os.path.join(self.pkg_dirname, 'final'),
        curators_filename=base_config['curators_filename'],
    )
    config['source_projects_dirname'] = base_config['source_projects_dirname']
    if not os.path.isdir(config['source_thumbnails_dirname']):
        os.makedirs(config['source_thumbnails_dirname'])
    if not os.path.isdir(os.path.join(config['final_metadata_dirname'])):
        os.makedirs(os.path.join(config['final_metadata_dirname']))
    project = {
        'id': 76879,
        'model_paper': {
            'value': [{
                'object_id': 57915,
                'uris': {
                    'doi': None,
                    'pubmed': None,
                    'url': 'http://example.com',
                },
                'citation': JournalArticle(title='Example'),
            }],
        },
    }
    description, taxa, references, thumbnails = get_metadata_for_project(
        project, os.path.join(config['source_projects_dirname'], '76879'), config)
    self.assertTrue(description.startswith('From Excitatory and Inhibitory Interactions in'))
    self.assertEqual(taxa, [])
    self.assertEqual(references, [{
        'uri': 'http://example.com',
        'label': 'Example.',
    }])
    self.assertEqual(thumbnails, [])
    # second call with identical inputs — presumably exercises a cached
    # metadata path; TODO confirm
    description, taxa, references, thumbnails = get_metadata_for_project(
        project, os.path.join(config['source_projects_dirname'], '76879'), config)
    self.assertTrue(description.startswith('From Excitatory and Inhibitory Interactions in'))
    self.assertEqual(taxa, [])
    self.assertEqual(references, [{
        'uri': 'http://example.com',
        'label': 'Example.',
    }])
    self.assertEqual(thumbnails, [])
def test_get_metadata_for_project_no_citations(self):
    """A project with no citation fields yields empty taxa, references, and thumbnails."""
    base_config = get_config()
    config = get_config(
        base_dirname=self.pkg_dirname,
        sessions_dirname=os.path.join(self.pkg_dirname, 'source'),
        final_dirname=os.path.join(self.pkg_dirname, 'final'),
        curators_filename=base_config['curators_filename'],
    )
    config['source_projects_dirname'] = base_config['source_projects_dirname']
    if not os.path.isdir(config['source_thumbnails_dirname']):
        os.makedirs(config['source_thumbnails_dirname'])
    if not os.path.isdir(os.path.join(config['final_metadata_dirname'])):
        os.makedirs(os.path.join(config['final_metadata_dirname']))
    # minimal project record: id only
    project = {
        'id': 45513,
    }
    description, taxa, references, thumbnails = get_metadata_for_project(
        project, os.path.join(config['source_projects_dirname'], '45513'), config)
    self.assertTrue(description.startswith('This is the readme.txt for the models associated with the paper'))
    self.assertEqual(taxa, [])
    self.assertEqual(references, [])
    self.assertEqual(thumbnails, [])
    # second call with identical inputs — presumably exercises a cached
    # metadata path; TODO confirm
    description, taxa, references, thumbnails = get_metadata_for_project(
        project, os.path.join(config['source_projects_dirname'], '45513'), config)
    self.assertTrue(description.startswith('This is the readme.txt for the models associated with the paper'))
    self.assertEqual(taxa, [])
    self.assertEqual(references, [])
    self.assertEqual(thumbnails, [])
def test_get_metadata_for_project_with_pmc_images(self):
    """Thumbnails can come from PubMed Central article figures, with label and caption."""
    base_config = get_config()
    config = get_config(
        base_dirname=self.pkg_dirname,
        sessions_dirname=os.path.join(self.pkg_dirname, 'source'),
        final_dirname=os.path.join(self.pkg_dirname, 'final'),
        curators_filename=base_config['curators_filename'],
    )
    config['source_projects_dirname'] = base_config['source_projects_dirname']
    if not os.path.isdir(config['source_thumbnails_dirname']):
        os.makedirs(config['source_thumbnails_dirname'])
    if not os.path.isdir(os.path.join(config['final_metadata_dirname'])):
        os.makedirs(os.path.join(config['final_metadata_dirname']))
    project = {
        'id': 83558,
        'model_paper': {
            'value': [{
                'object_id': 1234,
                'uris': {
                    'doi': None,
                    'pubmed': '16965177',
                    'url': None,
                },
                'citation': JournalArticle(),
            }],
        },
    }
    description, taxa, references, thumbnails = get_metadata_for_project(
        project, os.path.join(config['source_projects_dirname'], '83558'), config)
    self.assertTrue(description.startswith('This is the readme for the model code associated with the publication:'))
    self.assertEqual(taxa, [])
    self.assertEqual(len(references), 1)
    self.assertEqual(references[0]['uri'], 'http://identifiers.org/doi:10.1371/journal.pcbi.0020119')
    self.assertTrue(references[0]['label'].startswith('Maria Lindskog,'))
    # figure downloaded into the thumbnails directory and mapped into the
    # archive's figures subdirectory
    self.assertEqual(thumbnails[0]['id'], 'pcbi.0020119/pcbi-0020119-g001')
    self.assertEqual(thumbnails[0]['local_filename'], os.path.join(
        config['source_thumbnails_dirname'], 'PMC1562452', 'PMC1562452', 'pcbi.0020119.g001.jpg'))
    self.assertEqual(thumbnails[0]['archive_filename'], os.path.join(
        ARTICLE_FIGURES_COMBINE_ARCHIVE_SUBDIRECTORY, 'PMC1562452', 'pcbi.0020119.g001.jpg'))
    self.assertEqual(thumbnails[0]['format'], 'jpeg')
    self.assertEqual(thumbnails[0]['label'], 'Figure 1')
    self.assertTrue(thumbnails[0]['caption'].startswith('<title '))
    # second call with identical inputs — presumably exercises a cached
    # metadata path; TODO confirm
    description, taxa, references, thumbnails = get_metadata_for_project(
        project, os.path.join(config['source_projects_dirname'], '83558'), config)
    self.assertTrue(description.startswith('This is the readme for the model code associated with the publication:'))
    self.assertEqual(taxa, [])
    self.assertEqual(len(references), 1)
    self.assertEqual(references[0]['uri'], 'http://identifiers.org/doi:10.1371/journal.pcbi.0020119')
    self.assertTrue(references[0]['label'].startswith('Maria Lindskog,'))
    self.assertEqual(thumbnails[0]['id'], 'pcbi.0020119/pcbi-0020119-g001')
    self.assertEqual(thumbnails[0]['local_filename'], os.path.join(
        config['source_thumbnails_dirname'], 'PMC1562452', 'PMC1562452', 'pcbi.0020119.g001.jpg'))
    self.assertEqual(thumbnails[0]['archive_filename'], os.path.join(
        ARTICLE_FIGURES_COMBINE_ARCHIVE_SUBDIRECTORY, 'PMC1562452', 'pcbi.0020119.g001.jpg'))
    self.assertEqual(thumbnails[0]['format'], 'jpeg')
    self.assertEqual(thumbnails[0]['label'], 'Figure 1')
    self.assertTrue(thumbnails[0]['caption'].startswith('<title '))
def test_get_metadata_for_project_without_doi(self):
    """When no DOI can be resolved, the reference URI falls back to a PubMed identifiers.org URI."""
    base_config = get_config()
    config = get_config(
        base_dirname=self.pkg_dirname,
        sessions_dirname=os.path.join(self.pkg_dirname, 'source'),
        final_dirname=os.path.join(self.pkg_dirname, 'final'),
        curators_filename=base_config['curators_filename'],
    )
    config['source_projects_dirname'] = base_config['source_projects_dirname']
    if not os.path.isdir(config['source_thumbnails_dirname']):
        os.makedirs(config['source_thumbnails_dirname'])
    if not os.path.isdir(os.path.join(config['final_metadata_dirname'])):
        os.makedirs(os.path.join(config['final_metadata_dirname']))
    project = {
        'id': 64171,
        'model_paper': {
            'value': [{
                'object_id': 55860,
                'uris': {
                    'doi': None,
                    'pubmed': '15239590',
                    'url': None,
                },
                'citation': JournalArticle(),
            }],
        },
    }
    description, taxa, references, thumbnails = get_metadata_for_project(
        project, os.path.join(config['source_projects_dirname'], '64171'), config)
    self.assertTrue(description.startswith('This is the readme.txt for the models associated with the paper'))
    self.assertEqual(len(references), 1)
    self.assertEqual(references[0]['uri'], 'http://identifiers.org/pubmed:15239590')
    self.assertTrue(references[0]['label'].startswith('Wu SN.'))
    # second call with identical inputs — presumably exercises a cached
    # metadata path; TODO confirm
    description, taxa, references, thumbnails = get_metadata_for_project(
        project, os.path.join(config['source_projects_dirname'], '64171'), config)
    self.assertTrue(description.startswith('This is the readme.txt for the models associated with the paper'))
    self.assertEqual(len(references), 1)
    self.assertEqual(references[0]['uri'], 'http://identifiers.org/pubmed:15239590')
    self.assertTrue(references[0]['label'].startswith('Wu SN.'))
def test_export_project_metadata_for_project_to_omex_metadata(self):
    """Exported OMEX metadata round-trips through BiosimulationsOmexMetaReader as expected.

    Also checks that a reader error surfaces as a ``ValueError`` about invalid
    metadata.
    """
    base_config = get_config()
    config = get_config(
        base_dirname=self.pkg_dirname,
        sessions_dirname=os.path.join(self.pkg_dirname, 'source'),
        final_dirname=os.path.join(self.pkg_dirname, 'final'),
        curators_filename=base_config['curators_filename'],
    )
    config['source_projects_dirname'] = base_config['source_projects_dirname']
    if not os.path.isdir(config['source_thumbnails_dirname']):
        os.makedirs(config['source_thumbnails_dirname'])
    if not os.path.isdir(os.path.join(config['final_metadata_dirname'])):
        os.makedirs(os.path.join(config['final_metadata_dirname']))
    # project record exercising the ModelDB fields the exporter maps into
    # OMEX metadata (title, abstract, encodes terms, creators, submitter, dates)
    project = {
        'id': 57910,
        'name': 'abc',
        'notes': {
            'value': 'def',
        },
        'model_type': {
            'value': [{
                'object_id': '123',
                'object_name': 'My model type',
            }]
        },
        'region': {
            'value': [
                {
                    'object_id': '456',
                    'object_name': 'Generic',
                },
                {
                    'object_id': '789',
                    'object_name': 'My region (xyz)',
                },
            ]
        },
        'other_type': {
            'value': 'My other type',
        },
        'implemented_by': {
            'value': [
                {
                    'object_name': 'Jane Doe [email at domain]',
                },
                {
                    'object_name': 'Jack Doer',
                },
            ],
        },
        'public_submitter_name': {
            'value': 'John Doe'
        },
        'public_submitter_email': {
            'value': 'email2@domain',
        },
        'created': '2022-01-01',
        'ver_date': '2022-01-02',
    }
    description = 'ghi'
    taxa = [{
        'uri': 'http://identifiers.org/taxonomy:7215',
        'label': 'Drosophila',
    }]
    references = [{
        'uri': 'http://identifiers.org/doi:10.1093/chemse/bjp032',
        'label': 'jkl',
    }]
    thumbnails = [{
        'local_filename': os.path.join(config['source_projects_dirname'], '57910', 'samplerun.jpg'),
        'archive_filename': './samplerun.jpg',
        'format': 'jpeg',
    }]
    metadata_filename = os.path.join(self.case_dirname, 'metadata.rdf')
    export_project_metadata_for_project_to_omex_metadata(project, description, taxa, references, thumbnails, metadata_filename, config)
    # read back the exported RDF and verify it parses cleanly
    metadata, errors, warnings = BiosimulationsOmexMetaReader().run(
        metadata_filename, working_dir=os.path.join(config['source_projects_dirname'], '57910'))
    self.assertEqual(errors, [])
    self.assertEqual(warnings, [])
    expected_metadata = {
        "uri": '.',
        "combine_archive_uri": 'http://omex-library.org/57910.omex',
        'title': 'abc',
        'abstract': 'def',
        'keywords': [],
        'description': 'ghi',
        'taxa': taxa,
        # note the expected transforms: 'Generic' region is dropped, the
        # ' (xyz)' suffix is stripped, and 'other_type' has no URI
        'encodes': [
            {
                'uri': 'http://modeldb.science/ModelList?id=123',
                'label': 'My model type',
            },
            {
                'uri': None,
                'label': 'My other type',
            },
            {
                'uri': 'http://modeldb.science/ModelList?id=789',
                'label': 'My region',
            },
        ],
        'thumbnails': [
            thumbnail['archive_filename']
            for thumbnail in thumbnails
        ],
        'sources': [],
        'predecessors': [],
        'successors': [],
        'see_also': [],
        # '[email at domain]' in the implementer name becomes a mailto URI
        'creators': [
            {
                'uri': 'mailto:email@domain',
                'label': 'Jane Doe',
            },
            {
                'uri': None,
                'label': 'Jack Doer',
            },
        ],
        'contributors': [
            {
                'uri': 'mailto:email2@domain',
                'label': 'John Doe',
            },
            {
                'uri': 'https://senselab.med.yale.edu/',
                'label': 'Sense Lab at Yale University',
            },
            {
                'uri': 'http://identifiers.org/orcid:0000-0002-2605-5080',
                'label': 'Jonathan R. Karr',
            },
        ],
        'identifiers': [
            {
                'uri': 'https://identifiers.org/modeldb:57910',
                'label': 'modeldb:57910',
            },
        ],
        'citations': references,
        'references': [],
        'license': None,
        'funders': [],
        'created': '2022-01-01',
        'modified': [
            '2022-01-02',
        ],
        'other': [],
    }
    self.assertEqual(metadata, [expected_metadata])
    # if the reader reports errors, the exporter must raise
    with self.assertRaisesRegex(ValueError, 'metadata is not valid'):
        with mock.patch.object(BiosimulationsOmexMetaReader, 'run', return_value=[None, [['My error']], []]):
            export_project_metadata_for_project_to_omex_metadata(
                project, description, taxa, references, thumbnails, metadata_filename, config)
def test_init_combine_archive_from_dir(self):
    """Archive contents are inferred per file; unknown XML warns and images/desktop.ini are excluded."""
    config = get_config()
    dirname = os.path.join(config['source_projects_dirname'], '57910')
    # smoke run against a real curated project directory
    init_combine_archive_from_dir(dirname)
    # fixture files covering format-detection branches
    with open(os.path.join(self.case_dirname, 'test1.unknown'), 'w') as file:
        file.write('here')
    with open(os.path.join(self.case_dirname, 'test2.xml'), 'w') as file:
        file.write('<sbml xmlns="http://www.sbml.org/sbml/"></sbml>')
    with open(os.path.join(self.case_dirname, 'test3.xml'), 'w') as file:
        file.write('<nml xmlns="http://morphml.org/neuroml/schema"></nml>')
    with open(os.path.join(self.case_dirname, 'test4.xml'), 'w') as file:
        file.write('<xml></xml>')
    with open(os.path.join(self.case_dirname, 'test5.xml'), 'w') as file:
        # deliberately malformed XML (unclosed tag)
        file.write('<xml></xml')
    with open(os.path.join(self.case_dirname, 'test6.jpg'), 'w') as file:
        file.write('here')
    with open(os.path.join(self.case_dirname, 'desktop.ini'), 'w') as file:
        file.write('here')
    with self.assertWarnsRegex(UserWarning, 'is not known'):
        archive = init_combine_archive_from_dir(self.case_dirname)
    archive.contents.sort(key=lambda content: content.location)
    # only test1-test5 appear; the .jpg and desktop.ini are not in the contents
    self.assertEqual(len(archive.contents), 5)
    self.assertEqual(archive.contents[0].location, 'test1.unknown')
    self.assertEqual(archive.contents[0].format, CombineArchiveContentFormat.OTHER.value)
    self.assertEqual(archive.contents[1].location, 'test2.xml')
    self.assertEqual(archive.contents[1].format, CombineArchiveContentFormat.SBML.value)
    self.assertEqual(archive.contents[2].location, 'test3.xml')
    self.assertEqual(archive.contents[2].format, CombineArchiveContentFormat.NeuroML.value)
    self.assertEqual(archive.contents[3].location, 'test4.xml')
    self.assertEqual(archive.contents[3].format, CombineArchiveContentFormat.XML.value)
    # unparseable XML falls back to the generic OTHER format
    self.assertEqual(archive.contents[4].location, 'test5.xml')
    self.assertEqual(archive.contents[4].format, CombineArchiveContentFormat.OTHER.value)
def test_create_sedml_for_xpp_file(self):
    """SED-ML generated for an XPP ODE file serializes, parses back, and validates cleanly."""
    config = get_config()
    dirname = os.path.join(config['source_projects_dirname'], '35358')
    sed_doc = create_sedml_for_xpp_file(35358, dirname, 'booth_bose.ode')

    # round-trip through the writer/reader to check the document is serializable;
    # always remove the temporary file, even if the reader raises
    sedml_filename = os.path.join(dirname, '_test_.sedml')
    SedmlSimulationWriter().run(sed_doc, sedml_filename)
    try:
        SedmlSimulationReader().run(sedml_filename)
    finally:
        os.remove(sedml_filename)

    errors, warnings = validate_doc(sed_doc, dirname)
    self.assertEqual(errors, [])
def test_create_sedml_for_xpp_file_with_set_file(self):
    """SED-ML for an XPP ODE file that has an associated set file round-trips and validates."""
    config = get_config()
    dirname = os.path.join(config['source_projects_dirname'], '116867')
    sed_doc = create_sedml_for_xpp_file(116867, dirname, 'rubin_terman_pd.ode')

    # round-trip through the writer/reader to check the document is serializable;
    # always remove the temporary file, even if the reader raises
    sedml_filename = os.path.join(dirname, '_test_.sedml')
    SedmlSimulationWriter().run(sed_doc, sedml_filename)
    try:
        SedmlSimulationReader().run(sedml_filename)
    finally:
        os.remove(sedml_filename)

    errors, warnings = validate_doc(sed_doc, dirname)
    self.assertEqual(errors, [])
def test_build_combine_archive_for_project(self):
    """Building a COMBINE archive with extra top-level contents produces a readable archive."""
    config = get_config()
    project_id = 57910  # renamed from ``id`` to avoid shadowing the builtin
    source_project_dirname = os.path.join(config['source_projects_dirname'], '57910')
    final_project_dirname = os.path.join(self.case_dirname, 'archive')
    archive_filename = os.path.join(self.case_dirname, 'archive.omex')
    extra_contents = {
        os.path.join(config['source_projects_dirname'], '57910', 'samplerun.jpg'): CombineArchiveContent(
            location='samplerun.jpg',
            format=CombineArchiveContentFormat.JPEG.value,
        ),
        'MANIFEST.in': CombineArchiveContent(
            location='MANIFEST.in',
            format=CombineArchiveContentFormat.TEXT.value,
        ),
    }
    build_combine_archive_for_project(project_id, source_project_dirname, final_project_dirname, archive_filename, extra_contents)

    # re-reading the archive verifies it was packaged into a valid OMEX file
    archive_dirname = os.path.join(self.case_dirname, 'unpacked')
    CombineArchiveReader().run(archive_filename, archive_dirname)
def test_build_combine_archive_for_project_extra_content_in_sub_dir(self):
    """Extra archive contents located in a subdirectory are unpacked to that subdirectory."""
    config = get_config()
    project_id = 57910  # renamed from ``id`` to avoid shadowing the builtin
    source_project_dirname = os.path.join(config['source_projects_dirname'], '57910')
    final_project_dirname = os.path.join(self.case_dirname, 'archive')
    archive_filename = os.path.join(self.case_dirname, 'archive.omex')
    extra_contents = {
        os.path.join(config['source_projects_dirname'], '57910', 'samplerun.jpg'): CombineArchiveContent(
            location=os.path.join('subdir', 'samplerun.jpg'),
            format=CombineArchiveContentFormat.JPEG.value,
        ),
        'MANIFEST.in': CombineArchiveContent(
            location='MANIFEST.in',
            format=CombineArchiveContentFormat.TEXT.value,
        ),
    }
    build_combine_archive_for_project(project_id, source_project_dirname, final_project_dirname, archive_filename, extra_contents)

    # unpack and confirm the extra content landed in its subdirectory
    archive_dirname = os.path.join(self.case_dirname, 'unpacked')
    CombineArchiveReader().run(archive_filename, archive_dirname)
    self.assertTrue(os.path.isfile(os.path.join(archive_dirname, 'subdir', 'samplerun.jpg')))
def test_import_project(self):
    """End-to-end import of a single project with simulation and S3 upload mocked out."""
    base_config = get_config()
    config = get_config(
        base_dirname=self.pkg_dirname,
        source_dirname=os.path.join(self.pkg_dirname, 'source'),
        sessions_dirname=os.path.join(self.pkg_dirname, 'source'),
        final_dirname=os.path.join(self.pkg_dirname, 'final'),
        curators_filename=base_config['curators_filename'],
        issues_filename=base_config['issues_filename'],
        status_filename=os.path.join(self.pkg_dirname, 'final', 'status.yml'),
        max_projects=1,
        bucket_name='bucket',
    )
    make_directories(config)
    # stub out the CrossRef HTTP session so no network requests are made
    config['cross_ref_session'] = MockCrossRefSession()
    project = get_project(57910, config)
    auth = ''
    with mock.patch('biosimulators_utils.biosimulations.utils.run_simulation_project', return_value='*' * 32):
        with mock.patch('boto3.resource', return_value=mock.Mock(Bucket=MockS3Bucket)):
            import_project(project, True, auth, config)
def test_import_projects(self):
    """Batch import (capped at one project) with simulation, auth, and S3 mocked out."""
    base_config = get_config()
    config = get_config(
        base_dirname=self.pkg_dirname,
        source_dirname=os.path.join(self.pkg_dirname, 'source'),
        sessions_dirname=os.path.join(self.pkg_dirname, 'source'),
        final_dirname=os.path.join(self.pkg_dirname, 'final'),
        curators_filename=base_config['curators_filename'],
        issues_filename=base_config['issues_filename'],
        status_filename=os.path.join(self.pkg_dirname, 'final', 'status.yml'),
        max_projects=1,
        bucket_name='bucket',
    )
    # stub out the CrossRef HTTP session so no network requests are made
    config['cross_ref_session'] = MockCrossRefSession()
    with mock.patch('biosimulators_utils.biosimulations.utils.run_simulation_project', return_value='*' * 32):
        with mock.patch('biosimulators_utils.biosimulations.utils.get_authorization_for_client', return_value='xxx yyy'):
            with mock.patch('boto3.resource', return_value=mock.Mock(Bucket=MockS3Bucket)):
                import_projects(config)
def test_import_projects_dry_run(self):
    """Dry-run import: simulation and publication disabled, so no simulator mock is needed."""
    base_config = get_config()
    config = get_config(
        base_dirname=self.pkg_dirname,
        source_dirname=os.path.join(self.pkg_dirname, 'source'),
        sessions_dirname=os.path.join(self.pkg_dirname, 'source'),
        final_dirname=os.path.join(self.pkg_dirname, 'final'),
        curators_filename=base_config['curators_filename'],
        issues_filename=base_config['issues_filename'],
        status_filename=os.path.join(self.pkg_dirname, 'final', 'status.yml'),
        max_projects=1,
        bucket_name='bucket',
    )
    # stub out the CrossRef HTTP session so no network requests are made
    config['cross_ref_session'] = MockCrossRefSession()
    config['dry_run'] = True
    config['simulate_projects'] = False
    config['publish_projects'] = False
    with mock.patch('biosimulators_utils.biosimulations.utils.get_authorization_for_client', return_value='xxx yyy'):
        with mock.patch('boto3.resource', return_value=mock.Mock(Bucket=MockS3Bucket)):
            import_projects(config)
def test_cli(self):
    """The CLI ``run-projects-and-publish`` command runs with config supplied via environment variables."""
    base_config = get_config()
    # the CLI reads its configuration from the environment
    with mock.patch.dict('os.environ', {
        'BASE_DIRNAME': self.pkg_dirname,
        'SOURCE_DIRNAME': os.path.join(self.pkg_dirname, 'source'),
        'SESSIONS_DIRNAME': os.path.join(self.pkg_dirname, 'source'),
        'FINAL_DIRNAME': os.path.join(self.pkg_dirname, 'final'),
        'CURATORS_FILENAME': base_config['curators_filename'],
        'ISSUES_FILENAME': base_config['issues_filename'],
        'STATUS_FILENAME': os.path.join(self.pkg_dirname, 'final', 'status.yml'),
        'BUCKET_NAME': 'bucket',
    }):
        def mock_get_config(**args):
            # wrap the real config factory so the CrossRef session is stubbed
            config = get_config(**args)
            config['cross_ref_session'] = MockCrossRefSession()
            return config
        with mock.patch('biosimulators_utils.biosimulations.utils.run_simulation_project', return_value='*' * 32):
            with mock.patch('biosimulators_utils.biosimulations.utils.get_authorization_for_client', return_value='xxx yyy'):
                with mock.patch('boto3.resource', return_value=mock.Mock(Bucket=MockS3Bucket)):
                    # NOTE(review): this imports .config but then patches
                    # biosimulations_modeldb.__main__ — presumably __main__ is
                    # already imported at module level; verify
                    import biosimulations_modeldb.config
                    with mock.patch.object(biosimulations_modeldb.__main__, 'get_config', side_effect=mock_get_config):
                        with __main__.App(argv=[
                            'run-projects-and-publish',
                            '--max-projects', '1',
                        ]) as app:
                            app.run()
def test_cli_help(self):
    """Requesting ``--help`` makes the CLI exit via ``SystemExit``."""
    with mock.patch('sys.argv', ['', '--help']):
        self.assertRaises(SystemExit, __main__.main)
def test_version(self):
    """``--version`` prints the package version to stdout and exits with code 0."""
    with __main__.App(argv=['--version']) as app:
        with capturer.CaptureOutput(merged=False, relay=False) as captured:
            with self.assertRaises(SystemExit) as cm:
                app.run()
            self.assertEqual(cm.exception.code, 0)
            self.assertEqual(captured.stdout.get_text(), __version__)
            self.assertEqual(captured.stderr.get_text(), '')
| 43.64657
| 139
| 0.587096
| 4,239
| 41,988
| 5.598962
| 0.101203
| 0.031853
| 0.044662
| 0.031263
| 0.782211
| 0.742732
| 0.730345
| 0.716988
| 0.707255
| 0.698028
| 0
| 0.026136
| 0.284677
| 41,988
| 961
| 140
| 43.691988
| 0.764075
| 0.011884
| 0
| 0.55711
| 0
| 0.002331
| 0.210757
| 0.062417
| 0
| 0
| 0
| 0
| 0.149184
| 1
| 0.045455
| false
| 0.003497
| 0.0338
| 0.006993
| 0.092075
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
652816f11f719180a48274a87a9d126eb1b683bb
| 17
|
py
|
Python
|
Practicals/Steve/py_test.py
|
TonyJenkins/cfs2160-2019-python-public
|
3bf9f9a905dfeb9f5e664ef5b27905acb224f422
|
[
"Unlicense"
] | 12
|
2019-10-10T10:43:20.000Z
|
2020-01-25T12:42:25.000Z
|
Practicals/Steve/py_test.py
|
TonyJenkins/cfs2160-2019-python-public
|
3bf9f9a905dfeb9f5e664ef5b27905acb224f422
|
[
"Unlicense"
] | null | null | null |
Practicals/Steve/py_test.py
|
TonyJenkins/cfs2160-2019-python-public
|
3bf9f9a905dfeb9f5e664ef5b27905acb224f422
|
[
"Unlicense"
] | 6
|
2019-10-03T14:41:17.000Z
|
2019-12-07T10:59:33.000Z
|
# Minimal smoke-test script: prints a fixed marker to confirm the file runs.
MESSAGE = "GIT TEST"
print(MESSAGE)
| 17
| 17
| 0.705882
| 3
| 17
| 4
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.058824
| 17
| 1
| 17
| 17
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0.444444
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
|
0
| 5
|
6539cbbadc6ac844afbbb24025ba63f75bfe45cd
| 179
|
py
|
Python
|
src/puzzle/__init__.py
|
hat27/puzzle
|
b96071dc90ec280b50aa0e9f39986e4ad5dac37a
|
[
"MIT"
] | 2
|
2017-12-23T15:15:21.000Z
|
2018-02-27T04:15:30.000Z
|
src/puzzle/__init__.py
|
hat27/puzzle
|
b96071dc90ec280b50aa0e9f39986e4ad5dac37a
|
[
"MIT"
] | null | null | null |
src/puzzle/__init__.py
|
hat27/puzzle
|
b96071dc90ec280b50aa0e9f39986e4ad5dac37a
|
[
"MIT"
] | null | null | null |
#-*- coding: utf8 -*-
__author__ = "Gou.Hattori"
__version__ = "0.0.6"
from . import pz_env
from . import pz_config
from . import PzLog
from . import Piece
from . import Puzzle
| 16.272727
| 26
| 0.698324
| 26
| 179
| 4.423077
| 0.615385
| 0.434783
| 0.208696
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.027211
| 0.178771
| 179
| 10
| 27
| 17.9
| 0.755102
| 0.111732
| 0
| 0
| 0
| 0
| 0.101266
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.714286
| 0
| 0.714286
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
6545e588c6f8c13476f3e1fe251a168fb862a8a1
| 127
|
py
|
Python
|
template_demo/__main__.py
|
dls-controls/template_demo
|
1534f871f0427d96d7aef666107fc5a251444000
|
[
"Apache-2.0"
] | null | null | null |
template_demo/__main__.py
|
dls-controls/template_demo
|
1534f871f0427d96d7aef666107fc5a251444000
|
[
"Apache-2.0"
] | null | null | null |
template_demo/__main__.py
|
dls-controls/template_demo
|
1534f871f0427d96d7aef666107fc5a251444000
|
[
"Apache-2.0"
] | null | null | null |
from template_demo import cli

# Manual test:
#   pipenv run python -m template_demo

# Entry point for ``python -m template_demo``: delegate to the package CLI.
if __name__ == "__main__":
    cli.main()
| 18.142857
| 40
| 0.692913
| 18
| 127
| 4.333333
| 0.777778
| 0.307692
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.212598
| 127
| 6
| 41
| 21.166667
| 0.78
| 0.385827
| 0
| 0
| 0
| 0
| 0.106667
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.333333
| 0
| 0.333333
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
e8ea202df726649c91974b6c50c9cc50c6f2a0e7
| 127
|
py
|
Python
|
Introducao python/exercicios/ex046.py
|
Luis12368/python
|
23352d75ad13bcfd09ea85ab422fdc6ae1fcc5e7
|
[
"MIT"
] | null | null | null |
Introducao python/exercicios/ex046.py
|
Luis12368/python
|
23352d75ad13bcfd09ea85ab422fdc6ae1fcc5e7
|
[
"MIT"
] | null | null | null |
Introducao python/exercicios/ex046.py
|
Luis12368/python
|
23352d75ad13bcfd09ea85ab422fdc6ae1fcc5e7
|
[
"MIT"
] | null | null | null |
"""Count down from 10 to 0, pausing half a second per number, then greet the New Year."""
from time import sleep  # duplicate import removed

for count in range(10, -1, -1):
    print(count)
    sleep(0.5)  # half-second pause between counts
print('FELIZ ANO NOVO')
| 18.142857
| 27
| 0.653543
| 24
| 127
| 3.458333
| 0.666667
| 0.192771
| 0.337349
| 0.457831
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.060606
| 0.220472
| 127
| 7
| 28
| 18.142857
| 0.777778
| 0
| 0
| 0.333333
| 0
| 0
| 0.109375
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.333333
| 0
| 0.333333
| 0.333333
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
3311a2a2d201f24798d091f5acc49b88e07eab84
| 78
|
py
|
Python
|
dsp/__init__.py
|
shivarao101/dsp
|
09ca228eb1761ca9af36b810a8ac0f81ab7eba91
|
[
"MIT"
] | null | null | null |
dsp/__init__.py
|
shivarao101/dsp
|
09ca228eb1761ca9af36b810a8ac0f81ab7eba91
|
[
"MIT"
] | null | null | null |
dsp/__init__.py
|
shivarao101/dsp
|
09ca228eb1761ca9af36b810a8ac0f81ab7eba91
|
[
"MIT"
] | null | null | null |
from Addition import Addition
from basicdspalgorithm import basicdspalgorithm
| 39
| 47
| 0.897436
| 8
| 78
| 8.75
| 0.5
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.102564
| 78
| 2
| 47
| 39
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
332d8523fb13b98cf21a1d2a2a001744ba74cb16
| 107
|
py
|
Python
|
ext/datawald_mage2agency/datawald_mage2agency/__init__.py
|
ideabosque/DataWald-AWS
|
3ea905a40526dad3cb0eff92167c1e4230aa4aa9
|
[
"MIT"
] | null | null | null |
ext/datawald_mage2agency/datawald_mage2agency/__init__.py
|
ideabosque/DataWald-AWS
|
3ea905a40526dad3cb0eff92167c1e4230aa4aa9
|
[
"MIT"
] | null | null | null |
ext/datawald_mage2agency/datawald_mage2agency/__init__.py
|
ideabosque/DataWald-AWS
|
3ea905a40526dad3cb0eff92167c1e4230aa4aa9
|
[
"MIT"
] | null | null | null |
__all__ = ["datawald_mage2agency"]
from .mage2agency import Mage2Agency
from .mage2agent import Mage2Agent
| 26.75
| 36
| 0.831776
| 11
| 107
| 7.636364
| 0.545455
| 0.357143
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.052083
| 0.102804
| 107
| 3
| 37
| 35.666667
| 0.822917
| 0
| 0
| 0
| 0
| 0
| 0.186916
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.666667
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
334a9189319d570714eb458ee0b7dd41960aa733
| 145
|
py
|
Python
|
gsplines/basis/__init__.py
|
rafaelrojasmiliani/gsplines
|
663b10f6d53b498a1e892d9eb32a345153de36d2
|
[
"MIT"
] | 3
|
2021-08-28T01:42:40.000Z
|
2021-12-02T22:39:45.000Z
|
gsplines/basis/__init__.py
|
rafaelrojasmiliani/gsplines
|
663b10f6d53b498a1e892d9eb32a345153de36d2
|
[
"MIT"
] | null | null | null |
gsplines/basis/__init__.py
|
rafaelrojasmiliani/gsplines
|
663b10f6d53b498a1e892d9eb32a345153de36d2
|
[
"MIT"
] | null | null | null |
from .basis0010 import cBasis0010
from .basis1010 import cBasis1010
from .basis1000 import cBasis1000
from .basislagrange import cBasisLagrange
| 24.166667
| 41
| 0.855172
| 16
| 145
| 7.75
| 0.625
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.1875
| 0.117241
| 145
| 5
| 42
| 29
| 0.78125
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
686de6875a3a931363ddee27fb0e7a7572a53973
| 26
|
py
|
Python
|
fffit/tests/base_test.py
|
helpscott/fffit
|
22f83c3e804304398822bfdc335704cacab5efc5
|
[
"MIT"
] | null | null | null |
fffit/tests/base_test.py
|
helpscott/fffit
|
22f83c3e804304398822bfdc335704cacab5efc5
|
[
"MIT"
] | null | null | null |
fffit/tests/base_test.py
|
helpscott/fffit
|
22f83c3e804304398822bfdc335704cacab5efc5
|
[
"MIT"
] | 4
|
2021-05-13T19:51:08.000Z
|
2021-12-08T01:22:20.000Z
|
class BaseTest:
    """Placeholder base class for test cases; defines no behavior of its own."""
    pass
| 6.5
| 15
| 0.653846
| 3
| 26
| 5.666667
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.307692
| 26
| 3
| 16
| 8.666667
| 0.944444
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.5
| 0
| 0
| 0.5
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
|
0
| 5
|
68714220d17acb2e9475094f27b4c9ac2f82ca5b
| 4,391
|
py
|
Python
|
uaa-python/app/database/article_db.py
|
suomitek/cubeai
|
cc4c0f5f445a552d239910da63944307c1f06e37
|
[
"Apache-2.0"
] | null | null | null |
uaa-python/app/database/article_db.py
|
suomitek/cubeai
|
cc4c0f5f445a552d239910da63944307c1f06e37
|
[
"Apache-2.0"
] | null | null | null |
uaa-python/app/database/article_db.py
|
suomitek/cubeai
|
cc4c0f5f445a552d239910da63944307c1f06e37
|
[
"Apache-2.0"
] | null | null | null |
from app.globals.globals import g
from app.domain.article import Article
from app.utils.pageable import gen_pageable
async def get_articles(where, pageable):
    """Return ``(total_count, article_dicts)`` for the given WHERE clause and pageable spec.

    NOTE(review): ``where`` and ``pageable`` are interpolated directly into the
    SQL text — SQL injection risk if either is derived from untrusted input;
    consider the driver's parameterized-query support.
    """
    pageable = gen_pageable(pageable)
    sql = 'SELECT * FROM article {} {}'.format(where, pageable)
    sql_total_count = 'SELECT COUNT(*) FROM article {}'.format(where)
    async with await g.db.pool.Connection() as conn:
        async with conn.cursor() as cursor:
            await cursor.execute(sql)
            # NOTE(review): fetchall()/fetchone() are not awaited here although
            # execute() is — verify the driver's fetch methods are synchronous
            records = cursor.fetchall()
            article_list = []
            for record in records:
                # map each row onto an Article, then expose its attribute dict
                article = Article()
                article.from_record(record)
                article_list.append(article.__dict__)
            await cursor.execute(sql_total_count)
            total_count = cursor.fetchone()
            return total_count[0], article_list
async def get_articles_by_uuid(uuid):
    """Return article dicts matching ``uuid`` (query is limited to 1 row)."""
    query = 'SELECT * FROM article WHERE uuid = "{}" limit 1'.format(uuid)
    async with await g.db.pool.Connection() as conn:
        async with conn.cursor() as cursor:
            await cursor.execute(query)
            results = []
            for row in cursor.fetchall():
                entry = Article()
                entry.from_record(row)
                results.append(entry.__dict__)
            return results
async def get_article(id):
    """Return the article dict for the given primary key.

    Raises IndexError when no row matches (empty result indexed at [0]).
    """
    query = 'SELECT * FROM article WHERE id = "{}" limit 1'.format(id)
    async with await g.db.pool.Connection() as conn:
        async with conn.cursor() as cursor:
            await cursor.execute(query)
            matches = []
            for row in cursor.fetchall():
                entry = Article()
                entry.from_record(row)
                matches.append(entry.__dict__)
            return matches[0]
async def create_article(article):
    """Insert a new article row built from the given Article object.

    NOTE(review): field values are interpolated straight into the SQL
    string — SQL injection risk; a parameterized query would be safer.
    """
    sql = '''
        INSERT INTO article (
            uuid,
            author_login,
            author_name,
            subject_1,
            subject_2,
            subject_3,
            title,
            summary,
            tag_1,
            tag_2,
            tag_3,
            picture_url,
            content,
            display_order,
            created_date,
            modified_date
        ) VALUES ( '{}', '{}', '{}', '{}', '{}', '{}', '{}', '{}', '{}', '{}', '{}', '{}', '{}', '{}', '{}', '{}')
    '''.format(
        article.uuid,
        article.authorLogin,
        article.authorName,
        article.subject1,
        article.subject2,
        article.subject3,
        article.title,
        article.summary,
        article.tag1,
        article.tag2,
        article.tag3,
        article.pictureUrl,
        article.content,
        article.displayOrder,
        article.createdDate,
        article.modifiedDate
    )
    async with await g.db.pool.Connection() as conn:
        async with conn.cursor() as cursor:
            await cursor.execute(sql)
        # Explicit commit: the connection context does not auto-commit here.
        await conn.commit()
async def update_article(article):
    """Overwrite every column of the article row identified by article.id.

    NOTE(review): field values are interpolated straight into the SQL
    string — SQL injection risk; a parameterized query would be safer.
    """
    sql = '''
        UPDATE article SET
            uuid = '{}',
            author_login = '{}',
            author_name = '{}',
            subject_1 = '{}',
            subject_2 = '{}',
            subject_3 = '{}',
            title = '{}',
            summary = '{}',
            tag_1 = '{}',
            tag_2 = '{}',
            tag_3 = '{}',
            picture_url = '{}',
            content = '{}',
            display_order = '{}',
            created_date = '{}',
            modified_date = '{}'
        WHERE id = {}
    '''.format(
        article.uuid,
        article.authorLogin,
        article.authorName,
        article.subject1,
        article.subject2,
        article.subject3,
        article.title,
        article.summary,
        article.tag1,
        article.tag2,
        article.tag3,
        article.pictureUrl,
        article.content,
        article.displayOrder,
        article.createdDate,
        article.modifiedDate,
        article.id
    )
    async with await g.db.pool.Connection() as conn:
        async with conn.cursor() as cursor:
            await cursor.execute(sql)
        # Explicit commit: the connection context does not auto-commit here.
        await conn.commit()
async def delete_article(id):
    """Delete the article row with the given primary key and commit."""
    statement = 'DELETE FROM article WHERE id = "{}"'.format(id)
    async with await g.db.pool.Connection() as conn:
        async with conn.cursor() as cursor:
            await cursor.execute(statement)
        await conn.commit()
| 27.616352
| 114
| 0.53974
| 441
| 4,391
| 5.229025
| 0.185941
| 0.046834
| 0.05464
| 0.063747
| 0.774935
| 0.738942
| 0.738942
| 0.738942
| 0.738942
| 0.738942
| 0
| 0.009692
| 0.342063
| 4,391
| 158
| 115
| 27.791139
| 0.788508
| 0
| 0
| 0.544776
| 0
| 0
| 0.278297
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.022388
| 0
| 0.044776
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
688558631c3f21e329b1a10a009f7799c0302723
| 152
|
py
|
Python
|
organizations/admin.py
|
intherenzone/CRM
|
d0f3cffed01e5fddfc39c2281b26b2f376b71152
|
[
"MIT"
] | 2
|
2018-02-15T15:33:00.000Z
|
2018-02-15T16:29:12.000Z
|
organizations/admin.py
|
intherenzone/CRM
|
d0f3cffed01e5fddfc39c2281b26b2f376b71152
|
[
"MIT"
] | 1
|
2018-08-31T08:54:22.000Z
|
2018-08-31T08:54:22.000Z
|
organizations/admin.py
|
intherenzone/CRM
|
d0f3cffed01e5fddfc39c2281b26b2f376b71152
|
[
"MIT"
] | 12
|
2017-11-02T22:32:32.000Z
|
2018-04-12T05:13:25.000Z
|
from django.contrib import admin
# Register your models here.
from organizations.models import Organization

# Expose Organization in the Django admin using the default ModelAdmin.
admin.site.register(Organization)
| 19
| 46
| 0.789474
| 18
| 152
| 6.666667
| 0.666667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.157895
| 152
| 7
| 47
| 21.714286
| 0.9375
| 0.171053
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.666667
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
68adadf00f180a57f63efb3f9a9f9223596b55d9
| 142
|
py
|
Python
|
pyazblob/errors.py
|
RobertoPrevato/PyAzBlob
|
3031d30ef029a3d49ee8eccc9b2732249548e2ff
|
[
"MIT"
] | 4
|
2017-08-09T08:03:50.000Z
|
2020-10-06T20:15:30.000Z
|
pyazblob/errors.py
|
RobertoPrevato/PyAzBlob
|
3031d30ef029a3d49ee8eccc9b2732249548e2ff
|
[
"MIT"
] | null | null | null |
pyazblob/errors.py
|
RobertoPrevato/PyAzBlob
|
3031d30ef029a3d49ee8eccc9b2732249548e2ff
|
[
"MIT"
] | 2
|
2018-12-18T06:13:16.000Z
|
2020-02-19T10:13:49.000Z
|
class ConfigurationError(Exception):
    """Raised for invalid or missing application configuration."""
class ApplicationError(Exception):
    """Base class for application-level failures."""
class UploadFailure(ApplicationError):
    """Raised when an upload operation fails."""
| 12.909091
| 38
| 0.760563
| 12
| 142
| 9
| 0.5
| 0.240741
| 0.333333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.176056
| 142
| 10
| 39
| 14.2
| 0.923077
| 0
| 0
| 0.5
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.5
| 0
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
|
0
| 5
|
d79c27f412fc25919add065122c8e7d7661be02f
| 142
|
py
|
Python
|
backend/api/streamflow_api.py
|
jossM/streamflow
|
5c01db1439b25709c0a78a962b42142bfa692279
|
[
"Apache-2.0"
] | null | null | null |
backend/api/streamflow_api.py
|
jossM/streamflow
|
5c01db1439b25709c0a78a962b42142bfa692279
|
[
"Apache-2.0"
] | null | null | null |
backend/api/streamflow_api.py
|
jossM/streamflow
|
5c01db1439b25709c0a78a962b42142bfa692279
|
[
"Apache-2.0"
] | null | null | null |
# coding=utf-8
from fastapi import FastAPI

from api.tasks import add_tasks_resources

# Application entry point: a bare FastAPI app whose routes are all
# registered by the tasks module below.
app = FastAPI()

# all routes
add_tasks_resources(app)
| 14.2
| 41
| 0.788732
| 22
| 142
| 4.909091
| 0.590909
| 0.148148
| 0.314815
| 0.37037
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.008197
| 0.140845
| 142
| 9
| 42
| 15.777778
| 0.877049
| 0.161972
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
0416d564be2ff3cc37d1278e1285d2a45ecd3639
| 45
|
py
|
Python
|
tests/unit/bivariate/__init__.py
|
pvk-developer/Copulas
|
4de54e946ecb1e2bf831874e6a00a7d04d302804
|
[
"MIT"
] | 71
|
2018-06-20T12:07:34.000Z
|
2020-01-03T21:43:01.000Z
|
tests/unit/bivariate/__init__.py
|
Hooddi/Copulas
|
86dc1304fe4ffb51302fc37801d7f18c4ab4d88d
|
[
"MIT"
] | 75
|
2018-06-20T09:46:07.000Z
|
2019-12-23T15:04:19.000Z
|
tests/unit/bivariate/__init__.py
|
Hooddi/Copulas
|
86dc1304fe4ffb51302fc37801d7f18c4ab4d88d
|
[
"MIT"
] | 25
|
2018-06-24T18:01:11.000Z
|
2020-01-02T14:30:09.000Z
|
"""Copulas bivariate unit testing module."""
| 22.5
| 44
| 0.733333
| 5
| 45
| 6.6
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.111111
| 45
| 1
| 45
| 45
| 0.825
| 0.844444
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
f08e67ddfa07697c7452af1d6f00b386ef656e5b
| 283
|
py
|
Python
|
src/oscar_accounts/core.py
|
n8snyder/django-oscar-accounts
|
1d473f6ccf795989c7ced9356b4ce20c642debe0
|
[
"BSD-3-Clause"
] | 149
|
2015-01-09T18:36:57.000Z
|
2022-01-19T05:22:11.000Z
|
src/oscar_accounts/core.py
|
n8snyder/django-oscar-accounts
|
1d473f6ccf795989c7ced9356b4ce20c642debe0
|
[
"BSD-3-Clause"
] | 124
|
2015-01-21T05:27:40.000Z
|
2022-02-01T11:05:08.000Z
|
src/oscar_accounts/core.py
|
n8snyder/django-oscar-accounts
|
1d473f6ccf795989c7ced9356b4ce20c642debe0
|
[
"BSD-3-Clause"
] | 89
|
2015-01-10T08:14:14.000Z
|
2021-11-04T10:51:29.000Z
|
from oscar.core.loading import get_model

from oscar_accounts import names

# Resolve the Account model through Oscar's dynamic model loader.
Account = get_model('oscar_accounts', 'Account')
def redemptions_account():
    """Look up and return the account named ``names.REDEMPTIONS``."""
    account_name = names.REDEMPTIONS
    return Account.objects.get(name=account_name)
def lapsed_account():
    """Look up and return the account named ``names.LAPSED``."""
    account_name = names.LAPSED
    return Account.objects.get(name=account_name)
| 20.214286
| 54
| 0.780919
| 38
| 283
| 5.657895
| 0.421053
| 0.083721
| 0.186047
| 0.251163
| 0.362791
| 0.362791
| 0.362791
| 0
| 0
| 0
| 0
| 0
| 0.120141
| 283
| 13
| 55
| 21.769231
| 0.863454
| 0
| 0
| 0
| 0
| 0
| 0.074205
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.285714
| false
| 0
| 0.285714
| 0.285714
| 0.857143
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 5
|
f0a9d6d837d611db77b32f5e0c18ba59616040be
| 57
|
py
|
Python
|
netlens/visualization/__init__.py
|
deepfx/netlens
|
5bce3cac60076c52974e0526aeaf36e2710fc352
|
[
"MIT"
] | 15
|
2020-01-20T16:15:11.000Z
|
2020-11-15T11:47:27.000Z
|
netlens/visualization/__init__.py
|
deepfx/netlens
|
5bce3cac60076c52974e0526aeaf36e2710fc352
|
[
"MIT"
] | null | null | null |
netlens/visualization/__init__.py
|
deepfx/netlens
|
5bce3cac60076c52974e0526aeaf36e2710fc352
|
[
"MIT"
] | null | null | null |
from .param import ImageParam
from .render import OptVis
| 19
| 29
| 0.824561
| 8
| 57
| 5.875
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.140351
| 57
| 2
| 30
| 28.5
| 0.959184
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
f0c07744eac22946d1ad429741f26e7d34dfa712
| 131
|
py
|
Python
|
fastapi_crud/src/app/main.py
|
balapitchuka/fastapi_snippets
|
a5efc9bd3520412761131fcf4a3c9d11b1053ceb
|
[
"MIT"
] | null | null | null |
fastapi_crud/src/app/main.py
|
balapitchuka/fastapi_snippets
|
a5efc9bd3520412761131fcf4a3c9d11b1053ceb
|
[
"MIT"
] | null | null | null |
fastapi_crud/src/app/main.py
|
balapitchuka/fastapi_snippets
|
a5efc9bd3520412761131fcf4a3c9d11b1053ceb
|
[
"MIT"
] | null | null | null |
from fastapi import FastAPI

# Minimal FastAPI application exposing a single GET endpoint.
app = FastAPI()


@app.get("/hello")
def hello():
    # Liveness-style response payload.
    return {"message" : "fastapi is up and running"}
| 16.375
| 52
| 0.664122
| 18
| 131
| 4.833333
| 0.722222
| 0.229885
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.183206
| 131
| 8
| 52
| 16.375
| 0.813084
| 0
| 0
| 0
| 0
| 0
| 0.287879
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.2
| false
| 0
| 0.2
| 0.2
| 0.6
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 5
|
9bc5b730fb4b0c4f0c18ff636e7a072cb10953dd
| 30
|
py
|
Python
|
rosetta/tests/views.py
|
SergeyKubrak/django-rosetta
|
76e8387f8c838565adb8d6d0b6060c2b8c690436
|
[
"MIT"
] | 24
|
2016-08-06T18:10:54.000Z
|
2022-03-04T11:47:39.000Z
|
rosetta/tests/views.py
|
SergeyKubrak/django-rosetta
|
76e8387f8c838565adb8d6d0b6060c2b8c690436
|
[
"MIT"
] | 1
|
2017-03-28T02:36:50.000Z
|
2017-03-28T07:18:57.000Z
|
rosetta/tests/views.py
|
SergeyKubrak/django-rosetta
|
76e8387f8c838565adb8d6d0b6060c2b8c690436
|
[
"MIT"
] | 13
|
2017-03-28T02:35:32.000Z
|
2022-02-21T23:36:15.000Z
|
def dummy(request):
    """No-op view used as a URL target in tests; always returns None."""
    return None
| 7.5
| 19
| 0.633333
| 4
| 30
| 4.75
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.266667
| 30
| 3
| 20
| 10
| 0.863636
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| false
| 0.5
| 0
| 0
| 0.5
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 5
|
9bd5df0e0b3f6b4c2a6e9501cdd160e14a89c0b2
| 149
|
py
|
Python
|
myexman/__init__.py
|
Yif-Yang/simclr-pytorch
|
e962762016837d81a5a358407b552bad418ab162
|
[
"MIT"
] | 90
|
2020-12-10T14:07:16.000Z
|
2022-03-31T18:55:47.000Z
|
myexman/__init__.py
|
Yif-Yang/simclr-pytorch
|
e962762016837d81a5a358407b552bad418ab162
|
[
"MIT"
] | 9
|
2020-12-23T09:53:11.000Z
|
2022-01-28T12:47:49.000Z
|
myexman/__init__.py
|
Yif-Yang/simclr-pytorch
|
e962762016837d81a5a358407b552bad418ab162
|
[
"MIT"
] | 19
|
2021-01-03T13:35:54.000Z
|
2022-01-21T01:56:52.000Z
|
from .parser import (
    ExParser,
    simpleroot
)
from .index import (
    Index
)
from . import index
from . import parser

# Package version string.
__version__ = '0.0.2'
| 13.545455
| 21
| 0.66443
| 19
| 149
| 5
| 0.473684
| 0.231579
| 0.315789
| 0.442105
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.026549
| 0.241611
| 149
| 10
| 22
| 14.9
| 0.814159
| 0
| 0
| 0
| 0
| 0
| 0.033557
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.4
| 0
| 0.4
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
9bdd82ba6d9db095c6234b0f9915023b5d572910
| 27
|
py
|
Python
|
tests/test_extensions/extension1.py
|
CraftSpider/SpiderTools
|
4bf155feec7cb983e8d283d93552902ec85178a2
|
[
"MIT"
] | 5
|
2019-10-14T13:50:02.000Z
|
2021-09-23T18:48:27.000Z
|
tests/test_extensions/extension1.py
|
CraftSpider/SpiderTools
|
4bf155feec7cb983e8d283d93552902ec85178a2
|
[
"MIT"
] | null | null | null |
tests/test_extensions/extension1.py
|
CraftSpider/SpiderTools
|
4bf155feec7cb983e8d283d93552902ec85178a2
|
[
"MIT"
] | null | null | null |
def setup(bot):
pass
| 5.4
| 15
| 0.555556
| 4
| 27
| 3.75
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.333333
| 27
| 4
| 16
| 6.75
| 0.833333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| false
| 0.5
| 0
| 0
| 0.5
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 5
|
9beca84460505c1fced09e281c900f29916becd5
| 140
|
py
|
Python
|
ai_random.py
|
sun2125/class
|
e0058eb35dae903b04945ec9f329e9cbfcc48110
|
[
"MIT"
] | null | null | null |
ai_random.py
|
sun2125/class
|
e0058eb35dae903b04945ec9f329e9cbfcc48110
|
[
"MIT"
] | null | null | null |
ai_random.py
|
sun2125/class
|
e0058eb35dae903b04945ec9f329e9cbfcc48110
|
[
"MIT"
] | null | null | null |
def bet(game, round, funds, game_record, round_record):
    """Random betting strategy.

    Wagers a uniformly random amount from the available funds until
    round 9, after which it always bets the entire remaining funds.
    """
    import random

    bankroll = funds[0]
    if round < 9:
        return random.randint(0, bankroll)
    return bankroll
| 35
| 65
| 0.707143
| 23
| 140
| 4.217391
| 0.608696
| 0.123711
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.034783
| 0.178571
| 140
| 3
| 66
| 46.666667
| 0.808696
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0.333333
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
504495d7e9ca06307d0cd6bd72ba3d91caa6a1d3
| 131
|
py
|
Python
|
emoji/config.py
|
jdherg/octopus-holdings
|
0db5b9b4e4e0c10e03063128e3e034926e1c5a6f
|
[
"MIT"
] | 38
|
2015-07-11T00:03:10.000Z
|
2021-09-24T20:23:30.000Z
|
emoji/config.py
|
jdherg/octopus-holdings
|
0db5b9b4e4e0c10e03063128e3e034926e1c5a6f
|
[
"MIT"
] | 7
|
2016-02-11T21:50:10.000Z
|
2021-09-22T15:46:54.000Z
|
emoji/config.py
|
jdherg/octopus-holdings
|
0db5b9b4e4e0c10e03063128e3e034926e1c5a6f
|
[
"MIT"
] | 3
|
2016-09-26T02:40:53.000Z
|
2017-05-24T18:21:20.000Z
|
import json
import pathlib

# Load the emoji configuration shipped alongside this module
# (emoji_config.json in the same directory) once at import time.
with open(pathlib.Path(__file__).with_name("emoji_config.json")) as f:
    EMOJI_CONFIG = json.load(f)
| 21.833333
| 70
| 0.755725
| 21
| 131
| 4.380952
| 0.619048
| 0.23913
| 0.326087
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.122137
| 131
| 5
| 71
| 26.2
| 0.8
| 0
| 0
| 0
| 0
| 0
| 0.129771
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
acd7ad9d1e49c326f16de93a726d83a2e34e2818
| 291
|
py
|
Python
|
sac/__init__.py
|
hbutsuak95/iv_rl
|
0f72a8f077a238237027ea96b7d1160c35ac9959
|
[
"MIT"
] | 9
|
2022-01-16T11:27:00.000Z
|
2022-03-13T14:04:48.000Z
|
sac/__init__.py
|
hbutsuak95/iv_rl
|
0f72a8f077a238237027ea96b7d1160c35ac9959
|
[
"MIT"
] | null | null | null |
sac/__init__.py
|
hbutsuak95/iv_rl
|
0f72a8f077a238237027ea96b7d1160c35ac9959
|
[
"MIT"
] | null | null | null |
from .sac import SACTrainer, VarSACTrainer
from .ensembleSAC import EnsembleSAC, VarEnsembleSAC
from .iv_sac import IV_EnsembleSAC, IV_VarEnsembleSAC, IV_VarSAC
from .sunrise import SunriseSAC, Sunrise_VarEnsembleSAC
from .uwac import UWACSAC, UWAC_VarEnsembleSAC
from .main import run_sac
| 36.375
| 64
| 0.852234
| 37
| 291
| 6.513514
| 0.405405
| 0.224066
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.106529
| 291
| 7
| 65
| 41.571429
| 0.926923
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
ace96b29847d94e0ad359533ffc8ada9431b620b
| 140
|
py
|
Python
|
modules/import_specific_attributes.py
|
magicalcarpet/the_complete_python_course
|
0ac0c5015a93607d7d29258ac0a3fc38dda81bd2
|
[
"MIT"
] | null | null | null |
modules/import_specific_attributes.py
|
magicalcarpet/the_complete_python_course
|
0ac0c5015a93607d7d29258ac0a3fc38dda81bd2
|
[
"MIT"
] | null | null | null |
modules/import_specific_attributes.py
|
magicalcarpet/the_complete_python_course
|
0ac0c5015a93607d7d29258ac0a3fc38dda81bd2
|
[
"MIT"
] | null | null | null |
from calculator import creator, add, subtract
from math import sqrt

# Demo script: exercise names imported from the local calculator module
# and one function imported from the stdlib math module.
print(creator)
print(add(2, 5))
print(subtract(10, 3))
print(sqrt(49))
| 17.5
| 45
| 0.75
| 23
| 140
| 4.565217
| 0.608696
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.056911
| 0.121429
| 140
| 7
| 46
| 20
| 0.796748
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.333333
| 0
| 0.333333
| 0.666667
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 1
|
0
| 5
|
acedab4b50c728019dba842d5d62c4836acfcfc5
| 2,536
|
py
|
Python
|
Comd/level.py
|
Hotkota/Am
|
da55c4a710e6c90577e1db1f93f107d171462959
|
[
"MIT"
] | 3
|
2020-08-18T04:45:36.000Z
|
2021-01-22T15:58:44.000Z
|
Comd/level.py
|
Hotkota/Am
|
da55c4a710e6c90577e1db1f93f107d171462959
|
[
"MIT"
] | null | null | null |
Comd/level.py
|
Hotkota/Am
|
da55c4a710e6c90577e1db1f93f107d171462959
|
[
"MIT"
] | null | null | null |
import sqlite3
import discord
from discord.ext import commands
class Level(commands.Cog):
    """Discord cog exposing level/XP profile commands backed by SQLite."""

    def __init__(self, client):
        # Bot/client instance this cog is attached to.
        self.client = client

    # NOTE(review): this command is defined twice with the same name and
    # aliases; at class-creation time the second definition below replaces
    # this one, so this variant is effectively dead code.
    @commands.command(aliases = ["level", "lvl", "xp", "опыт"])
    async def уровень(self, ctx, *, arg):
        # NOTE(review): `arg` is a string command argument compared to the
        # int 18, so this condition is always True and the else branch
        # never runs here.
        if arg != 18:
            pass
        else:
            # NOTE(review): `member` is not defined in this scope — this
            # branch would raise NameError if it were ever reached.
            if member.bot:
                await ctx.send("У ботов нет профиля")
            else:
                with sqlite3.connect("../am/data/DB/Database.db") as conn:
                    cursor = conn.cursor()
                    # NOTE(review): f-string interpolation into SQL —
                    # injection risk; a parameterized query would be safer.
                    for row in cursor.execute(f"SELECT lvl, xp, name FROM users where id={arg}").fetchall():
                        emb = discord.Embed(title = f"Профиль {row[1]}",colour = discord.Color.red())
                        emb.description = f"Уровень: **{row[0]}**\nопыт: **{row[1]}**\nДо нового уровня: **{(5*row[0]**2+50*row[0]+100)-row[1]}**"
                        await ctx.send(embed = emb)

    @commands.command(aliases = ["level", "lvl", "xp", "опыт"])
    async def уровень(self, ctx, member: discord.Member):
        # Show the given member's level profile; refuse for bot accounts.
        if member.bot:
            await ctx.send("У ботов нет профиля")
        else:
            with sqlite3.connect("../am/data/DB/Database.db") as conn:
                cursor = conn.cursor()
                # NOTE(review): f-string interpolation into SQL (member.id
                # is an int here, but parameterization is still preferred).
                for row in cursor.execute(f"SELECT lvl, xp FROM users where id={member.id}").fetchall():
                    emb = discord.Embed(title = f"Профиль {ctx.message.author.name}",colour = discord.Color.red())
                    emb.set_thumbnail(url = ctx.message.author.avatar_url)
                    emb.description = f"Уровень: **{row[0]}**\nопыт: **{row[1]}**\nДо нового уровня: **{(5*row[0]**2+50*row[0]+100)-row[1]}**"
                    await ctx.send(embed = emb)

    @уровень.error
    async def Level_error(self, ctx, error):
        # Fallback: when no member argument is supplied, show the invoking
        # author's own profile instead of surfacing the error.
        if isinstance(error, commands.MissingRequiredArgument):
            with sqlite3.connect("../am/data/DB/Database.db") as conn:
                cursor = conn.cursor()
                for row in cursor.execute(f"SELECT lvl, xp FROM users where id={ctx.message.author.id}").fetchall():
                    emb = discord.Embed(title = f"Профиль {ctx.message.author.name}",colour = discord.Color.red())
                    emb.set_thumbnail(url = ctx.message.author.avatar_url)
                    emb.description = f"Уровень: **{row[0]}**\nопыт: **{row[1]}**\nДо нового уровня: **{(5*row[0]**2+50*row[0]+100)-row[1]}**"
                    await ctx.send(embed = emb)
def setup(client):
    """Standard discord.py extension hook: attach the Level cog."""
    cog = Level(client)
    client.add_cog(cog)
| 50.72
| 146
| 0.554416
| 325
| 2,536
| 4.295385
| 0.255385
| 0.025788
| 0.04298
| 0.04298
| 0.770057
| 0.755014
| 0.755014
| 0.729226
| 0.729226
| 0.729226
| 0
| 0.023639
| 0.282729
| 2,536
| 50
| 147
| 50.72
| 0.743815
| 0
| 0
| 0.568182
| 0
| 0.068182
| 0.266456
| 0.106819
| 0
| 0
| 0
| 0
| 0
| 1
| 0.045455
| false
| 0.022727
| 0.068182
| 0
| 0.136364
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
acfa4cd8be4c7fb3a584bfa15fcebe72e1973b20
| 13,799
|
py
|
Python
|
networkapiclient/OptionPool.py
|
shildenbrand/GloboNetworkAPI-client-python
|
728ea9d13e3004e62586f5eb6ae2eae2bc41a50e
|
[
"Apache-2.0"
] | 16
|
2015-05-09T16:33:01.000Z
|
2019-10-24T19:06:03.000Z
|
networkapiclient/OptionPool.py
|
shildenbrand/GloboNetworkAPI-client-python
|
728ea9d13e3004e62586f5eb6ae2eae2bc41a50e
|
[
"Apache-2.0"
] | 3
|
2019-08-09T20:18:12.000Z
|
2019-11-11T17:23:48.000Z
|
networkapiclient/OptionPool.py
|
shildenbrand/GloboNetworkAPI-client-python
|
728ea9d13e3004e62586f5eb6ae2eae2bc41a50e
|
[
"Apache-2.0"
] | 15
|
2015-02-03T17:10:59.000Z
|
2021-05-14T21:01:37.000Z
|
# -*- coding:utf-8 -*-
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from networkapiclient.exception import InvalidParameterError
from networkapiclient.utils import is_valid_int_param
from networkapiclient.ApiGenericClient import ApiGenericClient
class OptionPool(ApiGenericClient):
def __init__(self, networkapi_url, user, password, user_ldap=None):
"""Class constructor receives parameters to connect to the networkAPI.
:param networkapi_url: URL to access the network API.
:param user: User for authentication.
:param password: Password for authentication.
"""
super(
OptionPool,
self).__init__(
networkapi_url,
user,
password,
user_ldap)
def add(self, tipo_opcao, nome_opcao):
"""Inserts a new Option Pool and returns its identifier.
:param tipo_opcao: Type. String with a maximum of 50 characters and respect [a-zA-Z\_-]
:param nome_opcao_txt: Name Option. String with a maximum of 50 characters and respect [a-zA-Z\_-]
:return: Following dictionary:
::
{'id': < id > , 'type':<type>, 'name':<name>}
:raise InvalidParameterError: The value of tipo_opcao or nome_opcao_txt is invalid.
:raise DataBaseError: Networkapi failed to access the database.
:raise XMLError: Networkapi failed to generate the XML response.
"""
#optionpool_map = dict()
#optionpool_map['type'] = tipo_opcao
#optionpool_map['name'] = nome_opcao
url='api/pools/options/save/'
return self.post(url, {'type': tipo_opcao, "name":nome_opcao })
def modify(self, id_option_pool, tipo_opcao, nome_opcao):
"""Change Option Pool from by id.
:param id_option_pool: Identifier of the Option Pool. Integer value and greater than zero.
:param tipo_opcao: Type. String with a maximum of 50 characters and respect [a-zA-Z\_-]
:param nome_opcao_txt: Name Option. String with a maximum of 50 characters and respect [a-zA-Z\_-]
:return: None
:raise InvalidParameterError: Option Pool identifier is null or invalid.
:raise InvalidParameterError: The value of tipo_opcao or nome_opcao_txt is invalid.
:raise optionpoolNotFoundError: Option pool not registered.
:raise DataBaseError: Networkapi failed to access the database.
:raise XMLError: Networkapi failed to generate the XML response.
"""
if not is_valid_int_param(id_option_pool):
raise InvalidParameterError(
u'The identifier of Option Pool is invalid or was not informed.')
#optionpool_map = dict()
#optionpool_map['type'] = tipo_opcao
#optionpool_map['name'] = nome_opcao_txt
url = 'api/pools/options/' + str(id_option_pool) + '/'
return self.put(url,{'type': tipo_opcao, "name":nome_opcao } )
def remove(self, id_option_pool):
"""Remove Option pool by identifier and all Environment related .
:param id_option_pool: Identifier of the Option Pool. Integer value and greater than zero.
:return: None
:raise InvalidParameterError: Option Pool identifier is null and invalid.
:raise optionpoolNotFoundError: Option Pool not registered.
:raise optionpoolError: Option Pool associated with Pool.
:raise DataBaseError: Networkapi failed to access the database.
:raise XMLError: Networkapi failed to generate the XML response.
"""
if not is_valid_int_param(id_option_pool):
raise InvalidParameterError(
u'The identifier of Option Pool is invalid or was not informed.')
url = 'api/pools/options/' + str(id_option_pool) + '/'
return self.delete(url)
def get_option_pool(self, id_option_pool):
"""Search Option Pool by id.
:param id_option_pool: Identifier of the Option Pool. Integer value and greater than zero.
:return: Following dictionary:
::
{‘id’: < id_option_pool >,
‘type’: < tipo_opcao >,
‘name’: < nome_opcao_txt >}
:raise InvalidParameterError: Option Pool identifier is null and invalid.
:raise optionpoolNotFoundError: Option Pool not registered.
:raise DataBaseError: Networkapi failed to access the database.
:raise XMLError: Networkapi failed to generate the XML response.
"""
if not is_valid_int_param(id_option_pool):
raise InvalidParameterError(
u'The identifier of Option Pool is invalid or was not informed.')
url = 'api/pools/options/' + str(id_option_pool) + '/'
return self.get(url)
def get_all_option_pool(self, option_type=None):
"""Get all Option Pool.
:return: Dictionary with the following structure:
::
{[{‘id’: < id >,
‘type’: < tipo_opcao >,
‘name’: < nome_opcao_txt >}, ... other option pool ...] }
:raise optionpoolNotFoundError: Option Pool not registered.
:raise DataBaseError: Can't connect to networkapi database.
:raise XMLError: Failed to generate the XML response.
"""
if option_type:
url = 'api/pools/options/?type='+option_type
else:
url = 'api/pools/options/'
return self.get(url)
def get_all_environment_option_pool(self, id_environment=None, option_id=None, option_type=None):
"""Get all Option VIP by Environment .
:return: Dictionary with the following structure:
::
{[{‘id’: < id >,
option: {
'id': <id>
'type':<type>
'name':<name> }
environment: {
'id':<id>
.... all environment info }
etc to option pools ...] }
:raise EnvironmentVipNotFoundError: Environment Pool not registered.
:raise DataBaseError: Can't connect to networkapi database.
:raise XMLError: Failed to generate the XML response.
"""
url='api/pools/environment_options/'
if id_environment:
if option_id:
if option_type:
url = url + "?environment_id=" + str(id_environment)+ "&option_id=" + str(option_id) + "&option_type=" + option_type
else:
url = url + "?environment_id=" + str(id_environment)+ "&option_id=" + str(option_id)
else:
if option_type:
url = url + "?environment_id=" + str(id_environment) + "&option_type=" + option_type
else:
url = url + "?environment_id=" + str(id_environment)
elif option_id:
if option_type:
url = url + "?option_id=" + str(option_id) + "&option_type=" + option_type
else:
url = url + "?option_id=" + str(option_id)
elif option_type:
url = url + "?option_type=" + option_type
return self.get(url)
def associate_environment_option_pool(self, id_option_pool, id_environment):
"""Create a relationship of optionpool with Environment.
:param id_option_pool: Identifier of the Option Pool. Integer value and greater than zero.
:param id_environment: Identifier of the Environment . Integer value and greater than zero.
:return: Dictionary with the following structure:
{‘id’: < id >,
option: {
'id': <id>
'type':<type>
'name':<name> }
environment: {
'id':<id>
.... all environment info }
}
:raise InvalidParameterError: Option Pool/Environment Pool identifier is null and/or invalid.
:raise optionpoolNotFoundError: Option Pool not registered.
:raise EnvironmentVipNotFoundError: Environment Pool not registered.
:raise optionpoolError: Option Pool is already associated with the environment pool.
:raise UserNotAuthorizedError: User does not have authorization to make this association.
:raise DataBaseError: Networkapi failed to access the database.
:raise XMLError: Networkapi failed to generate the XML response.
"""
if not is_valid_int_param(id_option_pool):
raise InvalidParameterError(
u'The identifier of Option Pool is invalid or was not informed.')
if not is_valid_int_param(id_environment):
raise InvalidParameterError(
u'The identifier of Environment Pool is invalid or was not informed.')
url= 'api/pools/environment_options/save/'
return self.post(url, {'option_id': id_option_pool,"environment_id":id_environment })
def get_environment_option_pool(self, environment_option_id ):
"""Get Environment Option Pool by id .
:return: Dictionary with the following structure:
::
{‘id’: < id >,
option: {
'id': <id>
'type':<type>
'name':<name> }
environment: {
'id':<id>
.... all environment info }
}
:raise EnvironmentVipNotFoundError: Environment Pool not registered.
:raise DataBaseError: Can't connect to networkapi database.
:raise XMLError: Failed to generate the XML response.
"""
url = 'api/pools/environment_options/' + str(environment_option_id) + '/'
return self.get(url)
def disassociate_environment_option_pool(self, environment_option_id):
"""Remove a relationship of optionpool with Environment.
:param id_option_pool: Identifier of the Option Pool. Integer value and greater than zero.
:param id_environment: Identifier of the Environment Pool. Integer value and greater than zero.
:return: { 'id': < environment_option_id> }
:raise InvalidParameterError: Option Pool/Environment Pool identifier is null and/or invalid.
:raise optionpoolNotFoundError: Option Pool not registered.
:raise EnvironmentVipNotFoundError: Environment VIP not registered.
:raise optionpoolError: Option pool is not associated with the environment pool
:raise UserNotAuthorizedError: User does not have authorization to make this association.
:raise DataBaseError: Networkapi failed to access the database.
:raise XMLError: Networkapi failed to generate the XML response.
"""
if not is_valid_int_param(environment_option_id):
raise InvalidParameterError(
u'The identifier of Option Pool is invalid or was not informed.')
if not is_valid_int_param(environment_option_id):
raise InvalidParameterError(
u'The identifier of Environment Pool is invalid or was not informed.')
url = 'api/pools/environment_options/' + str(environment_option_id) + '/'
return self.delete(url)
def modify_environment_option_pool(self, environment_option_id, id_option_pool, id_environment):
    """Update an existing Environment Option Pool association.

    (The original docstring said "Remove a relationship", which described the
    disassociate method, not this PUT-based update.)

    :param environment_option_id: Identifier of the association being updated.
        Integer value and greater than zero.
    :param id_option_pool: New Option Pool identifier. Integer value and greater than zero.
    :param id_environment: New Environment Pool identifier. Integer value and greater than zero.

    :return: Dictionary with the following structure:

    ::

        {'id': <id>,
         'option': {'id': <id>,
                    'type': <type>,
                    'name': <name>},
         'environment': {'id': <id>,
                         ... all environment info}}

    :raise InvalidParameterError: Option Pool/Environment Pool identifier is null and/or invalid.
    :raise optionpoolNotFoundError: Option Pool not registered.
    :raise EnvironmentVipNotFoundError: Environment VIP not registered.
    :raise optionpoolError: Option pool is not associated with the environment pool
    :raise UserNotAuthorizedError: User does not have authorization to make this association.
    :raise DataBaseError: Networkapi failed to access the database.
    :raise XMLError: Networkapi failed to generate the XML response.
    """
    if not is_valid_int_param(environment_option_id):
        raise InvalidParameterError(
            u'The identifier of Environment Option Pool is invalid or was not informed.')
    # Validate the replacement ids as well, for consistency with the
    # associate method, instead of deferring the failure to the server.
    if not is_valid_int_param(id_option_pool):
        raise InvalidParameterError(
            u'The identifier of Option Pool is invalid or was not informed.')
    if not is_valid_int_param(id_environment):
        raise InvalidParameterError(
            u'The identifier of Environment Pool is invalid or was not informed.')

    url = 'api/pools/environment_options/' + str(environment_option_id) + '/'
    return self.put(url, {'option_id': id_option_pool, "environment_id": id_environment})
| 39.538682
| 137
| 0.633452
| 1,566
| 13,799
| 5.439336
| 0.121967
| 0.075135
| 0.029584
| 0.022306
| 0.810754
| 0.783517
| 0.756163
| 0.713783
| 0.696995
| 0.689951
| 0
| 0.00132
| 0.286325
| 13,799
| 349
| 138
| 39.538682
| 0.863627
| 0.560258
| 0
| 0.494253
| 0
| 0
| 0.2059
| 0.041099
| 0
| 0
| 0
| 0
| 0
| 1
| 0.126437
| false
| 0.022989
| 0.034483
| 0
| 0.287356
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
4a107ca1f19ec60ef21d3c3f2c805d46aff92d02
| 36
|
py
|
Python
|
__init__.py
|
sickless/flask_private_area
|
09b2f9382c0426f5ed63488f9fd8ca6d4b3f751c
|
[
"BSD-3-Clause"
] | null | null | null |
__init__.py
|
sickless/flask_private_area
|
09b2f9382c0426f5ed63488f9fd8ca6d4b3f751c
|
[
"BSD-3-Clause"
] | null | null | null |
__init__.py
|
sickless/flask_private_area
|
09b2f9382c0426f5ed63488f9fd8ca6d4b3f751c
|
[
"BSD-3-Clause"
] | null | null | null |
from .flask_private_area import app
| 18
| 35
| 0.861111
| 6
| 36
| 4.833333
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.111111
| 36
| 1
| 36
| 36
| 0.90625
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
4a16f30b585aa05308a78fa566522aba1b373df7
| 92
|
py
|
Python
|
enthought/scripting/util.py
|
enthought/etsproxy
|
4aafd628611ebf7fe8311c9d1a0abcf7f7bb5347
|
[
"BSD-3-Clause"
] | 3
|
2016-12-09T06:05:18.000Z
|
2018-03-01T13:00:29.000Z
|
enthought/scripting/util.py
|
enthought/etsproxy
|
4aafd628611ebf7fe8311c9d1a0abcf7f7bb5347
|
[
"BSD-3-Clause"
] | 1
|
2020-12-02T00:51:32.000Z
|
2020-12-02T08:48:55.000Z
|
enthought/scripting/util.py
|
enthought/etsproxy
|
4aafd628611ebf7fe8311c9d1a0abcf7f7bb5347
|
[
"BSD-3-Clause"
] | null | null | null |
# proxy module
from __future__ import absolute_import
from apptools.scripting.util import *
| 23
| 38
| 0.836957
| 12
| 92
| 6
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.119565
| 92
| 3
| 39
| 30.666667
| 0.888889
| 0.130435
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
4a2086e58a509af6536a764a7ee169742c362e19
| 131
|
py
|
Python
|
recipes-support/pot-watcher/files/play-sound.py
|
masselstine/meta-alexa
|
bbb44fa29a73d1cc9670b24a031acfdcf100e8d1
|
[
"MIT"
] | null | null | null |
recipes-support/pot-watcher/files/play-sound.py
|
masselstine/meta-alexa
|
bbb44fa29a73d1cc9670b24a031acfdcf100e8d1
|
[
"MIT"
] | null | null | null |
recipes-support/pot-watcher/files/play-sound.py
|
masselstine/meta-alexa
|
bbb44fa29a73d1cc9670b24a031acfdcf100e8d1
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python3
# Play an alert sound on a remote host: ssh into 192.168.42.1 and run aplay
# on a pre-installed wav file, letting pexpect answer the interactive
# password prompt automatically.
import pexpect
# SECURITY NOTE(review): the ssh password is hard-coded in plain text below
# and host-key checking is left to ssh defaults; key-based authentication
# would avoid embedding the credential in the script.
pexpect.run("ssh 192.168.42.1 'aplay /root/beedoo.wav'", events={'(?i)password':'incendia\n'})
| 21.833333
| 94
| 0.687023
| 21
| 131
| 4.285714
| 0.952381
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.082645
| 0.076336
| 131
| 5
| 95
| 26.2
| 0.661157
| 0.129771
| 0
| 0
| 0
| 0
| 0.557522
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.5
| 0.5
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 0
|
0
| 5
|
4a229cfb509c4568da640a057c48ca45e39ebc37
| 31
|
py
|
Python
|
torch/ao/nn/__init__.py
|
Hacky-DH/pytorch
|
80dc4be615854570aa39a7e36495897d8a040ecc
|
[
"Intel"
] | 60,067
|
2017-01-18T17:21:31.000Z
|
2022-03-31T21:37:45.000Z
|
torch/ao/nn/__init__.py
|
Hacky-DH/pytorch
|
80dc4be615854570aa39a7e36495897d8a040ecc
|
[
"Intel"
] | 66,955
|
2017-01-18T17:21:38.000Z
|
2022-03-31T23:56:11.000Z
|
torch/ao/nn/__init__.py
|
Hacky-DH/pytorch
|
80dc4be615854570aa39a7e36495897d8a040ecc
|
[
"Intel"
] | 19,210
|
2017-01-18T17:45:04.000Z
|
2022-03-31T23:51:56.000Z
|
from torch.ao.nn import sparse
| 15.5
| 30
| 0.806452
| 6
| 31
| 4.166667
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.129032
| 31
| 1
| 31
| 31
| 0.925926
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
c5c585fa95ef4fe083cdfe5840b71c51953033bc
| 74
|
py
|
Python
|
crpm/test_noop.py
|
dmontemayor/CRPM
|
e896831fad7bed42d17574b137e600fc5adbf6b0
|
[
"MIT"
] | null | null | null |
crpm/test_noop.py
|
dmontemayor/CRPM
|
e896831fad7bed42d17574b137e600fc5adbf6b0
|
[
"MIT"
] | null | null | null |
crpm/test_noop.py
|
dmontemayor/CRPM
|
e896831fad7bed42d17574b137e600fc5adbf6b0
|
[
"MIT"
] | null | null | null |
"""NOOP test
"""
def test_noop():
    """Placeholder test that deliberately performs no checks.

    Exists so the test runner always has at least one passing test to collect.
    """
| 10.571429
| 30
| 0.527027
| 9
| 74
| 4.222222
| 0.666667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.256757
| 74
| 6
| 31
| 12.333333
| 0.690909
| 0.513514
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| true
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 1
| 0
|
0
| 5
|
c5cd8db6c625bf4114c898aef58b0aeceb6760cd
| 55,957
|
py
|
Python
|
DataAnalysis/Data_analysis_script.py
|
RafaBO/Cell-tracing
|
598cc796d3b25f1d66ab3431274d0f11c310a370
|
[
"MIT",
"BSD-3-Clause"
] | 176
|
2018-09-24T10:04:14.000Z
|
2022-03-30T18:38:09.000Z
|
DataAnalysis/Data_analysis_script.py
|
tb901029/Usiigaci
|
263f599e40f31e81c07d78bb756e689b67cc086f
|
[
"MIT",
"BSD-3-Clause"
] | 24
|
2018-11-08T14:12:56.000Z
|
2021-12-10T23:26:26.000Z
|
DataAnalysis/Data_analysis_script.py
|
tb901029/Usiigaci
|
263f599e40f31e81c07d78bb756e689b67cc086f
|
[
"MIT",
"BSD-3-Clause"
] | 70
|
2018-09-07T03:53:06.000Z
|
2022-03-29T12:59:48.000Z
|
'''
Single cell tracking data processing script
Hsieh-Fu Tsai (hsiehfutsai@gmail.com), Tyler Sloan(info@quorumetrix.com), Amy Shen(amy.shen@oist.jp)
purpose:
this notebook aims to be a general tool for analysis of single cell migration data with use of opensource tools.
Input data:
the script can process cell tracking data from ImageJ, Lineage Mapper, Metamorph, or Usiigaci tracker.
If you use this code, please cite the following paper:
Hsieh-Fu Tsai, Joanna Gajda, Tyler Sloan, Andrei Rares, Amy Shen, Usiigaci: Label-free instance-aware cell tracking in phase contrast microscopy using Mask R-CNN.
Version:
v1.0 2018.08.19
License:
This script is released under MIT license
Copyright <2018> <Okinawa Institute of Science and Technology Graduate University>
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
'''
#import libraries
import numpy as np
import pandas as pd
import scipy
from IPython.core.display import display
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d.art3d import Line3DCollection
from matplotlib.collections import LineCollection
from matplotlib import colors as mcolors
from matplotlib.colors import ListedColormap, BoundaryNorm
import seaborn as sns
import os
from itertools import groupby
from operator import itemgetter
import imageio
from read_roi import read_roi_file
from read_roi import read_roi_zip
# ---- Experiment configuration --------------------------------------------
# Number of frames in the time-lapse sequence; every kept cell track must
# span exactly this many frames.
n_frames = 61
# Time interval between consecutive frames.
t_inc = 10 # in minutes
print("Total frame of time lapse is %d" %(n_frames))
print("Time interval is %d minutes"%(t_inc))
# Root folder (or single csv file) holding the tracking data.
# NOTE(review): hard-coded Windows-specific absolute path — adjust per machine.
location = r'C:\Users\Davince\Dropbox (OIST)\Deeplearning_system\tracking project\Testautomaticfinding'
# How `location` is interpreted: 'folder' (scan for tracks.csv) or 'csv'.
location_type ='folder'
# Source tracker format: 'ImageJ', 'Usiigaci', 'LineageMapper', or 'Metamorph'.
data_type = 'Usiigaci'
# ---- Input data loading ---------------------------------------------------
# Load the tracking table for the configured tracker format and keep only
# cells that appear in every frame (complete tracks) in `selected_df`.
if data_type=='ImageJ':
    if location_type == 'csv':
        df_ij = pd.read_csv(location)
        # ImageJ results list one row per (cell, frame), so rows/frames = cells.
        n_cells_ij = int(len(df_ij) / n_frames)
        timestamps = np.linspace(0, n_frames*t_inc, n_frames+1)
        print("Cell track numbers is %d"%(n_cells_ij))
elif data_type=='LineageMapper':
    if location_type=='csv':
        df_LM = pd.read_csv(location)
        count = df_LM['Cell ID'].value_counts()
        # Keep only ids tracked through all n_frames frames.
        cell_ids_LM = count[count==n_frames].index.tolist()
        n_cells_LM = int(len(cell_ids_LM))
        timestamps = np.linspace(0, n_frames*t_inc, n_frames+1)
        print("Cell track number is: " + str(n_cells_LM))
        col_names = df_LM.columns.tolist()
        selected_df = pd.DataFrame(columns=col_names)
        for i in cell_ids_LM:
            selected_df = selected_df.append(df_LM.loc[df_LM['Cell ID']==i].copy())
        selected_df.reset_index(drop=True, inplace=True)
elif data_type=='Metamorph':
    if location_type=='csv':
        df_meta = pd.read_csv(location)
        count = df_meta['Object #'].value_counts()
        cell_ids_meta = count[count==n_frames].index.tolist()
        n_cells_meta = int(len(cell_ids_meta))
        timestamps = np.linspace(0, n_frames*t_inc, n_frames+1)
        print("Cell track number is:" + str(n_cells_meta))
        col_names = df_meta.columns.tolist()
        selected_df = pd.DataFrame(columns=col_names)
        for i in cell_ids_meta:
            selected_df = selected_df.append(df_meta.loc[df_meta['Object #']==i].copy())
        selected_df.reset_index(drop=True, inplace=True)
elif data_type=='Usiigaci':
    if location_type=='csv':
        df_usiigaci = pd.read_csv(location)
        count = df_usiigaci['particle'].value_counts()
        cell_ids_usiigaci = count[count==n_frames].index.tolist() # finding only cells that exist through all the frames
        n_cells_usiigaci = int(len(cell_ids_usiigaci))
        timestamps = np.linspace(0, n_frames*t_inc, n_frames+1)
        print("Cell track number is:" + str(n_cells_usiigaci))
        col_names = df_usiigaci.columns.tolist()
        selected_df = pd.DataFrame(columns=col_names)
        for i in cell_ids_usiigaci:
            selected_df = selected_df.append(df_usiigaci.loc[df_usiigaci['particle']==i].copy())
        selected_df.reset_index(drop=True, inplace=True)
    if location_type == 'folder':
        # Recursively look for */tracks.csv files produced by the Usiigaci
        # tracker (one per replicate folder) below `location`.
        all_files = []
        sub_directory = []
        for root, dirs, files in os.walk(location):
            for file in files:
                if file.endswith("tracks.csv"):
                    relativePath = os.path.relpath(root, location)
                    if relativePath == ".":
                        relativePath = ""
                    all_files.append((relativePath.count(os.path.sep),relativePath, file))
        # Sort deepest-first, then collapse to the unique containing folders.
        all_files.sort(reverse=True)
        for (count, folder), files in groupby(all_files, itemgetter(0, 1)):
            sub_directory.append(folder)
        print("Found the following directories containing Usiigaci tracked results:")
        print("\n".join(str(x) for x in sub_directory))
        print("Making new ids and concatenate dataframe")
        frame_list = []
        for i in range(0, len(sub_directory)):
            # NOTE(review): the literal "\\tracks.csv" separator is
            # Windows-only; os.path.join with separate components is portable.
            path = os.path.join(location, str(sub_directory[i]+"\\tracks.csv"))
            # Replicate id = folder-name prefix before the first '_'.
            replicate_id = sub_directory[i].split('_')[0]
            df_usiigaci = pd.read_csv(path)
            cell_number = df_usiigaci.index.size
            new_id_list = []
            # Make track ids unique across replicates: '<replicate>_<track id>'.
            # NOTE(review): this inner loop reuses the outer loop variable `i`;
            # harmless in Python but easy to misread.
            for i in range(0, df_usiigaci.index.size):
                new_id = replicate_id + "_" + str(df_usiigaci.iloc[i, 0])
                new_id_list.append(new_id)
            df_usiigaci['newid'] = new_id_list
            frame_list.append(df_usiigaci)
        # Concatenate all replicates into one dataframe and persist it.
        df_combined = pd.concat(frame_list, ignore_index=True)
        df_combined.to_csv(os.path.join(location + "\\combined.csv"))
        count = df_combined['newid'].value_counts()
        cell_ids_usiigaci = count[count==n_frames].index.tolist() # finding only cells that exist through all the frames
        n_cells_usiigaci = int(len(cell_ids_usiigaci))
        timestamps = np.linspace(0, n_frames*t_inc, n_frames+1)
        print("Cell track number is:" + str(n_cells_usiigaci))
        col_names = df_usiigaci.columns.tolist()
        selected_df = pd.DataFrame(columns=col_names)
        for i in cell_ids_usiigaci:
            selected_df = selected_df.append(df_combined.loc[df_combined['newid']==i].copy())
        selected_df.reset_index(drop=True, inplace=True)
        selected_df.to_csv(os.path.join(location+"\\selected.csv"))
else:
    print("Data loading error")
# ---- Reshape tracking tables into a numpy time array ----------------------
# props_t_array layout: [cell, property, frame] with 14 property slots:
#   0:x  1:y  2:area  3:perimeter  4:angle  5:circularity/solidity
#   6:segment length     7:cumulative path   8:orientation
#   9:euclidean distance 10:speed            11:directedness
#   12:turn angle        13:direction autocorrelation
# Slots 6-13 are filled by the calculation section further below.
if data_type=='ImageJ':
    print("processing ImageJ data")
    props_t_array = []
    props_t_array = np.empty([n_cells_ij, 14, n_frames]) # Creates a time array, formatted like a spreadsheet, cells in rows, columns for X and Y, and t in Z
    cell_dfs = []
    ind_i = 0
    i_cell = 0
    # ImageJ restarts the 'Slice' counter for each new cell, so a drop in
    # 'Slice' marks a cell boundary.
    for i in range(1,len(df_ij)): # Using 1 instead of zero here avoids indexing -1, but won't skip first row being copied because ind_i initialized as zero above.
        if(df_ij.loc[i-1,'Slice'] > df_ij.loc[i,'Slice']):
            ind_f = i - 1
            sub_df = df_ij.loc[ind_i:ind_f,:]
            ind_i = i
            # Copy the measurements of interest into the numpy array.
            # NOTE(review): assumes every cell spans exactly n_frames rows.
            props_t_array[i_cell,0,:] = sub_df['X']
            props_t_array[i_cell,1,:] = sub_df['Y']
            props_t_array[i_cell,2,:] = sub_df['Area']
            props_t_array[i_cell,3,:] = sub_df['Perim.']
            props_t_array[i_cell,4,:] = sub_df['Angle']
            props_t_array[i_cell,5,:] = sub_df['Circ.']
            cell_dfs.append(sub_df) # add also to a list of dataframes
            i_cell = i_cell + 1
        if(i == len(df_ij) - 1): # A special case for the last cell in the results file (no following 'Slice' drop).
            ind_f = i
            sub_df =df_ij.loc[ind_i:ind_f,:]
            props_t_array[i_cell,0,:] = sub_df['X']
            props_t_array[i_cell,1,:] = sub_df['Y']
            props_t_array[i_cell,2,:] = sub_df['Area']
            props_t_array[i_cell,3,:] = sub_df['Perim.']
            props_t_array[i_cell,4,:] = sub_df['Angle']
            props_t_array[i_cell,5,:] = sub_df['Circ.']
    # Shift coordinates so that all tracks start at the same location in the plot.
    zerod_t_array = np.empty([n_cells_ij, 2, n_frames])
    for i in range(0,n_cells_ij):
        for j in range(0,n_frames):
            zerod_t_array[i,0,j] = props_t_array[i,0,j] - props_t_array[i,0,0]
            zerod_t_array[i,1,j] = props_t_array[i,1,j] - props_t_array[i,1,0]
    n_cells = n_cells_ij
elif data_type=='Usiigaci':
    print("processing Usiigaci data")
    props_t_array = []
    props_t_array = np.empty([n_cells_usiigaci, 14, n_frames])
    n_rows_csv=len(selected_df)
    print('Number of cells: '+ str(n_cells_usiigaci))
    print('Number of rows: '+str(n_rows_csv))
    # Sanity check: rows must equal cells x frames for the fixed-stride slicing below.
    if(int(n_rows_csv / n_cells_usiigaci) != n_frames): # We can use this to parse the file
        print('Error: improper number of rows in tracked file for the number of cells and timepoints.')
    cell_dfs = []
    ind_i = 0
    for i_cell in range(0,n_cells_usiigaci):
        ind_f = ind_i + n_frames - 1
        sub_df = selected_df.loc[ind_i:ind_f,:]
        props_t_array[i_cell,0,:] = sub_df['x']
        props_t_array[i_cell,1,:] = sub_df['y']
        props_t_array[i_cell,2,:] = sub_df['area']
        props_t_array[i_cell,3,:] = sub_df['perimeter']
        props_t_array[i_cell,4,:] = sub_df['angle']
        # NOTE(review): slot 5 (solidity) is left uninitialized np.empty
        # garbage here, yet the stats section later reads it for Usiigaci data.
        #props_t_array[i_cell,5,:] = sub_df['solidity']
        ind_i = ind_i + n_frames
    n_cells = n_cells_usiigaci
elif data_type=='LineageMapper':
    print("processing lineage mapper data")
    props_t_array = []
    props_t_array = np.empty([n_cells_LM, 14, n_frames])
    n_rows_csv = len(selected_df)
    print('Number of cells: ' + str(n_cells_LM))
    print('Number of rows: ' + str(n_rows_csv))
    if(int(n_rows_csv / n_cells_LM) != n_frames): # We can use this to parse the file
        print('Error: improper number of rows in trk file for the number of cells and timepoints.')
    cell_dfs = []
    ind_i = 0
    for i_cell in range(0,n_cells_LM):
        ind_f = ind_i + n_frames - 1
        sub_df = selected_df.loc[ind_i:ind_f,:]
        # Lineage Mapper only supplies positions; shape columns stay empty.
        props_t_array[i_cell,0,:] = sub_df['X Coordinate']
        props_t_array[i_cell,1,:] = sub_df['Y Coordinate']
        ind_i = ind_i + n_frames
    n_cells = n_cells_LM
elif data_type=='Metamorph':
    print("processing metamorph data")
    props_t_array = []
    props_t_array = np.empty([n_cells_meta, 14, n_frames])
    n_rows_csv = len(selected_df)
    print('Number of cells: ' + str(n_cells_meta))
    print('Number of rows: ' + str(n_rows_csv))
    if(int(n_rows_csv / n_cells_meta) != n_frames): # We can use this to parse the file
        print('Error: improper number of rows in the file for the number of cells and timepoints.')
    cell_dfs = []
    ind_i = 0
    for i_cell in range(0,n_cells_meta):
        ind_f = ind_i + n_frames - 1
        sub_df = selected_df.loc[ind_i:ind_f,:]
        # Metamorph only supplies positions; shape columns stay empty.
        props_t_array[i_cell,0,:] = sub_df['X']
        props_t_array[i_cell,1,:] = sub_df['Y']
        ind_i = ind_i + n_frames
    n_cells = n_cells_meta
else:
    print("no data found")
# ---- Derive migration metrics per cell and per frame ----------------------
# Fills property slots 6-13 of props_t_array from the raw x/y/angle columns.
# NOTE(review): the three branches below are near-identical copies; only the
# orientation handling differs (ImageJ converts degrees to radians, Usiigaci
# does not, the fallback branch skips orientation entirely).
if data_type=='ImageJ':
    for i in range(0,n_cells):
        for j in range(0, n_frames):
            # Segment length: displacement since the previous frame.
            if(j > 0):
                segment = np.sqrt(pow((props_t_array[i,0,j]-props_t_array[i,0,j-1]),2) + pow((props_t_array[i,1,j]-props_t_array[i,1,j-1]),2))
            else:
                segment = 0
            props_t_array[i,6,j] = segment
            # Cumulative path length travelled so far.
            if(j > 0):
                cumulative = cumulative + segment
            else:
                cumulative = 0
            props_t_array[i,7,j] = cumulative
            # Orientation: cos(2*theta) of the cell's long axis (degrees -> radians).
            axis_angle = props_t_array[i,4,j]
            orientation = np.cos(2 * np.radians(axis_angle))
            props_t_array[i,8,j] = orientation
            # Euclidean distance from the starting position to the current frame.
            if(j > 0):
                euc_dist = np.sqrt(pow((props_t_array[i,0,j]-props_t_array[i,0,0]),2) + pow((props_t_array[i,1,j]-props_t_array[i,1,0]),2))
            else:
                euc_dist = 0
            props_t_array[i,9,j] = euc_dist
            # Migration speed in microns per hour (t_inc is in minutes).
            if(j > 0):
                speed = euc_dist / (j*t_inc / 60)
            else:
                speed = 0 # Or should it be NaN??
            props_t_array[i,10,j] = speed
            # Directedness = delta-x / euclidean distance (per Paul's spreadsheet).
            # NOTE(review): divides by euc_dist, which is 0 for a cell that
            # has not moved — yields inf/nan for stationary cells.
            if(j > 0): # Doesn't make sense to calculate this on the first frame.
                directedness = (props_t_array[i,0,j]-props_t_array[i,0,0]) / euc_dist
            else:
                directedness = 0
            props_t_array[i,11,j] = directedness
            # Turn angle of the last displacement, in degrees.
            if(j > 0): # Doesn't make sense to calculate this on the first frame.
                turn_angle_radians = np.arctan((props_t_array[i,1,j] - props_t_array[i,1,j-1]) / (props_t_array[i,0,j] - props_t_array[i,0,j-1]))
                turn_angle = np.degrees(turn_angle_radians)
            else:
                turn_angle = 0
            props_t_array[i,12,j] = turn_angle
            # Endpoint directionality ratio (confinement ratio, meandering index).
            # NOTE(review): ep_dr is computed but never stored anywhere, and
            # cumulative/euc_dist can divide by zero; likely leftover code.
            if(j > 0):
                ep_dr = cumulative / euc_dist
            else:
                ep_dr = 0
            # Direction autocorrelation between consecutive turn angles.
            # NOTE(review): np.cos expects radians but turn angles were stored
            # in degrees — confirm intended units.
            if(j > 0):
                dir_auto = np.cos(props_t_array[i,12,j] - props_t_array[i,12,j-1])
            else:
                dir_auto = 0
            props_t_array[i,13,j] = dir_auto
elif data_type=='Usiigaci':
    for i in range(0,n_cells):
        for j in range(0, n_frames):
            # Segment length since the previous frame.
            if(j > 0):
                segment = np.sqrt(pow((props_t_array[i,0,j]-props_t_array[i,0,j-1]),2) + pow((props_t_array[i,1,j]-props_t_array[i,1,j-1]),2))
            else:
                segment = 0
            props_t_array[i,6,j] = segment
            # Cumulative path length.
            if(j > 0):
                cumulative = cumulative + segment
            else:
                cumulative = 0
            props_t_array[i,7,j] = cumulative
            # Orientation of the long axis.
            # NOTE(review): unlike the ImageJ branch there is no np.radians()
            # conversion here — confirm whether Usiigaci angles are already radians.
            orientation = np.cos(2*props_t_array[i,4,j])
            props_t_array[i,8,j] = orientation
            # Euclidean distance from start to current frame.
            if(j > 0):
                euc_dist = np.sqrt(pow((props_t_array[i,0,j]-props_t_array[i,0,0]),2) + pow((props_t_array[i,1,j]-props_t_array[i,1,0]),2))
            else:
                euc_dist = 0
            props_t_array[i,9,j] = euc_dist
            # Migration speed in microns per hour.
            if(j > 0):
                speed = euc_dist / (j*t_inc / 60)
            else:
                speed = 0 # Or should it be NaN??
            props_t_array[i,10,j] = speed
            # Directedness = delta-x / euclidean distance (see note in ImageJ branch).
            if(j > 0):
                directedness = (props_t_array[i,0,j]-props_t_array[i,0,0]) / euc_dist
            else:
                directedness = 0
            props_t_array[i,11,j] = directedness
            # Turn angle, in degrees.
            if(j > 0):
                turn_angle_radians = np.arctan((props_t_array[i,1,j] - props_t_array[i,1,j-1]) / (props_t_array[i,0,j] - props_t_array[i,0,j-1]))
                turn_angle = np.degrees(turn_angle_radians)
            else:
                turn_angle = 0
            props_t_array[i,12,j] = turn_angle
            # Endpoint directionality ratio — computed but never stored (see note above).
            if(j > 0):
                ep_dr = cumulative / euc_dist
            else:
                ep_dr = 0
            # Direction autocorrelation (degrees fed to np.cos — see note above).
            if(j > 0):
                dir_auto = np.cos(props_t_array[i,12,j] - props_t_array[i,12,j-1])
            else:
                dir_auto = 0
            props_t_array[i,13,j] = dir_auto
else:
    # Position-only formats (LineageMapper / Metamorph): no orientation slot.
    for i in range(0,n_cells):
        for j in range(0, n_frames):
            # Segment length since the previous frame.
            if(j > 0):
                segment = np.sqrt(pow((props_t_array[i,0,j]-props_t_array[i,0,j-1]),2) + pow((props_t_array[i,1,j]-props_t_array[i,1,j-1]),2))
            else:
                segment = 0
            props_t_array[i,6,j] = segment
            # Cumulative path length.
            if(j > 0):
                cumulative = cumulative + segment
            else:
                cumulative = 0
            props_t_array[i,7,j] = cumulative
            # Euclidean distance from start to current frame.
            if(j > 0):
                euc_dist = np.sqrt(pow((props_t_array[i,0,j]-props_t_array[i,0,0]),2) + pow((props_t_array[i,1,j]-props_t_array[i,1,0]),2))
            else:
                euc_dist = 0
            props_t_array[i,9,j] = euc_dist
            # Migration speed in microns per hour.
            if(j > 0):
                speed = euc_dist / (j*t_inc / 60)
            else:
                speed = 0 # Or should it be NaN??
            props_t_array[i,10,j] = speed
            # Directedness = delta-x / euclidean distance (see note in ImageJ branch).
            if(j > 0):
                directedness = (props_t_array[i,0,j]-props_t_array[i,0,0]) / euc_dist
            else:
                directedness = 0
            props_t_array[i,11,j] = directedness
            # Turn angle, in degrees.
            if(j > 0):
                turn_angle_radians = np.arctan((props_t_array[i,1,j] - props_t_array[i,1,j-1]) / (props_t_array[i,0,j] - props_t_array[i,0,j-1]))
                turn_angle = np.degrees(turn_angle_radians)
            else:
                turn_angle = 0
            props_t_array[i,12,j] = turn_angle
            # Endpoint directionality ratio — computed but never stored (see note above).
            if(j > 0):
                ep_dr = cumulative / euc_dist
            else:
                ep_dr = 0
            # Direction autocorrelation (degrees fed to np.cos — see note above).
            if(j > 0):
                dir_auto = np.cos(props_t_array[i,12,j] - props_t_array[i,12,j-1])
            else:
                dir_auto = 0
            props_t_array[i,13,j] = dir_auto
# Re-zero every track so all trajectories share a common origin for plotting.
zerod_t_array = np.empty([n_cells, 2, n_frames])
for cell_idx in range(n_cells):
    for axis in range(2):
        # Subtract the frame-0 coordinate from the whole time series at once.
        zerod_t_array[cell_idx, axis, :] = props_t_array[cell_idx, axis, :] - props_t_array[cell_idx, axis, 0]
# ---- Allocate output tables for descriptive statistics --------------------
# stats_df: one row per (cell, frame); summary_cell_df: one row per cell;
# summary_timepoint_df: one row per frame.
stats_df = pd.DataFrame(columns=['cell_id','time', 'x_pos_microns', 'y_pos_microns', 'x_pos_corr', 'y_pos_corr',
'area', 'perimeter', 'angle', 'circularity', 'segment_length', 'cumulative_path_length',
'orientation', 'euclidean_distance', 'speed', 'directedness', 'turn_angle', 'direction_autocorrelation', 'solidity']) #deleted velocity
# NOTE(review): DataFrame.round() returns a NEW frame; these three unassigned
# calls are no-ops (and the frames are still empty at this point anyway).
stats_df.round(4)
summary_cell_df = pd.DataFrame(columns=['cell_id', 'avg_area', 'avg_perimeter', 'avg_angle', 'avg_circularity', 'avg_segment_length', 'total_path_length',
'avg_orientation', 'euclidean_distance', 'avg_speed', 'avg_velocity', 'avg_directedness', 'avg_turn_angle', 'avg_direction_autocorrelation', 'avg_solidity'])
summary_cell_df.round(2)
summary_timepoint_df = pd.DataFrame(columns=['time', 'avg_area', 'avg_perimeter', 'avg_angle', 'avg_circularity', 'avg_segment_length', 'total_path_length',
'avg_orientation', 'euclidean_distance', 'avg_speed', 'avg_velocity','avg_directedness', 'avg_turn_angle', 'avg_direction_autocorrelation', 'avg_solidity', 'std_orientation', 'sem_orientation','sem_speed','sem_directedness'])
summary_timepoint_df.round(2)
# Timestamp (minutes) of each frame: 0, t_inc, ..., (n_frames-1)*t_inc.
t = np.linspace(0,(n_frames-1)*t_inc,n_frames)
i_row = 0
# ---- Flatten the numpy array into the long-format stats table -------------
# NOTE(review): the chained assignment pattern stats_df['col'][row] = ...
# relies on pandas returning a view (SettingWithCopyWarning territory);
# stats_df.loc[row, 'col'] is the safe form.
if data_type=='ImageJ':
    for i in range(0,len(props_t_array[:,0,0])):
        for j in range(0,len(props_t_array[0,0,:])):
            stats_df.loc[i_row] = i_row # allocate the row, then overwrite its fields
            stats_df['cell_id'][i_row] = i + 1
            stats_df['time'][i_row] = t[j]
            stats_df['x_pos_microns'][i_row] = props_t_array[i,0,j]
            stats_df['y_pos_microns'][i_row] = props_t_array[i,1,j]
            stats_df['x_pos_corr'][i_row] = zerod_t_array[i,0,j]
            stats_df['y_pos_corr'][i_row] = zerod_t_array[i,1,j]
            stats_df['area'][i_row] = props_t_array[i,2,j]
            stats_df['perimeter'][i_row] = props_t_array[i,3,j]
            stats_df['angle'][i_row] = props_t_array[i,4,j]
            stats_df['circularity'][i_row] = props_t_array[i,5,j]
            stats_df['segment_length'][i_row] = props_t_array[i,6,j]
            stats_df['cumulative_path_length'][i_row] = props_t_array[i,7,j]
            stats_df['orientation'][i_row] = props_t_array[i,8,j]
            stats_df['euclidean_distance'][i_row] = props_t_array[i,9,j]
            stats_df['speed'][i_row] = props_t_array[i,10,j]
            stats_df['directedness'][i_row] = props_t_array[i,11,j]
            stats_df['turn_angle'][i_row] = props_t_array[i,12,j]
            stats_df['direction_autocorrelation'][i_row] = props_t_array[i,13,j]
            i_row = i_row + 1
elif data_type=='Usiigaci':
    for i in range(0,len(props_t_array[:,0,0])):
        for j in range(0,len(props_t_array[0,0,:])):
            stats_df.loc[i_row] = i_row
            stats_df['cell_id'][i_row] = i + 1
            stats_df['time'][i_row] = t[j]
            stats_df['x_pos_microns'][i_row] = props_t_array[i,0,j]
            stats_df['y_pos_microns'][i_row] = props_t_array[i,1,j]
            stats_df['x_pos_corr'][i_row] = zerod_t_array[i,0,j]
            stats_df['y_pos_corr'][i_row] = zerod_t_array[i,1,j]
            stats_df['area'][i_row] = props_t_array[i,2,j]
            stats_df['perimeter'][i_row] = props_t_array[i,3,j]
            stats_df['angle'][i_row] = props_t_array[i,4,j]
            # NOTE(review): slot 5 was never filled for Usiigaci data (the
            # solidity copy is commented out upstream), so this reads
            # uninitialized np.empty values.
            stats_df['solidity'][i_row] = props_t_array[i,5,j]
            stats_df['segment_length'][i_row] = props_t_array[i,6,j]
            stats_df['cumulative_path_length'][i_row] = props_t_array[i,7,j]
            stats_df['orientation'][i_row] = props_t_array[i,8,j]
            stats_df['euclidean_distance'][i_row] = props_t_array[i,9,j]
            stats_df['speed'][i_row] = props_t_array[i,10,j]
            stats_df['directedness'][i_row] = props_t_array[i,11,j]
            stats_df['turn_angle'][i_row] = props_t_array[i,12,j]
            stats_df['direction_autocorrelation'][i_row] = props_t_array[i,13,j]
            i_row = i_row + 1
else:
    # Position-only formats: shape/orientation columns are left empty.
    for i in range(0,len(props_t_array[:,0,0])):
        for j in range(0,len(props_t_array[0,0,:])):
            stats_df.loc[i_row] = i_row
            stats_df['cell_id'][i_row] = i + 1
            stats_df['time'][i_row] = t[j]
            stats_df['x_pos_microns'][i_row] = props_t_array[i,0,j]
            stats_df['y_pos_microns'][i_row] = props_t_array[i,1,j]
            stats_df['x_pos_corr'][i_row] = zerod_t_array[i,0,j]
            stats_df['y_pos_corr'][i_row] = zerod_t_array[i,1,j]
            stats_df['segment_length'][i_row] = props_t_array[i,6,j]
            stats_df['cumulative_path_length'][i_row] = props_t_array[i,7,j]
            stats_df['euclidean_distance'][i_row] = props_t_array[i,9,j]
            stats_df['speed'][i_row] = props_t_array[i,10,j]
            stats_df['directedness'][i_row] = props_t_array[i,11,j]
            stats_df['turn_angle'][i_row] = props_t_array[i,12,j]
            stats_df['direction_autocorrelation'][i_row] = props_t_array[i,13,j]
            i_row = i_row + 1
# ---- Per-cell summary statistics ------------------------------------------
# One row per cell: time-averaged metrics; frame 0 is excluded from averages
# of motion-derived quantities (its value is the hard-coded 0 placeholder).
if data_type=='ImageJ':
    for i in range(0,len(props_t_array[:,0,0])):
        summary_cell_df.loc[i] = i
        summary_cell_df['cell_id'][i] = i + 1
        summary_cell_df['avg_area'][i] = np.mean(props_t_array[i,2,:])
        summary_cell_df['avg_perimeter'][i] = np.mean(props_t_array[i,3,:])
        summary_cell_df['avg_angle'][i] = np.mean(props_t_array[i,4,:])
        summary_cell_df['avg_circularity'][i] = np.mean(props_t_array[i,5,:])
        summary_cell_df['avg_segment_length'][i] = np.mean(props_t_array[i,6,1:]) # exclude time point 0
        summary_cell_df['total_path_length'][i] = props_t_array[i,7,-1] # Total path length is cumulative path length at final timepoint
        summary_cell_df['avg_orientation'][i] = np.mean(props_t_array[i,8,:])
        summary_cell_df['euclidean_distance'][i] = props_t_array[i,9,-1] # Linear distance between first and last point
        summary_cell_df['avg_speed'][i] = np.mean(props_t_array[i,10,1:]) # exclude time point 0
        summary_cell_df['avg_velocity'][i] = props_t_array[i,9,-1] / ((t[-1] - t[0]) / 60) # Total Euclidean distance per hour.
        summary_cell_df['avg_directedness'][i] = np.nanmean(props_t_array[i,11,1:]) # exclude time point 0
        summary_cell_df['avg_turn_angle'][i] = np.nanmean(props_t_array[i,12,1:]) # exclude time point 0
        summary_cell_df['avg_direction_autocorrelation'][i] = np.nanmean(props_t_array[i,13,1:]) # exclude time point 0
elif data_type=='Usiigaci':
    for i in range(0,len(props_t_array[:,0,0])):
        summary_cell_df.loc[i] = i
        summary_cell_df['cell_id'][i] = i + 1
        summary_cell_df['avg_area'][i] = np.mean(props_t_array[i,2,:])
        summary_cell_df['avg_perimeter'][i] = np.mean(props_t_array[i,3,:])
        summary_cell_df['avg_angle'][i] = np.mean(props_t_array[i,4,:])
        # NOTE(review): slot 5 is uninitialized for Usiigaci data (solidity
        # copy commented out upstream), so this average is meaningless.
        summary_cell_df['avg_solidity'][i] = np.mean(props_t_array[i,5,:])
        summary_cell_df['avg_segment_length'][i] = np.mean(props_t_array[i,6,1:]) # exclude time point 0
        summary_cell_df['total_path_length'][i] = props_t_array[i,7,-1] # Total path length is cumulative path length at final timepoint
        summary_cell_df['avg_orientation'][i] = np.mean(props_t_array[i,8,:])
        summary_cell_df['euclidean_distance'][i] = props_t_array[i,9,-1] # Linear distance between first and last point
        summary_cell_df['avg_speed'][i] = np.mean(props_t_array[i,10,1:]) # exclude time point 0
        summary_cell_df['avg_velocity'][i] = props_t_array[i,9,-1] / ((t[-1] - t[0]) / 60) # Total Euclidean distance per hour.
        summary_cell_df['avg_directedness'][i] = np.nanmean(props_t_array[i,11,1:]) # exclude time point 0
        summary_cell_df['avg_turn_angle'][i] = np.nanmean(props_t_array[i,12,1:]) # exclude time point 0
        summary_cell_df['avg_direction_autocorrelation'][i] = np.nanmean(props_t_array[i,13,1:]) # exclude time point 0
else:
    # Position-only formats: shape columns are skipped.
    for i in range(0,len(props_t_array[:,0,0])):
        summary_cell_df.loc[i] = i
        summary_cell_df['cell_id'][i] = i + 1
        summary_cell_df['avg_segment_length'][i] = np.mean(props_t_array[i,6,1:]) # exclude time point 0
        summary_cell_df['total_path_length'][i] = props_t_array[i,7,-1] # Total path length is cumulative path length at final timepoint
        summary_cell_df['euclidean_distance'][i] = props_t_array[i,9,-1] # Linear distance between first and last point
        summary_cell_df['avg_speed'][i] = np.mean(props_t_array[i,10,1:]) # exclude time point 0
        summary_cell_df['avg_velocity'][i] = props_t_array[i,9,-1] / ((t[-1] - t[0]) / 60) # Total Euclidean distance per hour.
        summary_cell_df['avg_directedness'][i] = np.nanmean(props_t_array[i,11,1:]) # exclude time point 0
        summary_cell_df['avg_turn_angle'][i] = np.nanmean(props_t_array[i,12,1:]) # exclude time point 0
        summary_cell_df['avg_direction_autocorrelation'][i] = np.nanmean(props_t_array[i,13,1:]) # exclude time point 0
#individual time point statistics
# Each row i of summary_timepoint_df aggregates one property of
# props_t_array[:, prop, i] across all cells at frame i.
# props_t_array axes are [cell, property, frame]; property-index meanings are
# inferred from the column labels below — TODO confirm against the builder.
# Fixes vs. original: assignments use .loc[i, col] instead of chained
# indexing (df[col][i] = ...), which pandas does not guarantee to write
# through (SettingWithCopyWarning); the ImageJ and Usiigaci branches were
# merged — they differed only in the name of the shape column.
if data_type == 'ImageJ' or data_type == 'Usiigaci':
    # Property 5 is circularity in ImageJ exports and solidity in Usiigaci's.
    shape_col = 'avg_circularity' if data_type == 'ImageJ' else 'avg_solidity'
    for i in range(0, len(props_t_array[0, 0, :])):
        summary_timepoint_df.loc[i] = i
        summary_timepoint_df.loc[i, 'time'] = i * t_inc
        summary_timepoint_df.loc[i, 'avg_area'] = np.mean(props_t_array[:, 2, i])
        summary_timepoint_df.loc[i, 'avg_perimeter'] = np.mean(props_t_array[:, 3, i])
        summary_timepoint_df.loc[i, 'avg_angle'] = np.mean(props_t_array[:, 4, i])
        summary_timepoint_df.loc[i, shape_col] = np.mean(props_t_array[:, 5, i])
        summary_timepoint_df.loc[i, 'avg_segment_length'] = np.mean(props_t_array[:, 6, i])
        summary_timepoint_df.loc[i, 'total_path_length'] = np.mean(props_t_array[:, 7, i])  # cumulative path length, averaged over cells
        summary_timepoint_df.loc[i, 'avg_orientation'] = np.mean(props_t_array[:, 8, i])
        summary_timepoint_df.loc[i, 'std_orientation'] = np.std(props_t_array[:, 8, i])
        summary_timepoint_df.loc[i, 'sem_orientation'] = scipy.stats.sem(props_t_array[:, 8, i])
        summary_timepoint_df.loc[i, 'euclidean_distance'] = np.mean(props_t_array[:, 9, i])  # straight-line distance from start
        summary_timepoint_df.loc[i, 'avg_speed'] = np.mean(props_t_array[:, 10, i])
        summary_timepoint_df.loc[i, 'sem_speed'] = scipy.stats.sem(props_t_array[:, 10, i])
        summary_timepoint_df.loc[i, 'avg_velocity'] = np.mean(props_t_array[:, 9, i]) / ((t[-1] - t[0]) / 60)  # mean Euclidean distance per hour
        summary_timepoint_df.loc[i, 'avg_directedness'] = np.nanmean(props_t_array[:, 11, i])
        summary_timepoint_df.loc[i, 'sem_directedness'] = scipy.stats.sem(props_t_array[:, 11, i])
        summary_timepoint_df.loc[i, 'avg_turn_angle'] = np.nanmean(props_t_array[:, 12, i])
        summary_timepoint_df.loc[i, 'avg_direction_autocorrelation'] = np.nanmean(props_t_array[:, 13, i])
else:
    # Generic tracker output: only the motility columns are available.
    for i in range(0, len(props_t_array[0, 0, :])):
        summary_timepoint_df.loc[i] = i
        summary_timepoint_df.loc[i, 'time'] = i * t_inc
        summary_timepoint_df.loc[i, 'avg_segment_length'] = np.mean(props_t_array[:, 6, i])
        summary_timepoint_df.loc[i, 'total_path_length'] = np.mean(props_t_array[:, 7, i])  # cumulative path length, averaged over cells
        summary_timepoint_df.loc[i, 'euclidean_distance'] = np.mean(props_t_array[:, 9, i])  # straight-line distance from start
        summary_timepoint_df.loc[i, 'avg_speed'] = np.mean(props_t_array[:, 10, i])
        summary_timepoint_df.loc[i, 'sem_speed'] = scipy.stats.sem(props_t_array[:, 10, i])
        summary_timepoint_df.loc[i, 'avg_velocity'] = np.mean(props_t_array[:, 9, i]) / ((t[-1] - t[0]) / 60)  # mean Euclidean distance per hour
        summary_timepoint_df.loc[i, 'avg_directedness'] = np.nanmean(props_t_array[:, 11, i])
        summary_timepoint_df.loc[i, 'sem_directedness'] = scipy.stats.sem(props_t_array[:, 11, i])
        summary_timepoint_df.loc[i, 'avg_turn_angle'] = np.nanmean(props_t_array[:, 12, i])
        summary_timepoint_df.loc[i, 'avg_direction_autocorrelation'] = np.nanmean(props_t_array[:, 13, i])
# Persist the three summary tables as CSV.
# Fix vs. original: os.makedirs(..., exist_ok=True) replaces the racy
# check-then-create pattern (TOCTOU if two runs start concurrently).
export_path = 'export//spreadsheets//'
os.makedirs(export_path, exist_ok=True)
stats_df.to_csv(export_path + "cell_migration_descriptive_statistics.csv", header=True, index=False)
summary_cell_df.to_csv(export_path + "cell_migration_summary.csv", header=True, index=False)
summary_timepoint_df.to_csv(export_path + "timepoint_migration_summary.csv", header=True, index=False)
#Drawing plots
# Fixed axis limits so the zero padding in the preallocated arrays does not
# dominate autoscaling; these limits are shared by all the plots below.
xmin = -500
xmax = 500
ymin = -500
ymax = 500
export_path = 'export//scatter//'
os.makedirs(export_path, exist_ok=True)
# Fixes vs. original: a duplicated, never-used figure was created and leaked
# before this one, and the trailing `np.fig = []` assigned a stray attribute
# onto the numpy module (typo for np_fig); both removed.
frames = []
fig = plt.figure(frameon=True, facecolor='w')
fig.set_size_inches(10, 10)
cell_colors = np.linspace(0, 1, n_cells)  # one colormap position per cell
print("graphing raw scatter plot")
for t in range(0, n_frames):
    ax = plt.subplot(111)
    ax.clear()
    ax.scatter(props_t_array[:, 0, t], props_t_array[:, 1, t], s=20, alpha=1, c=cell_colors)
    ax.axis([xmin, xmax, ymin, ymax])  # Setting the axes like this avoid the zero values in the preallocated empty array.
    ax.text(xmin + (xmax - xmin) / 8, ymax + 5, 'Distribution of cell positions at t = ' + str(int(timestamps[t])) + ' minutes', fontsize=15)
    ax.set_xlabel('X position ($\mu$m)', fontsize=15)
    ax.set_ylabel('Y position ($\mu$m)', fontsize=15)
    # Render, then grab the canvas as an RGB array for the animated GIF
    fig.canvas.draw()
    np_fig = np.frombuffer(fig.canvas.tostring_rgb(), dtype=np.uint8)
    np_fig = np_fig.reshape(fig.canvas.get_width_height()[::-1] + (3,))
    frames.append(np_fig)
imageio.mimsave(export_path + '/scatter_raw.gif', frames)
# Zeroed scatter animation: same as the raw scatter above, but positions come
# from zerod_t_array (each track translated to start at the origin).
frames = []
fig = plt.figure(frameon=True,facecolor='w')
fig.set_size_inches(10,10)
print("graphing zeroed scatter plot")
for t in range(0,n_frames):
    ax = plt.subplot(111)
    ax.clear()
    ax.scatter(zerod_t_array[:,0,t],zerod_t_array[:,1,t],s=20, alpha=1, c=cell_colors)
    #ax.axis('equal')
    #plt.axis('off')
    ax.axis([xmin, xmax, ymin, ymax]) # Setting the axes like this avoid the zero values in the preallocated empty array.
    #ax.text(-50, 90, 'Distribution of cell positions (zeroed) at t = ' + str(int(timestamps[t])) + ' minutes', fontsize=15)
    ax.text(xmin + (xmax - xmin) / 8, ymax + 5, 'Distribution of cell positions (zeroed) at t = ' + str(int(timestamps[t])) + ' minutes', fontsize=15)
    ax.set_xlabel('Relative X position ($\mu$m)', fontsize=15)
    ax.set_ylabel('Relative Y position ($\mu$m)', fontsize=15)
    # Draw the figure
    fig.canvas.draw()
    #uncomment this one if you want to save individual time point into a file
    #plt.savefig(export_path + 'scatter%d.png'%(t), format='png', dpi=600)
    # Convert the rendered canvas to an RGB numpy array and append to the GIF frame list
    #np_fig = np.fromstring(fig.canvas.tostring_rgb(), dtype=np.uint8, sep='')
    np_fig = np.frombuffer(fig.canvas.tostring_rgb(), dtype=np.uint8)
    np_fig = np_fig.reshape(fig.canvas.get_width_height()[::-1] + (3,))
    frames.append(np_fig)
imageio.mimsave(export_path + '/scatter_zeroed.gif', frames)
print("graphing 2D trajectory plots")
#2D hair ball plots with each cell track is one color
x = zerod_t_array[:,0,:]
y = zerod_t_array[:,1,:]
# Fix vs. original: the endpoint is (n_frames-1)*t_inc so sample spacing is
# exactly t_inc, matching every other time vector in this file (the original
# used n_frames*t_inc here, an off-by-one in the spacing).
t = np.linspace(0, (n_frames - 1) * t_inc, n_frames)
fig = plt.figure(figsize=(10, 10), facecolor='w')
ax = fig.add_subplot(111)
export_path = 'export//2d_hairball//'
os.makedirs(export_path, exist_ok=True)
# One polyline per cell, in the (n_tracks, n_frames, 2) layout LineCollection accepts
segs = np.zeros((n_cells, n_frames, 2), float)
segs[:, :, 0] = x
segs[:, :, 1] = y
ax.set_xlim(xmin, xmax)
ax.set_ylim(ymin, ymax)
ax.set_xlabel('X position ($\mu$m)')
ax.set_ylabel('Y position ($\mu$m)')
# Cycle the default matplotlib color cycle across tracks
colors = [mcolors.to_rgba(c)
          for c in plt.rcParams['axes.prop_cycle'].by_key()['color']]
line_segments = LineCollection(segs, colors=colors, cmap=plt.get_cmap('jet'))
ax.add_collection(line_segments)
ax.set_title('Cell migration trajectories')
plt.savefig(export_path + '2d_hairball.png', format='png', dpi=600)
#2D hair ball trajectory plot with color coded according to elapsed time (Imaris-like)
# Fixes vs. original: the time vector endpoint is (n_frames-1)*t_inc so the
# sample spacing is exactly t_inc (consistent with the rest of the file), and
# the loop-invariant axis setup / colormap lookup are hoisted out of the
# per-cell loop instead of being re-executed for every cell.
t = np.linspace(0, (n_frames - 1) * t_inc, n_frames)
fig = plt.figure(figsize=(10, 10), facecolor='w')
ax = fig.add_subplot(111)
export_path = 'export//2d_hairball//'
os.makedirs(export_path, exist_ok=True)
ax.set_xlim(xmin, xmax)
ax.set_ylim(ymin, ymax)
ax.set_xlabel('X position ($\mu$m)')
ax.set_ylabel('Y position ($\mu$m)')
cmap = plt.get_cmap('jet')
for n in range(0, n_cells):
    x = zerod_t_array[n, 0, :]
    y = zerod_t_array[n, 1, :]
    # Remove the NaNs (frames where this cell was not tracked)
    x = x[~np.isnan(x)]
    y = y[~np.isnan(y)]
    # Consecutive point pairs in the (n_segments, 2, 2) format LineCollection expects
    points = np.array([x, y]).T.reshape(-1, 1, 2)
    segments = np.concatenate([points[:-1], points[1:]], axis=1)
    # NOTE(review): `array=t` has n_frames entries but a NaN-truncated track
    # has fewer segments — color alignment for short tracks may be off; confirm.
    line_segments = LineCollection(segments, array=t, cmap=cmap)
    ax.add_collection(line_segments)
# Single colorbar for the whole figure, keyed off the last collection added
axcb = fig.colorbar(line_segments)
axcb.set_label('Time (minutes)')
ax.set_title('Cell migration trajectories')
plt.savefig(export_path + '2d_hairball_time_cmap.png', format='png', dpi=600)
# 2D hairball with the color of each entire trajectory mapped to the track's final x position
x = zerod_t_array[:,0,:]
y = zerod_t_array[:,1,:]
# Final valid (non-NaN) x coordinate of each cell, used as the color value.
# Fix vs. original: the array was allocated with np.empty, so a cell whose
# track is all-NaN kept uninitialized memory as its color value; np.nan is
# now the explicit "no data" default.
end_x_pos = np.full([len(x[:, 0]), 1], np.nan)
for i in range(0, len(end_x_pos)):  # For each cell
    x_vals = np.copy(np.squeeze(x[i, :]))
    x_vals = x_vals[~np.isnan(x_vals)]
    if len(x_vals) > 0:
        end_x_pos[i] = x_vals[-1]
fig = plt.figure(figsize=(10, 10), facecolor='w')
ax = fig.add_subplot(111)
export_path = 'export//2d_hairball//'
os.makedirs(export_path, exist_ok=True)
segs = np.zeros((n_cells, n_frames, 2), float)
segs[:, :, 0] = x
segs[:, :, 1] = y
ax.set_xlim(xmin, xmax)
ax.set_ylim(ymin, ymax)
ax.set_xlabel('X position ($\mu$m)')
ax.set_ylabel('Y position ($\mu$m)')
line_segments = LineCollection(segs, array=np.squeeze(end_x_pos), cmap=plt.get_cmap('jet'))
ax.add_collection(line_segments)
ax.set_title('Cell migration trajectories')
axcb = fig.colorbar(line_segments, orientation="horizontal", pad=0.1)
axcb.set_label('Final x position ($\mu$m)')
plt.savefig(export_path + '2d_hairball_cmap_endPos.png', format='png', dpi=600)
#2d hair ball plot with final position color coding by X direction (Ibidi like)
# Tracks are split by the sign of the final x coordinate: red for cells that
# ended left of the origin (x < 0), black for cells that ended right (x > 0).
# NOTE(review): cells whose final x is exactly 0 or NaN satisfy neither
# comparison and are silently dropped from both groups — confirm intended.
x = zerod_t_array[:,0,:]
y = zerod_t_array[:,1,:]
x_1 = x[x[:,-1] < 0]
y_1 = y[x[:,-1] < 0]
x_2 = x[x[:,-1] > 0]
y_2 = y[x[:,-1] > 0]
t = np.linspace(0,(n_frames-1)*t_inc,n_frames)
fig = plt.figure(figsize = (10,10),facecolor='w')
ax = fig.add_subplot(111)
export_path = 'export//2d_hairball//'
if not os.path.exists(export_path):
    os.makedirs(export_path)
# Pack each group's coordinates into a (n_tracks, n_frames, 2) segment array
segs_1 = np.zeros((len(x_1[:,0]), n_frames, 2), float)
segs_1[:, :, 0] = x_1
segs_1[:, :, 1] = y_1
segs_2 = np.zeros((len(x_2[:,0]), n_frames, 2), float)
segs_2[:, :, 0] = x_2
segs_2[:, :, 1] = y_2
#ax.set_xlim(np.nanmin(x)-5, np.nanmax(x)+5)
#ax.set_ylim(np.nanmin(y)-5, np.nanmax(y)+5)
ax.set_xlim(xmin, xmax)
ax.set_ylim(ymin, ymax)
ax.set_xlabel('X position ($\mu$m)')
ax.set_ylabel('Y position ($\mu$m)')
line_segments_1 = LineCollection(segs_1,colors='red')
ax.add_collection(line_segments_1)
line_segments_2 = LineCollection(segs_2,colors='black')
ax.add_collection(line_segments_2)
ax.set_title('Cell migration trajectories')
# Mark each track's final position with a dot in the matching group color
ax.scatter(x_1[:,-1],y_1[:,-1],s=50, c='red')
ax.scatter(x_2[:,-1],y_2[:,-1],s=50, c='black')
plt.savefig(export_path + '2d_hairball_cmap_endPos_2color.png', format='png', dpi=600)
#3D hairball plot with time as z axis
frames = []
fig = plt.figure(figsize=(15, 15))
ax = fig.add_subplot(111, projection='3d')
export_path = 'export//3d_hairball//'
os.makedirs(export_path, exist_ok=True)
# Fixes vs. original: the time vector is hoisted out of the per-cell loop (it
# is loop-invariant) and its endpoint corrected to (n_frames-1)*t_inc so the
# sample spacing is exactly t_inc, consistent with the rest of the file.
t = np.linspace(0, (n_frames - 1) * t_inc, n_frames)
for n in range(0, n_cells):
    x = zerod_t_array[n, 0, :]
    y = zerod_t_array[n, 1, :]
    ax.plot(x, y, t)
ax.set_xlabel('X position ($\mu$m)')
ax.set_ylabel('Y position ($\mu$m)')
ax.set_zlabel('Time (minutes)')
print("graphing 3D trajectory")
# Rotate the camera through a full revolution, capturing each view for the GIF
for angle in range(0, 360):
    ax.view_init(30, angle)
    fig.canvas.draw()
    np_fig = np.frombuffer(fig.canvas.tostring_rgb(), dtype=np.uint8)
    np_fig = np_fig.reshape(fig.canvas.get_width_height()[::-1] + (3,))
    frames.append(np_fig)
imageio.mimsave(export_path + '/3d_hairball_test.gif', frames)
#plot violin plot for cell area, perimeter, orientation, circularity, speed,
#directedness, turn angle, and direction autocorrelation — one figure each.
print("plotting violin plots")
export_path = 'export//Violin plots//'
os.makedirs(export_path, exist_ok=True)
t = np.linspace(0, (n_frames - 1) * t_inc, n_frames)
# The eight near-identical figure stanzas are collapsed into one data-driven
# loop: (property index, title, y label, file name, show_median).
# The area plot marked medians instead of means in the original; that
# difference is preserved via the show_median flag — possibly unintentional,
# confirm with the author.
_violin_specs = [
    (2, 'cell area', 'cellarea($\mu m^2$)', 'area_violin.png', True),
    (3, 'Cell perimeter', 'Cell Perimeter ($\mu$m)', 'perimeter_violin.png', False),
    (4, 'Orientation angle', 'Angle (degrees)', 'angle_violin.png', False),
    (5, 'Circularity', 'Circularity (a.u.)', 'circularity_violin.png', False),
    (10, 'Speed', 'Speed ($\mu$m/h)', 'speed_violin.png', False),
    (11, 'Directedness', 'Directedness(a.u.)', 'directedness_violin.png', False),
    (12, 'Turn angle', 'Turn angle (degrees)', 'turnangle_violin.png', False),
    (13, 'Direction autocorrelation', 'Direction autocorrelation(a.u.)', 'direction_autocorrelation_violin.png', False),
]
for prop_idx, title, ylabel, fname, show_median in _violin_specs:
    fig = plt.figure(figsize=(18, 10), facecolor='w')
    ax = fig.add_subplot(111)
    ax.violinplot(np.squeeze(props_t_array[:, prop_idx, :]), positions=t, widths=10,
                  showmeans=not show_median, showextrema=True, showmedians=show_median)
    ax.set_title(title)
    ax.set_ylabel(ylabel)
    ax.set_xlabel('Time (minutes)')
    plt.savefig(export_path + fname, format='png', dpi=600)
#box plots
export_path = 'export//boxplots//'
os.makedirs(export_path, exist_ok=True)
t = np.linspace(0, (n_frames - 1) * t_inc, n_frames)
print("plotting box plots")
linewidth = 1
# The nine identical box+swarm stanzas are collapsed into one data-driven
# loop: (property index in props_t_array, plot title, y label, output file).
_boxplot_specs = [
    (2, 'Cell area', 'Cell area ($\mu m^2$)', 'cellarea_boxplot.png'),
    (3, 'Cell perimeter', 'Cell perimeter ($\mu m$)', 'perimeter_boxplot.png'),
    (4, 'Orientation angle', 'Orientation angle (degrees)', 'angle_boxplot.png'),
    (8, 'Orientation index', 'Orientation index', 'orientation_boxplot.png'),
    (5, 'Circularity', 'Circularity (a.u.)', 'circularity_boxplot.png'),
    (10, 'Speed', 'Speed ($\mu$m/h)', 'speed_boxplot.png'),
    (11, 'Directedness', 'Directedness(a.u.)', 'directedness_boxplot.png'),
    (12, 'Turn angle', 'Turn angle (degrees)', 'turnangle_boxplot.png'),
    (13, 'Direction autocorrelation', 'Direction autocorrelation(a.u.)', 'direction_autocorrelation_boxplot.png'),
]
for prop_idx, title, ylabel, fname in _boxplot_specs:
    fig = plt.figure(figsize=(18, 10), facecolor='w')
    ax = fig.add_subplot(111)
    data = np.squeeze(props_t_array[:, prop_idx, :])
    # Box plot with the per-cell observations overlaid as a swarm
    ax = sns.boxplot(data=data, orient="v", linewidth=linewidth, fliersize=2, ax=ax)
    ax = sns.swarmplot(data=data, orient="v", linewidth=linewidth, ax=ax)
    ax.set_title(title)
    ax.set_ylabel(ylabel)
    ax.set_xlabel('Time (frame)')
    plt.savefig(export_path + fname, format='png', dpi=600)
# Time-series (mean with CI band) plots of each property across cells.
# NOTE(review): sns.tsplot was deprecated in seaborn 0.8 and removed in 0.9 —
# this code requires an old seaborn; migrate to sns.lineplot when updating.
# Fixes vs. original: the x-axis label typo 'Time (Minutes))' on the speed
# plot, and the last two plots were labeled 'Time (frame)' although their x
# values are minutes; the nine stanzas are also collapsed into one loop.
export_path = 'export//Timeseries plots//'
os.makedirs(export_path, exist_ok=True)
t = np.linspace(0, (n_frames - 1) * t_inc, n_frames)
print("plotting time series plot")
linewidth = 1
# Spec: (property index, trim, legend label, value label, title, y label, file).
# trim=True drops frame 0 (no preceding displacement) and the final frame,
# as the original did for the motion-derived properties — confirm the final
# frame really should be excluded.
_ts_specs = [
    (2, False, 'Cell Area', 'Cell Area ($\mu m^2$)', 'Cell area', 'Cell area ($\mu m^2$)', 'cellarea_timeseries.png'),
    (3, False, 'Cell Perimeter', 'Cell Perimeter ($\mu$m)', 'Cell perimeter', 'Cell perimeter ($\mu m$)', 'perimeter_timeseries.png'),
    (4, False, 'Orientation angle', 'Orientation Angle (degrees)', 'Orientation angle', 'Orientation angle (degrees)', 'angle_timeseries.png'),
    (8, False, 'Orientation', 'Orientation', 'Orientation', 'Orientation', 'orientation_timeseries.png'),
    (5, False, 'Circularity', 'Circularity (a.u.)', 'Circularity', 'Circularity (a.u.)', 'circularity_timeseries.png'),
    (10, True, 'Speed', 'Speed ($\mu$m)/h', 'Speed', 'Speed ($\mu$m/h)', 'speed_timeseries.png'),
    (11, True, 'Directedness', 'Directedness (a.u.)', 'Directedness', 'Directedness(a.u.)', 'directedness_timeseries.png'),
    (12, True, 'Turn angle', 'Turn angle (degree)', 'Turn angle', 'Turn angle (degrees)', 'turnangle_timeseries.png'),
    (13, True, 'Direction autocorrelation', 'Direction autocorrelation (a.u.)', 'Direction autocorrelation', 'Direction autocorrelation(a.u.)', 'direction_autocorrelation_timeseries.png'),
]
for prop_idx, trim, condition, value, title, ylabel, fname in _ts_specs:
    if trim:
        data = np.squeeze(props_t_array[:, prop_idx, 1:-1])
        times = t[1:-1]
    else:
        data = np.squeeze(props_t_array[:, prop_idx, :])
        times = t
    fig = plt.figure(figsize=(18, 10), facecolor='w')
    ax = fig.add_subplot(111)
    ax = sns.tsplot(data, time=times, condition=condition, value=value,
                    err_style="ci_band", ci=[0, 95], ax=ax)
    ax.set_title(title)
    ax.set_ylabel(ylabel)
    ax.set_xlabel('Time (Minutes)')
    plt.savefig(export_path + fname, format='png', dpi=600)
export_path = 'export//frequency_histogram_subplots//'
if not os.path.exists(export_path):
    os.makedirs(export_path)
print("plotting frequency histogram subplots")
sns.set_style({'lines.linewidth': 8.0},{'axes.linewidth': 2.0})
frames = []
# Four-panel animation, one frame per timepoint: histograms of speed,
# orientation angle and turn angle, plus kernel-density estimates of the
# relative X/Y displacements.
fig, (ax1, ax2, ax3, ax4) = plt.subplots(1,4, figsize=(20,5), facecolor='w')
for t in range(0,n_frames):
    ax1.clear()
    ax2.clear()
    ax3.clear()
    ax4.clear()
    #speed
    ax1.hist(props_t_array[:,10,t], color='white',edgecolor='black', linewidth=4,range=(0, np.nanmax(props_t_array[:,10,:])))
    ax1.set_xlabel('Speed ($\mu$m/h)', fontsize=20)
    ax1.set_ylabel('Frequency', fontsize=20)
    # Limits are held fixed across frames so the animation does not rescale
    ax1.set_xlim(0, np.nanmax(props_t_array[:,10,:]))
    ax1.set_ylim(0, n_cells)
    #angle
    ax2.hist(props_t_array[:,4,t], color='white',edgecolor='black', linewidth=4,range=(0, 180)) #props_t_array[:,8,t])
    ax2.set_xlabel('$\Phi$ (degrees)', fontsize=20)
    ax2.set_ylabel('Frequency', fontsize=20)
    ax2.set_xlim(0, 180)
    ax2.set_ylim(0, n_cells)
    #turn angle
    ax3.hist(props_t_array[:,12,t], color='white',edgecolor='black', linewidth=4,range=(np.nanmin(props_t_array[:,12,:]), np.nanmax(props_t_array[:,12,:])))
    ax3.set_xlabel('α(degrees)', fontsize=20)
    ax3.set_ylabel('Frequency', fontsize=20)
    ax3.set_xlim(np.nanmin(props_t_array[:,12,:]), np.nanmax(props_t_array[:,12,:]))
    ax3.set_ylim(0, n_cells)
    # KDE of the two displacement components (blue = delta-X, red = delta-Y)
    p1=sns.kdeplot(zerod_t_array[:,0,t], shade=False, color="b", ax=ax4)
    p1=sns.kdeplot(zerod_t_array[:,1,t], shade=False, color="r", ax=ax4)
    ax4.text(-150, 0.06, '$\Delta$X', fontsize=30, color='blue')
    ax4.text(-150, 0.05, '$\Delta$Y', fontsize=30, color='red')
    ax4.set_xlabel('Change in position ($\mu$m)', fontsize=20)
    ax4.set_xlim(np.nanmin(zerod_t_array[:,:,:])-5, np.nanmax(zerod_t_array[:,:,:])+5)
    ax4.set_ylim(0, 0.1)
    # Draw the figure
    fig.canvas.draw()
    # Convert the rendered canvas to an RGB numpy array and append to the GIF frame list
    #np_fig = np.fromstring(fig.canvas.tostring_rgb(), dtype=np.uint8, sep='')
    np_fig = np.frombuffer(fig.canvas.tostring_rgb(), dtype=np.uint8)
    np_fig = np_fig.reshape(fig.canvas.get_width_height()[::-1] + (3,))
    frames.append(np_fig)
imageio.mimsave(export_path + '/frequency_histogram_subplots.gif', frames)
| 44.551752
| 460
| 0.705738
| 9,550
| 55,957
| 3.91623
| 0.069843
| 0.051979
| 0.087059
| 0.056791
| 0.794171
| 0.777246
| 0.755535
| 0.74869
| 0.73885
| 0.724011
| 0
| 0.028141
| 0.131887
| 55,957
| 1,255
| 461
| 44.587251
| 0.741771
| 0.183623
| 0
| 0.66107
| 0
| 0
| 0.16541
| 0.030628
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.016789
| 0
| 0.016789
| 0.034627
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
c5e9ce1c9d44b8b8c824cbf18c1610d9b643e32c
| 173
|
py
|
Python
|
django_bootstrap_wysiwyg/utils.py
|
Prithvi45/django-bootstrap-wysiwyg
|
7ec93c29221207d793070c2956814b36dcc175a5
|
[
"MIT"
] | 9
|
2015-02-03T07:01:38.000Z
|
2017-10-18T09:08:18.000Z
|
django_bootstrap_wysiwyg/utils.py
|
Prithvi45/django-bootstrap-wysiwyg
|
7ec93c29221207d793070c2956814b36dcc175a5
|
[
"MIT"
] | 4
|
2015-01-06T13:44:59.000Z
|
2020-06-04T19:24:46.000Z
|
django_bootstrap_wysiwyg/utils.py
|
laplacesdemon/django-bootstrap-wysiwyg
|
7ec93c29221207d793070c2956814b36dcc175a5
|
[
"MIT"
] | 8
|
2015-01-06T13:45:21.000Z
|
2020-11-24T17:32:58.000Z
|
from django.conf import settings
def setting(name, default=None):
    """Return the value of the Django setting *name*, or *default* if it is not defined."""
    value = getattr(settings, name, default)
    return value
| 24.714286
| 60
| 0.734104
| 24
| 173
| 5.291667
| 0.791667
| 0.173228
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.17341
| 173
| 6
| 61
| 28.833333
| 0.888112
| 0.289017
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0.333333
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
c5f36d201b018d8df09e797ed4f7b19e11ee49e5
| 224
|
py
|
Python
|
share/lib/python/neuron/rxd/geometry3d/__init__.py
|
tommorse/nrn
|
73236b12977118ae0a98d7dbbed60973994cdaee
|
[
"BSD-3-Clause"
] | 1
|
2020-05-28T17:21:52.000Z
|
2020-05-28T17:21:52.000Z
|
share/lib/python/neuron/rxd/geometry3d/__init__.py
|
tommorse/nrn
|
73236b12977118ae0a98d7dbbed60973994cdaee
|
[
"BSD-3-Clause"
] | 2
|
2019-11-09T23:02:28.000Z
|
2019-11-18T00:17:10.000Z
|
share/lib/python/neuron/rxd/geometry3d/__init__.py
|
tommorse/nrn
|
73236b12977118ae0a98d7dbbed60973994cdaee
|
[
"BSD-3-Clause"
] | 1
|
2018-12-18T13:52:16.000Z
|
2018-12-18T13:52:16.000Z
|
from .surface import surface
from .triangularMesh import TriangularMesh
from .voxelize import voxelize
#from .voxelize2 import voxelize2
from .scalarField import ScalarField
from .FullJoinMorph import fullmorph as voxelize2
| 32
| 49
| 0.852679
| 26
| 224
| 7.346154
| 0.384615
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.015152
| 0.116071
| 224
| 6
| 50
| 37.333333
| 0.949495
| 0.142857
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
a89b95e54048c41e56dc4855c2cecee7bed6ef41
| 3,990
|
py
|
Python
|
tests/api_resources/test_subscription_schedule.py
|
STejas6/stripe-python
|
428f82d41023b54bed618a4eb96f48eb87ea940f
|
[
"MIT"
] | 29
|
2019-09-05T18:40:53.000Z
|
2022-03-10T22:00:57.000Z
|
tests/api_resources/test_subscription_schedule.py
|
STejas6/stripe-python
|
428f82d41023b54bed618a4eb96f48eb87ea940f
|
[
"MIT"
] | 3
|
2020-08-25T17:23:05.000Z
|
2021-10-03T19:47:39.000Z
|
tests/api_resources/test_subscription_schedule.py
|
STejas6/stripe-python
|
428f82d41023b54bed618a4eb96f48eb87ea940f
|
[
"MIT"
] | 15
|
2019-11-05T23:43:27.000Z
|
2022-03-02T12:48:53.000Z
|
from __future__ import absolute_import, division, print_function
import stripe
TEST_RESOURCE_ID = "sub_sched_123"
TEST_REVISION_ID = "sub_sched_rev_123"
class TestSubscriptionScheduleSchedule(object):
    """Tests for the stripe.SubscriptionSchedule resource.

    Each test triggers one client-library call and asserts the HTTP verb
    and path it issued via request_mock (presumably a pytest fixture from
    the suite's conftest — confirm).
    """

    def test_is_listable(self, request_mock):
        resources = stripe.SubscriptionSchedule.list()
        request_mock.assert_requested("get", "/v1/subscription_schedules")
        assert isinstance(resources.data, list)
        assert isinstance(resources.data[0], stripe.SubscriptionSchedule)

    def test_is_retrievable(self, request_mock):
        resource = stripe.SubscriptionSchedule.retrieve(TEST_RESOURCE_ID)
        request_mock.assert_requested(
            "get", "/v1/subscription_schedules/%s" % TEST_RESOURCE_ID
        )
        assert isinstance(resource, stripe.SubscriptionSchedule)

    def test_is_creatable(self, request_mock):
        resource = stripe.SubscriptionSchedule.create(customer="cus_123")
        request_mock.assert_requested("post", "/v1/subscription_schedules")
        assert isinstance(resource, stripe.SubscriptionSchedule)

    def test_is_saveable(self, request_mock):
        # save() should POST the mutated metadata back to the schedule URL
        resource = stripe.SubscriptionSchedule.retrieve(TEST_RESOURCE_ID)
        resource.metadata["key"] = "value"
        resource.save()
        request_mock.assert_requested(
            "post", "/v1/subscription_schedules/%s" % TEST_RESOURCE_ID
        )

    def test_is_modifiable(self, request_mock):
        resource = stripe.SubscriptionSchedule.modify(
            TEST_RESOURCE_ID, metadata={"key": "value"}
        )
        request_mock.assert_requested(
            "post", "/v1/subscription_schedules/%s" % TEST_RESOURCE_ID
        )
        assert isinstance(resource, stripe.SubscriptionSchedule)

    def test_can_cancel(self, request_mock):
        # Instance-method form of cancel
        resource = stripe.SubscriptionSchedule.retrieve(TEST_RESOURCE_ID)
        resource = resource.cancel()
        request_mock.assert_requested(
            "post", "/v1/subscription_schedules/%s/cancel" % TEST_RESOURCE_ID
        )
        assert isinstance(resource, stripe.SubscriptionSchedule)

    def test_can_cancel_classmethod(self, request_mock):
        resource = stripe.SubscriptionSchedule.cancel(TEST_RESOURCE_ID)
        request_mock.assert_requested(
            "post", "/v1/subscription_schedules/%s/cancel" % TEST_RESOURCE_ID
        )
        assert isinstance(resource, stripe.SubscriptionSchedule)

    def test_can_release(self, request_mock):
        # Instance-method form of release
        resource = stripe.SubscriptionSchedule.retrieve(TEST_RESOURCE_ID)
        resource = resource.release()
        request_mock.assert_requested(
            "post", "/v1/subscription_schedules/%s/release" % TEST_RESOURCE_ID
        )
        assert isinstance(resource, stripe.SubscriptionSchedule)

    def test_can_release_classmethod(self, request_mock):
        resource = stripe.SubscriptionSchedule.release(TEST_RESOURCE_ID)
        request_mock.assert_requested(
            "post", "/v1/subscription_schedules/%s/release" % TEST_RESOURCE_ID
        )
        assert isinstance(resource, stripe.SubscriptionSchedule)
class TestSubscriptionScheduleRevisions(object):
    """Exercises the revision sub-resource endpoints of SubscriptionSchedule."""

    def test_is_listable(self, request_mock):
        revisions = stripe.SubscriptionSchedule.list_revisions(
            TEST_RESOURCE_ID
        )
        expected_path = "/v1/subscription_schedules/%s/revisions" % TEST_RESOURCE_ID
        request_mock.assert_requested("get", expected_path)
        assert isinstance(revisions.data, list)
        assert isinstance(
            revisions.data[0], stripe.SubscriptionScheduleRevision
        )

    def test_is_retrievable(self, request_mock):
        revision = stripe.SubscriptionSchedule.retrieve_revision(
            TEST_RESOURCE_ID, TEST_REVISION_ID
        )
        expected_path = "/v1/subscription_schedules/%s/revisions/%s" % (
            TEST_RESOURCE_ID,
            TEST_REVISION_ID,
        )
        request_mock.assert_requested("get", expected_path)
        assert isinstance(revision, stripe.SubscriptionScheduleRevision)
| 39.9
| 79
| 0.700251
| 400
| 3,990
| 6.67
| 0.1475
| 0.090705
| 0.0997
| 0.107196
| 0.845952
| 0.821964
| 0.764993
| 0.720015
| 0.657796
| 0.657796
| 0
| 0.007002
| 0.212531
| 3,990
| 99
| 80
| 40.30303
| 0.842139
| 0
| 0
| 0.385542
| 0
| 0
| 0.115038
| 0.091729
| 0
| 0
| 0
| 0
| 0.277108
| 1
| 0.13253
| false
| 0
| 0.024096
| 0
| 0.180723
| 0.012048
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
a8ad475afe927fcb71002a4d2296fbcee84a839b
| 5,983
|
py
|
Python
|
test/test_route53_interface.py
|
fossabot/route53-ddns
|
364cc08fc3d4d2a5278c86463b88ce776c220ba8
|
[
"MIT"
] | null | null | null |
test/test_route53_interface.py
|
fossabot/route53-ddns
|
364cc08fc3d4d2a5278c86463b88ce776c220ba8
|
[
"MIT"
] | null | null | null |
test/test_route53_interface.py
|
fossabot/route53-ddns
|
364cc08fc3d4d2a5278c86463b88ce776c220ba8
|
[
"MIT"
] | null | null | null |
from route53_ddns import route53_interface
from unittest.mock import MagicMock, call, patch
import pytest
@patch("route53_ddns.route53_interface.route53")
def test_get_hosted_zone_id_raises_keyerror_if_no_zone(route53_mock):
    """An empty HostedZones listing must surface as KeyError."""
    empty_listing = {"HostedZones": []}
    route53_mock.list_hosted_zones.return_value = empty_listing
    with pytest.raises(KeyError):
        route53_interface.get_hosted_zone_id("my.zone")
@patch("route53_ddns.route53_interface.route53")
def test_get_hosted_zone_id_value_error_if_not_found(route53_mock):
    """A listing containing only non-matching zones must surface as ValueError."""
    other_zone_only = {"HostedZones": [{"Name": "another.zone"}]}
    route53_mock.list_hosted_zones.return_value = other_zone_only
    with pytest.raises(ValueError):
        route53_interface.get_hosted_zone_id("my.zone")
@patch("route53_ddns.route53_interface.route53")
def test_get_hosted_zone_id_returns_right_Value(route53_mock):
    """The Id of the zone whose (dot-terminated) name matches is returned."""
    matching_zone = {"Name": "my.zone.", "Id": "expected_id"}
    route53_mock.list_hosted_zones.return_value = {"HostedZones": [matching_zone]}
    assert route53_interface.get_hosted_zone_id("my.zone") == "expected_id"
@patch('route53_ddns.route53_interface.sleep')
@patch("route53_ddns.route53_interface.route53")
def test_wait_for_change_completion(route53_mock, sleep_mock):
    """get_change is polled until the change reports INSYNC (three polls here)."""
    sleep_mock.return_value = None
    statuses = ["PENDING", "PENDING", "INSYNC"]
    route53_mock.get_change.side_effect = [
        {"ChangeInfo": {"Status": status}} for status in statuses
    ]
    route53_interface.wait_for_change_completion(change_id="change_id")
    # One get_change call per polled status, always with the same change Id.
    expected_calls = [call(Id="change_id")] * len(statuses)
    route53_mock.get_change.assert_has_calls(expected_calls)
@patch("route53_ddns.route53_interface.route53")
def test_get_current_ip_not_found(route53_mock):
    """get_current_ip returns None when no record set matches the requested name."""
    route53_mock.list_resource_record_sets.return_value = {
        "ResourceRecordSets": [
            {
                "Name": "record.other.zone.",
                "Type": "CNAME",
                "TTL": 3600
            }
        ]
    }
    # Use `is None` rather than `== None`: identity comparison is the correct
    # idiom for None checks (PEP 8 / pycodestyle E711).
    assert route53_interface.get_current_ip(zone_id="my.zone", record_name="record") is None
@patch("route53_ddns.route53_interface.route53")
def test_get_current_ip_found_wrong_type(route53_mock):
    """A matching record that is not an A record raises ValueError."""
    cname_record = {
        "Name": "record.my.zone.",
        "Type": "CNAME",
        "TTL": 3600,
        "ResourceRecords": [{"Value": "other.domain"}]
    }
    route53_mock.list_resource_record_sets.return_value = {
        "ResourceRecordSets": [cname_record]
    }
    with pytest.raises(ValueError):
        route53_interface.get_current_ip(zone_id="my.zone", record_name="record.my.zone")
@patch("route53_ddns.route53_interface.route53")
def test_get_current_ip_too_many_entries(route53_mock):
    """An A record carrying more than one value raises ValueError."""
    multi_value_record = {
        "Name": "record.my.zone.",
        "Type": "A",
        "TTL": 3600,
        "ResourceRecords": [{"Value": "10.0.0.1"}, {"Value": "10.0.0.2"}]
    }
    route53_mock.list_resource_record_sets.return_value = {
        "ResourceRecordSets": [multi_value_record]
    }
    with pytest.raises(ValueError):
        route53_interface.get_current_ip(zone_id="my.zone", record_name="record.my.zone")
@patch("route53_ddns.route53_interface.route53")
def test_get_current_ip_found_ok(route53_mock):
    """A single matching A record yields its value."""
    a_record = {
        "Name": "record.my.zone.",
        "Type": "A",
        "TTL": 3600,
        "ResourceRecords": [{"Value": "10.0.0.1"}]
    }
    route53_mock.list_resource_record_sets.return_value = {
        "ResourceRecordSets": [a_record]
    }
    current = route53_interface.get_current_ip(zone_id="my.zone", record_name="record.my.zone")
    assert current == "10.0.0.1"
@patch("route53_ddns.route53_interface.get_current_ip")
@patch("route53_ddns.route53_interface.get_hosted_zone_id")
@patch("route53_ddns.route53_interface.route53")
def test_update_record_nothing_to_do(route53_mock, get_hosted_zone_id_mock, get_current_ip_mock):
    """No change set is submitted when the record already points at the target IP."""
    get_current_ip_mock.return_value = "10.0.0.1"  # already equals the requested IP
    get_hosted_zone_id_mock.return_value = "zone_id"
    route53_interface.update_record("my.zone", "record.my.zone", "10.0.0.1")
    route53_mock.change_resource_record_sets.assert_not_called()
@patch("route53_ddns.route53_interface.get_current_ip")
@patch("route53_ddns.route53_interface.get_hosted_zone_id")
@patch("route53_ddns.route53_interface.route53")
def test_update_record_dryrun(route53_mock, get_hosted_zone_id_mock, get_current_ip_mock):
    """dryrun=True suppresses the change submission even when the IP differs."""
    get_current_ip_mock.return_value = "10.0.0.2"  # differs, so an update is due
    get_hosted_zone_id_mock.return_value = "zone_id"
    route53_interface.update_record("my.zone", "record.my.zone", "10.0.0.1", dryrun=True)
    route53_mock.change_resource_record_sets.assert_not_called()
@patch("route53_ddns.route53_interface.wait_for_change_completion")
@patch("route53_ddns.route53_interface.get_current_ip")
@patch("route53_ddns.route53_interface.get_hosted_zone_id")
@patch("route53_ddns.route53_interface.route53")
def test_update_record(route53_mock, get_hosted_zone_id_mock, get_current_ip_mock, wait_for_change_completion):
    """A differing current IP triggers an UPSERT change and a wait for propagation."""
    get_hosted_zone_id_mock.return_value = "zone_id"
    get_current_ip_mock.return_value = "10.0.0.2"
    route53_mock.change_resource_record_sets.return_value = {"ChangeInfo": {"Id": "change-id"}}
    wait_for_change_completion.return_value = None
    route53_interface.update_record("my.zone", "record.my.zone", "10.0.0.1")
    route53_mock.change_resource_record_sets.assert_called_once_with(
        HostedZoneId="zone_id",
        ChangeBatch={
            # Plain literal: the original used an f-string with no placeholders
            # (ruff F541); the resulting value is identical.
            "Comment": "Updating record to 10.0.0.1",
            "Changes": [
                {
                    "Action": "UPSERT",
                    "ResourceRecordSet": {
                        "Name": "record.my.zone",
                        "Type": "A",
                        "TTL": 60,
                        "ResourceRecords": [{"Value": "10.0.0.1"}],
                    },
                }
            ],
        },
    )
    wait_for_change_completion.assert_called_once()
| 35.402367
| 111
| 0.670232
| 747
| 5,983
| 4.951807
| 0.124498
| 0.13409
| 0.082184
| 0.11814
| 0.81103
| 0.77967
| 0.739389
| 0.720195
| 0.686942
| 0.686402
| 0
| 0.052301
| 0.20107
| 5,983
| 168
| 112
| 35.613095
| 0.721548
| 0
| 0
| 0.430769
| 0
| 0
| 0.278094
| 0.132609
| 0
| 0
| 0
| 0
| 0.061538
| 1
| 0.084615
| false
| 0
| 0.023077
| 0
| 0.107692
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
a8b56a64f15368e4ce35094fa893ace696dac90d
| 193
|
py
|
Python
|
tests/time_config.py
|
Toloka/toloka-airflow
|
cd7cc8bb755453c6ae2ff7a87445bc27e965e7f1
|
[
"Apache-2.0"
] | 5
|
2022-01-25T11:50:51.000Z
|
2022-02-03T10:06:27.000Z
|
tests/time_config.py
|
Toloka/toloka-airflow
|
cd7cc8bb755453c6ae2ff7a87445bc27e965e7f1
|
[
"Apache-2.0"
] | 1
|
2022-01-25T12:29:31.000Z
|
2022-02-03T10:05:40.000Z
|
tests/time_config.py
|
Toloka/toloka-airflow
|
cd7cc8bb755453c6ae2ff7a87445bc27e965e7f1
|
[
"Apache-2.0"
] | null | null | null |
from datetime import datetime, timedelta, timezone

# Fixed DAG data interval used by the tests: one full day starting
# 2021-09-13 in a UTC+3 fixed-offset timezone.
_UTC_PLUS_3 = timezone(timedelta(hours=3))
DATA_INTERVAL_START = datetime(2021, 9, 13, tzinfo=_UTC_PLUS_3)
DATA_INTERVAL_END = DATA_INTERVAL_START + timedelta(days=1)
| 38.6
| 80
| 0.80829
| 27
| 193
| 5.555556
| 0.62963
| 0.24
| 0.226667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.051429
| 0.093264
| 193
| 4
| 81
| 48.25
| 0.805714
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.333333
| 0
| 0.333333
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
a8c0c69125f0d2db08af6df25e3a2693a5d1a985
| 51
|
py
|
Python
|
protonets/data/__init__.py
|
gabrielhuang/prototypical-networks
|
e363420b627c4a558eccde6b72e179b632d183c5
|
[
"MIT"
] | 26
|
2019-02-21T17:01:19.000Z
|
2021-12-12T08:26:38.000Z
|
protonets/data/__init__.py
|
gabrielhuang/centroid-networks
|
e363420b627c4a558eccde6b72e179b632d183c5
|
[
"MIT"
] | null | null | null |
protonets/data/__init__.py
|
gabrielhuang/centroid-networks
|
e363420b627c4a558eccde6b72e179b632d183c5
|
[
"MIT"
] | null | null | null |
from . import omniglot, miniimagenet, omniglot_ccn
| 25.5
| 50
| 0.823529
| 6
| 51
| 6.833333
| 0.833333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.117647
| 51
| 1
| 51
| 51
| 0.911111
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
76bbe259863b7c662cdd4fdb8d0a26d65afaafef
| 292
|
py
|
Python
|
binwalk/__init__.py
|
EnjoyLifeFund/macHighSierra-py36-pkgs
|
5668b5785296b314ea1321057420bcd077dba9ea
|
[
"BSD-3-Clause",
"BSD-2-Clause",
"MIT"
] | null | null | null |
binwalk/__init__.py
|
EnjoyLifeFund/macHighSierra-py36-pkgs
|
5668b5785296b314ea1321057420bcd077dba9ea
|
[
"BSD-3-Clause",
"BSD-2-Clause",
"MIT"
] | null | null | null |
binwalk/__init__.py
|
EnjoyLifeFund/macHighSierra-py36-pkgs
|
5668b5785296b314ea1321057420bcd077dba9ea
|
[
"BSD-3-Clause",
"BSD-2-Clause",
"MIT"
] | null | null | null |
__all__ = ['scan', 'execute', 'Modules', 'ModuleException']
from binwalk.core.module import Modules, ModuleException
# Convenience functions
def scan(*args, **kwargs):
    """Run a binwalk scan: build a Modules instance from the given
    options and return the result of executing it."""
    modules = Modules(*args, **kwargs)
    return modules.execute()
def execute(*args, **kwargs):
    """Alias of scan(): build a Modules instance from the given
    options and return the result of executing it."""
    modules = Modules(*args, **kwargs)
    return modules.execute()
| 29.2
| 59
| 0.705479
| 32
| 292
| 6.3125
| 0.5
| 0.19802
| 0.158416
| 0.227723
| 0.39604
| 0.39604
| 0.39604
| 0
| 0
| 0
| 0
| 0
| 0.123288
| 292
| 9
| 60
| 32.444444
| 0.789063
| 0.071918
| 0
| 0.333333
| 0
| 0
| 0.122677
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0.166667
| 0.333333
| 0.833333
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 5
|
76bbe821286bbb6e65af0614717d8561ce4c07a1
| 73
|
py
|
Python
|
sample/django_sample/app/app/__init__.py
|
knroy/celery-rmq
|
63220db4bc82ae7767c18713bf3d19679d44aaf8
|
[
"MIT"
] | null | null | null |
sample/django_sample/app/app/__init__.py
|
knroy/celery-rmq
|
63220db4bc82ae7767c18713bf3d19679d44aaf8
|
[
"MIT"
] | 4
|
2021-03-30T13:14:35.000Z
|
2021-09-22T18:57:04.000Z
|
sample/django_sample/app/app/__init__.py
|
knroy/celery-rmq
|
63220db4bc82ae7767c18713bf3d19679d44aaf8
|
[
"MIT"
] | null | null | null |
from .celery import app, app_provider
__all__ = ['app', 'app_provider']
| 18.25
| 37
| 0.726027
| 10
| 73
| 4.7
| 0.6
| 0.255319
| 0.595745
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.136986
| 73
| 3
| 38
| 24.333333
| 0.746032
| 0
| 0
| 0
| 0
| 0
| 0.205479
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
76c8ce776b151bd002dfa301383757f026d469ee
| 53
|
py
|
Python
|
src/session/twitter/gui/configuration/__init__.py
|
Oire/TheQube
|
fcfd8a68b15948e0740642d635db24adef8cc314
|
[
"MIT"
] | 21
|
2015-08-02T21:26:14.000Z
|
2019-12-27T09:57:44.000Z
|
src/session/twitter/gui/configuration/__init__.py
|
Oire/TheQube
|
fcfd8a68b15948e0740642d635db24adef8cc314
|
[
"MIT"
] | 34
|
2015-01-12T00:38:14.000Z
|
2020-08-31T11:19:37.000Z
|
src/session/twitter/gui/configuration/__init__.py
|
Oire/TheQube
|
fcfd8a68b15948e0740642d635db24adef8cc314
|
[
"MIT"
] | 15
|
2015-03-24T15:42:30.000Z
|
2020-09-24T20:26:42.000Z
|
from main import TwitterConfigDialog
import panels
| 17.666667
| 37
| 0.849057
| 6
| 53
| 7.5
| 0.833333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.150943
| 53
| 2
| 38
| 26.5
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
4f5370f91f3a2cbea92a87a0fa913f73feda23f1
| 141
|
py
|
Python
|
addons/payment_sips/__init__.py
|
jjiege/odoo
|
fd5b8ad387c1881f349d125cbd56433f4d49398f
|
[
"MIT"
] | null | null | null |
addons/payment_sips/__init__.py
|
jjiege/odoo
|
fd5b8ad387c1881f349d125cbd56433f4d49398f
|
[
"MIT"
] | null | null | null |
addons/payment_sips/__init__.py
|
jjiege/odoo
|
fd5b8ad387c1881f349d125cbd56433f4d49398f
|
[
"MIT"
] | null | null | null |
from . import models
from . import controllers
from odoo.addons.payment.models.payment_acquirer import create_missing_journal_for_acquirers
| 28.2
| 92
| 0.865248
| 19
| 141
| 6.157895
| 0.684211
| 0.17094
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.092199
| 141
| 4
| 93
| 35.25
| 0.914063
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
4f87469359e14f5db5b194d722c1c3fafcd16e22
| 371
|
py
|
Python
|
lib/googlecloudsdk/third_party/apis/testing/v1/__init__.py
|
bopopescu/SDK
|
e6d9aaee2456f706d1d86e8ec2a41d146e33550d
|
[
"Apache-2.0"
] | null | null | null |
lib/googlecloudsdk/third_party/apis/testing/v1/__init__.py
|
bopopescu/SDK
|
e6d9aaee2456f706d1d86e8ec2a41d146e33550d
|
[
"Apache-2.0"
] | null | null | null |
lib/googlecloudsdk/third_party/apis/testing/v1/__init__.py
|
bopopescu/SDK
|
e6d9aaee2456f706d1d86e8ec2a41d146e33550d
|
[
"Apache-2.0"
] | 2
|
2020-11-04T03:08:21.000Z
|
2020-11-05T08:14:41.000Z
|
"""Common imports for generated testing client library."""
# pylint:disable=wildcard-import
import pkgutil

# Re-export the apitools base helpers plus the generated client and message
# classes so callers only need to import this package.
from googlecloudsdk.third_party.apitools.base.py import *
from googlecloudsdk.third_party.apis.testing.v1.testing_v1_client import *
from googlecloudsdk.third_party.apis.testing.v1.testing_v1_messages import *

# Allow this package's modules to live in multiple directories on sys.path
# (namespace-package behaviour via pkgutil.extend_path).
__path__ = pkgutil.extend_path(__path__, __name__)
| 33.727273
| 76
| 0.830189
| 49
| 371
| 5.877551
| 0.510204
| 0.125
| 0.239583
| 0.291667
| 0.388889
| 0.388889
| 0.388889
| 0.388889
| 0.388889
| 0.388889
| 0
| 0.01173
| 0.080863
| 371
| 10
| 77
| 37.1
| 0.832845
| 0.226415
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.8
| 0
| 0.8
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
8c0c95920311c5496c10f8bf576551b17c23311c
| 209
|
py
|
Python
|
operations/filters/face_detection.py
|
zylamarek/dataset-tools
|
d0f446a6da20b7394bab86bf2253de866dbfc7be
|
[
"MIT"
] | null | null | null |
operations/filters/face_detection.py
|
zylamarek/dataset-tools
|
d0f446a6da20b7394bab86bf2253de866dbfc7be
|
[
"MIT"
] | 6
|
2021-03-19T01:18:16.000Z
|
2022-03-11T23:49:18.000Z
|
operations/filters/face_detection.py
|
zylamarek/dataset-tools
|
d0f446a6da20b7394bab86bf2253de866dbfc7be
|
[
"MIT"
] | null | null | null |
import face_recognition
from .filter import Filter
class FaceDetection(Filter):
    """Filter that keeps only images in which at least one face is detected."""

    def apply_single(self, img):
        """Return True when the HOG-based detector finds any face in ``img``."""
        face_boxes = face_recognition.face_locations(img, model='hog')
        return len(face_boxes) > 0
| 20.9
| 65
| 0.722488
| 26
| 209
| 5.653846
| 0.692308
| 0.204082
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.191388
| 209
| 9
| 66
| 23.222222
| 0.869822
| 0
| 0
| 0
| 0
| 0
| 0.014354
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.166667
| false
| 0
| 0.333333
| 0
| 0.833333
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
8c2966d423f802dda3f0bd063fdc1707816a7577
| 370
|
py
|
Python
|
app/utility/schemas.py
|
syth0le/tg_reminder_bot
|
956f552c2c81732aaa41c1f006e31f4167e7cdff
|
[
"MIT"
] | null | null | null |
app/utility/schemas.py
|
syth0le/tg_reminder_bot
|
956f552c2c81732aaa41c1f006e31f4167e7cdff
|
[
"MIT"
] | null | null | null |
app/utility/schemas.py
|
syth0le/tg_reminder_bot
|
956f552c2c81732aaa41c1f006e31f4167e7cdff
|
[
"MIT"
] | null | null | null |
from typing import NamedTuple
class TemporaryReminder(NamedTuple):
    """Immutable record of a one-off reminder.

    NOTE(review): field semantics are inferred from names — confirm against
    the bot handlers that construct these records.
    """
    id: int  # reminder identifier (presumably the DB primary key — confirm)
    title: str  # user-visible reminder text
    type: str  # kind discriminator; note this field shadows the builtin name
    is_done: bool  # True once the reminder has been completed/acknowledged
    date: str  # fire date/time kept as a string — format not visible here
class PermanentReminder(NamedTuple):
    """Immutable record of a recurring reminder.

    Same shape as TemporaryReminder plus a repeat interval.
    NOTE(review): field semantics are inferred from names — confirm against
    the bot handlers that construct these records.
    """
    id: int  # reminder identifier (presumably the DB primary key — confirm)
    title: str  # user-visible reminder text
    type: str  # kind discriminator; note this field shadows the builtin name
    is_done: bool  # True once the reminder has been completed/acknowledged
    frequency: int  # repeat interval, in hours (per the original comment)
    date: str  # anchor date/time kept as a string — format not visible here
class Bookmark(NamedTuple):
    """Immutable record of a saved bookmark (no date/frequency fields).

    NOTE(review): field semantics are inferred from names — confirm against
    the bot handlers that construct these records.
    """
    id: int  # bookmark identifier (presumably the DB primary key — confirm)
    title: str  # user-visible bookmark text
    type: str  # kind discriminator; note this field shadows the builtin name
    is_done: bool  # completion/acknowledgement flag
| 14.230769
| 36
| 0.640541
| 47
| 370
| 4.978723
| 0.404255
| 0.153846
| 0.192308
| 0.25641
| 0.512821
| 0.512821
| 0.512821
| 0.512821
| 0.512821
| 0.512821
| 0
| 0
| 0.291892
| 370
| 25
| 37
| 14.8
| 0.89313
| 0.013514
| 0
| 0.736842
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.052632
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
|
0
| 5
|
8c3182dadc68a822d438fc04808aa6ddc6498ecf
| 146
|
py
|
Python
|
copywriting/signals.py
|
uncommitted-and-forgotten/django-copywriting
|
4a9fab437d255c71920420f6b478d77b5d5bbbfb
|
[
"MIT"
] | 2
|
2015-03-10T15:45:08.000Z
|
2015-10-20T05:00:40.000Z
|
copywriting/signals.py
|
uncommitted-and-forgotten/django-copywriting
|
4a9fab437d255c71920420f6b478d77b5d5bbbfb
|
[
"MIT"
] | 4
|
2015-06-10T08:23:55.000Z
|
2016-01-25T13:18:19.000Z
|
copywriting/signals.py
|
uncommitted-and-forgotten/django-copywriting
|
4a9fab437d255c71920420f6b478d77b5d5bbbfb
|
[
"MIT"
] | 1
|
2021-08-28T15:16:45.000Z
|
2021-08-28T15:16:45.000Z
|
from django.dispatch import Signal
# Article-lifecycle signals; receivers are sent an ``articleID`` argument
# identifying the article that changed state.
# NOTE(review): ``providing_args`` is purely documentary and was deprecated
# and later removed in newer Django releases — confirm the targeted version.
ready_to_review = Signal(providing_args=["articleID"])
ready_to_publish = Signal(providing_args=["articleID"])
| 36.5
| 55
| 0.815068
| 19
| 146
| 5.947368
| 0.631579
| 0.123894
| 0.336283
| 0.495575
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.068493
| 146
| 4
| 55
| 36.5
| 0.830882
| 0
| 0
| 0
| 0
| 0
| 0.122449
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.333333
| 0
| 0.333333
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
4fd80fa4ddbc1ae8806802522dce2b88389a66d9
| 141
|
py
|
Python
|
rotypes/__init__.py
|
Sait0Yuuki/ArknightsAutoHelper
|
5ecec0d120482c930181346cfdb8542090e169c1
|
[
"MIT"
] | 1,035
|
2019-05-14T11:58:32.000Z
|
2022-03-16T15:09:53.000Z
|
rotypes/__init__.py
|
Sait0Yuuki/ArknightsAutoHelper
|
5ecec0d120482c930181346cfdb8542090e169c1
|
[
"MIT"
] | 209
|
2019-05-11T13:19:57.000Z
|
2022-03-12T01:42:11.000Z
|
rotypes/__init__.py
|
Sait0Yuuki/ArknightsAutoHelper
|
5ecec0d120482c930181346cfdb8542090e169c1
|
[
"MIT"
] | 254
|
2019-05-13T09:06:54.000Z
|
2022-03-16T09:47:44.000Z
|
from .types import HRESULT, GUID, REFGUID
from . import roapi
from .winstring import HSTRING
from .inspectable import IUnknown, IInspectable
| 28.2
| 47
| 0.815603
| 18
| 141
| 6.388889
| 0.666667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.134752
| 141
| 4
| 48
| 35.25
| 0.942623
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
4fdc48164ee59b8899d54e3e68f83bca823c5e56
| 20,774
|
py
|
Python
|
pyunfurl/provider_data/noembed.py
|
tdarwin/pyunfurl
|
c49833977a1df6d39e6ae502c4f0c4ae38ed1193
|
[
"MIT"
] | 5
|
2019-11-01T01:40:36.000Z
|
2022-01-11T10:37:23.000Z
|
pyunfurl/provider_data/noembed.py
|
tdarwin/pyunfurl
|
c49833977a1df6d39e6ae502c4f0c4ae38ed1193
|
[
"MIT"
] | 3
|
2020-11-17T08:30:29.000Z
|
2022-02-16T15:00:46.000Z
|
pyunfurl/provider_data/noembed.py
|
tdarwin/pyunfurl
|
c49833977a1df6d39e6ae502c4f0c4ae38ed1193
|
[
"MIT"
] | 5
|
2020-10-01T10:05:23.000Z
|
2022-02-14T14:41:30.000Z
|
NOEMBED_PROVIDER_LIST = [
[
"https?://(?:[^\\.]+\\.)?(?:youtu\\.be|youtube\\.com/embed)/([a-zA-Z0-9_-]+)",
"http://noembed.com/embed",
],
[
"https?://(?:[^\\.]+\\.)?youtube\\.com/watch/?\\?(?:.+&)?v=([^&]+)",
"http://noembed.com/embed",
],
[
"https?://www\\.globalgiving\\.org/((micro)?projects|funds)/.*",
"http://noembed.com/embed",
],
[
"https?://www\\.giantbomb\\.com/videos/[^/]+/\\d+-\\d+/?",
"http://noembed.com/embed",
],
["http://bash\\.org/\\?(\\d+)", "http://noembed.com/embed"],
["http://amzn\\.com/([^/]+)", "http://noembed.com/embed"],
[
"http://www\\.amazon\\.com/(?:.+/)?[gd]p/(?:product/)?(?:tags-on-product/)?([a-zA-Z0-9]+)",
"http://noembed.com/embed",
],
["https://tube.switch.ch/videos/([a-z0-9]+)", "http://noembed.com/embed"],
["https?://db\\.tt/[a-zA-Z0-9]+", "http://noembed.com/embed"],
[
"https?://www\\.(dropbox\\.com/s/.+\\.(?:jpg|png|gif))",
"http://noembed.com/embed",
],
["https?://imgur\\.com/(?:[^\\/]+/)?[0-9a-zA-Z]+$", "http://noembed.com/embed"],
["https?://muki\\.io/(embed/)?(.+)", "http://noembed.com/embed"],
["https?://vine.co/v/[a-zA-Z0-9]+", "http://noembed.com/embed"],
[
"https?://reports\\.zoho\\.com/ZDBDataSheetView\\.cc\\?OBJID=1432535000000003002&STANDALONE=true&INTERVAL=120&DATATYPESYMBOL=false&REMTOOLBAR=false&SEARCHBOX=true&INCLUDETITLE=true&INCLUDEDESC=true&SHOWHIDEOPT=true",
"http://noembed.com/embed",
],
["https?://yfrog\\.us/.*", "http://noembed.com/embed"],
["https?://.*\\.yfrog\\.com/.*", "http://noembed.com/embed"],
["https?://.*\\.wizer\\.me/preview/.*", "http://noembed.com/embed"],
["https?://.*\\.wizer\\.me/learn/.*", "http://noembed.com/embed"],
["https?://.*\\.wiredrive\\.com/.*", "http://noembed.com/embed"],
["https?://vlipsy\\.com/.*", "http://noembed.com/embed"],
["https?://player\\.vimeo\\.com/video/.*", "http://noembed.com/embed"],
["https?://vimeo\\.com/ondemand/.*/.*", "http://noembed.com/embed"],
["https?://vimeo\\.com/groups/.*/videos/.*", "http://noembed.com/embed"],
["https?://vimeo\\.com/channels/.*/.*", "http://noembed.com/embed"],
["https?://vimeo\\.com/album/.*/video/.*", "http://noembed.com/embed"],
["https?://vimeo\\.com/.*", "http://noembed.com/embed"],
["https?://vidl\\.it/.*", "http://noembed.com/embed"],
["https?://www\\.videojug\\.com/interview/.*", "http://noembed.com/embed"],
["https?://www\\.videojug\\.com/film/.*", "http://noembed.com/embed"],
["https?://www\\.vevo\\.com/.*", "http://noembed.com/embed"],
["https?://veervr\\.tv/videos/.*", "http://noembed.com/embed"],
["https?://veer\\.tv/videos/.*", "http://noembed.com/embed"],
["https?://uttles\\.com/uttle/.*", "http://noembed.com/embed"],
["https?://utposts\\.com/products/.*", "http://noembed.com/embed"],
["https?://www\\.utposts\\.com/products/.*", "http://noembed.com/embed"],
["https?://.*\\.ustream\\.com/.*", "http://noembed.com/embed"],
["https?://.*\\.ustream\\.tv/.*", "http://noembed.com/embed"],
["https?://.*\\.uol\\.com\\.br/video/.*", "http://noembed.com/embed"],
["https?://.*\\.uol\\.com\\.br/view/.*", "http://noembed.com/embed"],
["https?://player\\.ubideo\\.com/.*", "http://noembed.com/embed"],
["https?://twitter\\.com/.*/status/.*", "http://noembed.com/embed"],
["https?://twitch\\.tv/.*", "http://noembed.com/embed"],
["https?://www\\.twitch\\.tv/.*", "http://noembed.com/embed"],
["https?://clips\\.twitch\\.tv/.*", "http://noembed.com/embed"],
["https?://www\\.topy\\.se/image/.*", "http://noembed.com/embed"],
["https?://www\\.tickcounter\\.com/worldclock/.*", "http://noembed.com/embed"],
["https?://www\\.tickcounter\\.com/ticker/.*", "http://noembed.com/embed"],
["https?://www\\.tickcounter\\.com/countup/.*", "http://noembed.com/embed"],
["https?://www\\.tickcounter\\.com/countdown/.*", "http://noembed.com/embed"],
["https?://theysaidso\\.com/image/.*", "http://noembed.com/embed"],
["https?://.*\\.nytimes\\.com/.*", "http://noembed.com/embed"],
["https?://nytimes\\.com/.*", "http://noembed.com/embed"],
["https?://www\\.nytimes\\.com/svc/oembed", "http://noembed.com/embed"],
["https?://ted\\.com/talks/.*", "http://noembed.com/embed"],
["https?://www\\.sway\\.com/.*", "http://noembed.com/embed"],
["https?://sway\\.com/.*", "http://noembed.com/embed"],
["https?://www\\.sutori\\.com/story/.*", "http://noembed.com/embed"],
["https?://content\\.streamonecloud\\.net/embed/.*", "http://noembed.com/embed"],
["https?://streamable\\.com/.*", "http://noembed.com/embed"],
["https?://.*\\.spreaker\\.com/.*", "http://noembed.com/embed"],
["https?://speakerdeck\\.com/.*/.*", "http://noembed.com/embed"],
["https?://soundsgood\\.co/playlist/.*", "http://noembed.com/embed"],
["https?://play\\.soundsgood\\.co/playlist/.*", "http://noembed.com/embed"],
["https?://soundcloud\\.com/.*", "http://noembed.com/embed"],
["https?://www\\.socialexplorer\\.com/.*/embed", "http://noembed.com/embed"],
["https?://www\\.socialexplorer\\.com/.*/edit", "http://noembed.com/embed"],
["https?://www\\.socialexplorer\\.com/.*/view", "http://noembed.com/embed"],
["https?://www\\.socialexplorer\\.com/.*/explore", "http://noembed.com/embed"],
["https?://.*\\.smugmug\\.com/.*", "http://noembed.com/embed"],
["https?://pt\\.slideshare\\.net/.*/.*", "http://noembed.com/embed"],
["https?://es\\.slideshare\\.net/.*/.*", "http://noembed.com/embed"],
["https?://de\\.slideshare\\.net/.*/.*", "http://noembed.com/embed"],
["https?://fr\\.slideshare\\.net/.*/.*", "http://noembed.com/embed"],
["https?://www\\.slideshare\\.net/.*/.*", "http://noembed.com/embed"],
["https?://sketchfab\\.com/.*/folders/.*", "http://noembed.com/embed"],
["https?://sketchfab\\.com/models/.*", "http://noembed.com/embed"],
["https?://onsizzle\\.com/i/.*", "http://noembed.com/embed"],
["https?://.*\\.silk\\.co/s/embed/.*", "http://noembed.com/embed"],
["https?://.*\\.silk\\.co/explore/.*", "http://noembed.com/embed"],
["https?://showtheway\\.io/to/.*", "http://noembed.com/embed"],
["https?://shoud\\.io/.*", "http://noembed.com/embed"],
["https?://shoudio\\.com/.*", "http://noembed.com/embed"],
["https?://www\\.shortnote\\.jp/view/notes/.*", "http://noembed.com/embed"],
["https?://www\\.scribd\\.com/doc/.*", "http://noembed.com/embed"],
["https?://scribblemaps\\.com/maps/view/.*", "http://noembed.com/embed"],
["https?://www\\.scribblemaps\\.com/maps/view/.*", "http://noembed.com/embed"],
["https?://www\\.screenr\\.com/.*/", "http://noembed.com/embed"],
["https?://.*\\.screen9\\.tv/.*", "http://noembed.com/embed"],
["https?://console\\.screen9\\.com/.*", "http://noembed.com/embed"],
["https?://videos\\.sapo\\.pt/.*", "http://noembed.com/embed"],
["https?://roomshare\\.jp/en/post/.*", "http://noembed.com/embed"],
["https?://roomshare\\.jp/post/.*", "http://noembed.com/embed"],
["https?://www\\.reverbnation\\.com/.*/songs/.*", "http://noembed.com/embed"],
["https?://www\\.reverbnation\\.com/.*", "http://noembed.com/embed"],
[
"https?://repubhub\\.icopyright\\.net/freePost\\.act\\?.*",
"http://noembed.com/embed",
],
["https?://rwire\\.com/.*", "http://noembed.com/embed"],
["https?://reddit\\.com/r/.*/comments/.*/.*", "http://noembed.com/embed"],
["https?://rapidengage\\.com/s/.*", "http://noembed.com/embed"],
["https?://www\\.quizz\\.biz/quizz-.*\\.html", "http://noembed.com/embed"],
["https?://www\\.quiz\\.biz/quizz-.*\\.html", "http://noembed.com/embed"],
["https?://punters\\.com\\.au/.*", "http://noembed.com/embed"],
["https?://www\\.punters\\.com\\.au/.*", "http://noembed.com/embed"],
["https?://portfolium\\.com/entry/.*", "http://noembed.com/embed"],
["https?://app\\.sellwithport\\.com/#/buyer/.*", "http://noembed.com/embed"],
["https?://.*\\.polldaddy\\.com/ratings/.*", "http://noembed.com/embed"],
["https?://.*\\.polldaddy\\.com/poll/.*", "http://noembed.com/embed"],
["https?://.*\\.polldaddy\\.com/s/.*", "http://noembed.com/embed"],
["https?://store\\.pixdor\\.com/map/.*/show", "http://noembed.com/embed"],
[
"https?://store\\.pixdor\\.com/place-marker-widget/.*/show",
"http://noembed.com/embed",
],
["https?://www\\.pastery\\.net/.*", "http://noembed.com/embed"],
["https?://pastery\\.net/.*", "http://noembed.com/embed"],
["https?://www\\.oumy\\.com/v/.*", "http://noembed.com/embed"],
["https?://orbitvu\\.co/001/.*/1/2/orbittour/.*/view", "http://noembed.com/embed"],
["https?://orbitvu\\.co/001/.*/2/orbittour/.*/view", "http://noembed.com/embed"],
["https?://orbitvu\\.co/001/.*/ov3602/.*/view", "http://noembed.com/embed"],
["https?://orbitvu\\.co/001/.*/ov3601/.*/view", "http://noembed.com/embed"],
["https?://orbitvu\\.co/001/.*/ov3601/view", "http://noembed.com/embed"],
["https?://on\\.aol\\.com/video/.*", "http://noembed.com/embed"],
["https?://official\\.fm/playlists/.*", "http://noembed.com/embed"],
["https?://official\\.fm/tracks/.*", "http://noembed.com/embed"],
["https?://mix\\.office\\.com/embed/.*", "http://noembed.com/embed"],
["https?://mix\\.office\\.com/watch/.*", "http://noembed.com/embed"],
["https?://odds\\.com\\.au/.*", "http://noembed.com/embed"],
["https?://www\\.odds\\.com\\.au/.*", "http://noembed.com/embed"],
["https?://.*\\.nfb\\.ca/film/.*", "http://noembed.com/embed"],
["https?://nanoo\\.pro/link/.*", "http://noembed.com/embed"],
["https?://.*\\.nanoo\\.pro/link/.*", "http://noembed.com/embed"],
["https?://nanoo\\.tv/link/.*", "http://noembed.com/embed"],
["https?://.*\\.nanoo\\.tv/link/.*", "http://noembed.com/embed"],
["https?://mybeweeg\\.com/w/.*", "http://noembed.com/embed"],
["https?://beta\\.modelo\\.io/embedded/.*", "http://noembed.com/embed"],
["https?://moby\\.to/.*", "http://noembed.com/embed"],
["https?://www\\.mobypicture\\.com/user/.*/view/.*", "http://noembed.com/embed"],
["https?://www\\.mixcloud\\.com/.*/.*/", "http://noembed.com/embed"],
["https?://meetu\\.ps/.*", "http://noembed.com/embed"],
["https?://meetup\\.com/.*", "http://noembed.com/embed"],
["https?://me\\.me/i/.*", "http://noembed.com/embed"],
["https?://mathembed\\.com/latex\\?inputText=.*", "http://noembed.com/embed"],
["https?://learningapps\\.org/.*", "http://noembed.com/embed"],
["https?://jdr\\.knacki\\.info/meuh/.*", "http://noembed.com/embed"],
["https?://www\\.kitchenbowl\\.com/recipe/.*", "http://noembed.com/embed"],
["https?://kit\\.com/.*/.*", "http://noembed.com/embed"],
["https?://www\\.kidoju\\.com/fr/x/.*/.*", "http://noembed.com/embed"],
["https?://www\\.kidoju\\.com/en/x/.*/.*", "http://noembed.com/embed"],
["https?://www\\.kickstarter\\.com/projects/.*", "http://noembed.com/embed"],
["https?://www\\.isnare\\.com/.*", "http://noembed.com/embed"],
["https?://www\\.instagr\\.am/p/.*", "http://noembed.com/embed"],
["https?://www\\.instagram\\.com/p/.*", "http://noembed.com/embed"],
["https?://instagr\\.am/p/.*", "http://noembed.com/embed"],
["https?://instagram\\.com/p/.*", "http://noembed.com/embed"],
["https?://.*\\.inphood\\.com/.*", "http://noembed.com/embed"],
["https?://www\\.inoreader\\.com/oembed/", "http://noembed.com/embed"],
["https?://infogr\\.am/.*", "http://noembed.com/embed"],
[
"https?://player\\.indacolive\\.com/player/jwp/clients/.*",
"http://noembed.com/embed",
],
["https?://ifttt\\.com/recipes/.*", "http://noembed.com/embed"],
["https?://www\\.ifixit\\.com/Guide/View/.*", "http://noembed.com/embed"],
["https?://www\\.hulu\\.com/watch/.*", "http://noembed.com/embed"],
["https?://huffduffer\\.com/.*/.*", "http://noembed.com/embed"],
["https?://gyazo\\.com/.*", "http://noembed.com/embed"],
["https?://media\\.giphy\\.com/media/.*/giphy\\.gif", "http://noembed.com/embed"],
["https?://gph\\.is/.*", "http://noembed.com/embed"],
["https?://giphy\\.com/gifs/.*", "http://noembed.com/embed"],
["https?://www\\.gfycat\\.com/.*", "http://noembed.com/embed"],
["https?://gfycat\\.com/.*", "http://noembed.com/embed"],
["https?://gty\\.im/.*", "http://noembed.com/embed"],
["https?://germany\\.geograph\\.org/.*", "http://noembed.com/embed"],
["https?://geo\\.hlipp\\.de/.*", "http://noembed.com/embed"],
["https?://geo-en\\.hlipp\\.de/.*", "http://noembed.com/embed"],
["https?://.*\\.channel\\.geographs\\.org/.*", "http://noembed.com/embed"],
["https?://channel-islands\\.geographs\\.org/.*", "http://noembed.com/embed"],
["https?://channel-islands\\.geograph\\.org/.*", "http://noembed.com/embed"],
["https?://.*\\.geograph\\.org\\.je/.*", "http://noembed.com/embed"],
["https?://.*\\.geograph\\.org\\.gg/.*", "http://noembed.com/embed"],
[
"https?://.*\\.wikimedia\\.org/.*_geograph\\.org\\.uk_.*",
"http://noembed.com/embed",
],
["https?://.*\\.geograph\\.ie/.*", "http://noembed.com/embed"],
["https?://.*\\.geograph\\.co\\.uk/.*", "http://noembed.com/embed"],
["https?://.*\\.geograph\\.org\\.uk/.*", "http://noembed.com/embed"],
["https?://www\\.funnyordie\\.com/videos/.*", "http://noembed.com/embed"],
["https?://framebuzz\\.com/v/.*", "http://noembed.com/embed"],
[
"https?://fiso\\.foxsports\\.com\\.au/isomorphic-widget/.*",
"http://noembed.com/embed",
],
["https?://flic\\.kr/p/.*", "http://noembed.com/embed"],
["https?://.*\\.flickr\\.com/photos/.*", "http://noembed.com/embed"],
["https?://.*\\.flat\\.io/score/.*", "http://noembed.com/embed"],
["https?://flat\\.io/score/.*", "http://noembed.com/embed"],
["https?://www\\.facebook\\.com/video\\.php", "http://noembed.com/embed"],
["https?://www\\.facebook\\.com/.*/videos/.*", "http://noembed.com/embed"],
["https?://eyrie\\.io/sparkfun/.*", "http://noembed.com/embed"],
["https?://eyrie\\.io/board/.*", "http://noembed.com/embed"],
["https?://embedarticles\\.com/.*", "http://noembed.com/embed"],
["https?://egliseinfo\\.catholique\\.fr/.*", "http://noembed.com/embed"],
["https?://edocr\\.com/docs/.*", "http://noembed.com/embed"],
["https?://dotsub\\.com/view/.*", "http://noembed.com/embed"],
["https?://www\\.docs\\.com/.*", "http://noembed.com/embed"],
["https?://docs\\.com/.*", "http://noembed.com/embed"],
["https?://docdro\\.id/.*", "http://noembed.com/embed"],
["https?://.*\\.docdroid\\.net/.*", "http://noembed.com/embed"],
["https?://www\\.dipity\\.com/.*/.*/", "http://noembed.com/embed"],
["https?://.*\\.didacte\\.com/a/course/.*", "http://noembed.com/embed"],
["https?://sta\\.sh/.*", "http://noembed.com/embed"],
["https?://fav\\.me/.*", "http://noembed.com/embed"],
["https?://.*\\.deviantart\\.com/.*#/d.*", "http://noembed.com/embed"],
["https?://.*\\.deviantart\\.com/art/.*", "http://noembed.com/embed"],
["https?://www\\.dailymotion\\.com/video/.*", "http://noembed.com/embed"],
["https?://www\\.dailymile\\.com/people/.*/entries/.*", "http://noembed.com/embed"],
["https?://app\\.cyranosystems\\.com/msg/.*", "http://noembed.com/embed"],
["https?://staging\\.cyranosystems\\.com/msg/.*", "http://noembed.com/embed"],
["https?://crowdranking\\.com/.*/.*", "http://noembed.com/embed"],
["https?://coub\\.com/embed/.*", "http://noembed.com/embed"],
["https?://coub\\.com/view/.*", "http://noembed.com/embed"],
["https?://commaful\\.com/play/.*", "http://noembed.com/embed"],
["https?://www\\.collegehumor\\.com/video/.*", "http://noembed.com/embed"],
["https?://codesandbox\\.io/embed/.*", "http://noembed.com/embed"],
["https?://codesandbox\\.io/s/.*", "http://noembed.com/embed"],
["https?://www\\.codepoints\\.net/.*", "http://noembed.com/embed"],
["https?://codepoints\\.net/.*", "http://noembed.com/embed"],
["https?://codepen\\.io/.*", "http://noembed.com/embed"],
["https?://clyp\\.it/playlist/.*", "http://noembed.com/embed"],
["https?://clyp\\.it/.*", "http://noembed.com/embed"],
["https?://www\\.clipland\\.com/v/.*", "http://noembed.com/embed"],
["https?://www\\.circuitlab\\.com/circuit/.*", "http://noembed.com/embed"],
["https?://chirb\\.it/.*", "http://noembed.com/embed"],
["https?://public\\.chartblocks\\.com/c/.*", "http://noembed.com/embed"],
["https?://img\\.catbo\\.at/.*", "http://noembed.com/embed"],
["https?://carbonhealth\\.com/practice/.*", "http://noembed.com/embed"],
["https?://cacoo\\.com/diagrams/.*", "http://noembed.com/embed"],
["https?://buttondown\\.email/.*", "http://noembed.com/embed"],
["https?://blackfire\\.io/profiles/compare/.*/graph", "http://noembed.com/embed"],
["https?://blackfire\\.io/profiles/.*/graph", "http://noembed.com/embed"],
["https?://audiosnaps\\.com/k/.*", "http://noembed.com/embed"],
["https?://www\\.audiomack\\.com/playlist/.*", "http://noembed.com/embed"],
["https?://www\\.audiomack\\.com/album/.*", "http://noembed.com/embed"],
["https?://www\\.audiomack\\.com/song/.*", "http://noembed.com/embed"],
["https?://animoto\\.com/play/.*", "http://noembed.com/embed"],
["https?://animatron\\.com/project/.*", "http://noembed.com/embed"],
["https?://www\\.animatron\\.com/project/.*", "http://noembed.com/embed"],
["https?://live\\.amcharts\\.com/.*", "http://noembed.com/embed"],
["https?://photos\\.app\\.net/.*/.*", "http://noembed.com/embed"],
["https?://alpha\\.app\\.net/.*/post/.*", "http://noembed.com/embed"],
["https?://www\\.23hq\\.com/.*/photo/.*", "http://noembed.com/embed"],
["https?://news.vice\\.com/[^/]+/?", "http://noembed.com/embed"],
["http://www\\.theonion\\.com/articles?/[^/]+/?", "http://noembed.com/embed"],
["http://arstechnica\\.com/[^/]+/\\d+/\\d+/[^/]+/?$", "http://noembed.com/embed"],
["http://tl\\.gd/[^/]+", "http://noembed.com/embed"],
["http://www\\.twitlonger\\.com/show/[a-zA-Z0-9]+", "http://noembed.com/embed"],
["http://(?:www\\.)?twitpic\\.com/([^/]+)", "http://noembed.com/embed"],
["http://picplz\\.com/user/[^/]+/pic/[^/]+", "http://noembed.com/embed"],
["http://gfycat\\.com/([a-zA-Z]+)", "http://noembed.com/embed"],
["http://lockerz\\.com/[sd]/\\d+", "http://noembed.com/embed"],
["http://skit.ch/[^/]+", "http://noembed.com/embed"],
["https?://(?:www.)?skitch.com/([^/]+)/[^/]+/.+", "http://noembed.com/embed"],
["https?://(?:www\\.)?xkcd\\.com/\\d+/?", "http://noembed.com/embed"],
["http://qik\\.com/video/.*", "http://noembed.com/embed"],
["https?://(?:www\\.)?vice\\.com/[^/]+/?", "http://noembed.com/embed"],
[
"https?://(?:www\\.)?wired\\.com/([^/]+/)?\\d+/\\d+/[^/]+/?$",
"http://noembed.com/embed",
],
[
"http://www\\.duffelblog\\.com/\\d{4}/\\d{1,2}/[^/]+/?$",
"http://noembed.com/embed",
],
["http://www.traileraddict.com/trailer/[^/]+/trailer", "http://noembed.com/embed"],
[
"http://(?:www\\.)?theverge\\.com/\\d{4}/\\d{1,2}/\\d{1,2}/\\d+/[^/]+/?$",
"http://noembed.com/embed",
],
[
"http://www\\.monoprice\\.com/products/product\\.asp\\?.*p_id=\\d+",
"http://noembed.com/embed",
],
["http://www\\.asciiartfarts\\.com/[0-9]+\\.html", "http://noembed.com/embed"],
["http://trailers\\.apple\\.com/trailers/[^/]+/[^/]+", "http://noembed.com/embed"],
["https?://(?:www\\.)?vimeo\\.com/.+", "http://noembed.com/embed"],
[
"http://www\\.urbandictionary\\.com/define\\.php\\?term=.+",
"http://noembed.com/embed",
],
[
"https?://(?:www|mobile\\.)?twitter\\.com/(?:#!/)?([^/]+)/status(?:es)?/(\\d+)",
"http://noembed.com/embed",
],
["https?://soundcloud.com/.*/.*", "http://noembed.com/embed"],
["https?://v\\.nldg\\.me/.+", "http://noembed.com/embed"],
["https?://www\\.nooledge\\.com/\\!/Vid/.+", "http://noembed.com/embed"],
["https?://(?:www\\.)spreaker\\.com/.+", "http://noembed.com/embed"],
["https?://(?:www\\.)?avclub\\.com/article/[^/]+/?$", "http://noembed.com/embed"],
["https?://path\\.com/p/([0-9a-zA-Z]+)$", "http://noembed.com/embed"],
[
"http://boingboing\\.net/\\d{4}/\\d{2}/\\d{2}/[^/]+\\.html",
"http://noembed.com/embed",
],
["http://cl\\.ly/(?:image/)?[0-9a-zA-Z]+/?$", "http://noembed.com/embed"],
["http://www\\.clickhole\\.com/[^/]+/[^/]?", "http://noembed.com/embed"],
]
| 62.572289
| 224
| 0.532878
| 2,370
| 20,774
| 4.668354
| 0.170042
| 0.199566
| 0.344179
| 0.467101
| 0.799982
| 0.7218
| 0.538503
| 0.292209
| 0.096891
| 0.071764
| 0
| 0.004647
| 0.098874
| 20,774
| 331
| 225
| 62.761329
| 0.586378
| 0
| 0
| 0.114804
| 0
| 0.009063
| 0.79431
| 0.425291
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
4fdf223f766c1d23a2866014f85c41ad1d44d37d
| 138
|
py
|
Python
|
telisar/npc/hightiefling.py
|
evilchili/telisar
|
4152de28ed03afecb579c6065414439146b8b169
|
[
"Unlicense"
] | 1
|
2018-06-29T14:46:18.000Z
|
2018-06-29T14:46:18.000Z
|
telisar/npc/hightiefling.py
|
evilchili/telisar
|
4152de28ed03afecb579c6065414439146b8b169
|
[
"Unlicense"
] | null | null | null |
telisar/npc/hightiefling.py
|
evilchili/telisar
|
4152de28ed03afecb579c6065414439146b8b169
|
[
"Unlicense"
] | 1
|
2018-06-29T14:47:07.000Z
|
2018-06-29T14:47:07.000Z
|
from telisar.languages import infernal
from telisar.npc import tiefling
class NPC(tiefling.NPC):
language = infernal.HighTiefling()
| 19.714286
| 38
| 0.789855
| 17
| 138
| 6.411765
| 0.588235
| 0.201835
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.137681
| 138
| 6
| 39
| 23
| 0.915966
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.5
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
8b2f46ae7f36031e570a03cdbff059fa47bdd4e6
| 159
|
py
|
Python
|
NowCoder/test.py
|
windcry1/My-ACM-ICPC
|
b85b1c83b72c6b51731dae946a0df57c31d3e7a1
|
[
"MIT"
] | null | null | null |
NowCoder/test.py
|
windcry1/My-ACM-ICPC
|
b85b1c83b72c6b51731dae946a0df57c31d3e7a1
|
[
"MIT"
] | null | null | null |
NowCoder/test.py
|
windcry1/My-ACM-ICPC
|
b85b1c83b72c6b51731dae946a0df57c31d3e7a1
|
[
"MIT"
] | null | null | null |
# >>> Author: WindCry1
# >>> Mail: lanceyu120@gmail.com
# >>> Website: https://windcry1.com
# >>> Date: 1/20/2020 8:18 PM
from math import *
from sys import *
| 22.714286
| 35
| 0.641509
| 23
| 159
| 4.434783
| 0.826087
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.112782
| 0.163522
| 159
| 6
| 36
| 26.5
| 0.654135
| 0.710692
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
8c7210c558be27ed471913cf0fc393f6fe20a814
| 77
|
py
|
Python
|
cowait/notebook/__init__.py
|
ProgHaj/cowait
|
e95c30faab8caf8b0413de4e1784529a3a06475d
|
[
"Apache-2.0"
] | 2
|
2021-08-11T08:51:42.000Z
|
2021-08-11T08:55:19.000Z
|
cowait/notebook/__init__.py
|
ProgHaj/cowait
|
e95c30faab8caf8b0413de4e1784529a3a06475d
|
[
"Apache-2.0"
] | null | null | null |
cowait/notebook/__init__.py
|
ProgHaj/cowait
|
e95c30faab8caf8b0413de4e1784529a3a06475d
|
[
"Apache-2.0"
] | null | null | null |
# flake8: noqa: F401
from .task import NotebookTask
from .spawn import task
| 15.4
| 30
| 0.766234
| 11
| 77
| 5.363636
| 0.727273
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.0625
| 0.168831
| 77
| 4
| 31
| 19.25
| 0.859375
| 0.233766
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
8c7695dac616728b5d996c32cfc219c4bfe4d1a8
| 309
|
py
|
Python
|
parlai/agents/sandy/sandy.py
|
vitouphy/ParlAI
|
49258b966b4567902f10ad806a22db19da87b989
|
[
"MIT"
] | null | null | null |
parlai/agents/sandy/sandy.py
|
vitouphy/ParlAI
|
49258b966b4567902f10ad806a22db19da87b989
|
[
"MIT"
] | null | null | null |
parlai/agents/sandy/sandy.py
|
vitouphy/ParlAI
|
49258b966b4567902f10ad806a22db19da87b989
|
[
"MIT"
] | null | null | null |
from parlai.core.torch_agent import TorchAgent, Output
class SandyAgent(TorchAgent):
def train_step(self, batch):
pass
def eval_step(self, batch):
# for each row in batch, convert tensor to back to text strings
return Output([self.dict.vec2txt(row) for row in batch.text_vec])
| 38.625
| 73
| 0.708738
| 46
| 309
| 4.673913
| 0.673913
| 0.074419
| 0.12093
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.004098
| 0.210356
| 309
| 8
| 73
| 38.625
| 0.877049
| 0.197411
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0.166667
| 0.166667
| 0.166667
| 0.833333
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
|
0
| 5
|
8caddab25081b472292d0007756ef92dbb4c8217
| 170
|
py
|
Python
|
exercicios_basico/ex047.py
|
montalvas/python
|
483c2097f6f91bfae127dafcb63e3006eeecad1d
|
[
"MIT"
] | null | null | null |
exercicios_basico/ex047.py
|
montalvas/python
|
483c2097f6f91bfae127dafcb63e3006eeecad1d
|
[
"MIT"
] | null | null | null |
exercicios_basico/ex047.py
|
montalvas/python
|
483c2097f6f91bfae127dafcb63e3006eeecad1d
|
[
"MIT"
] | null | null | null |
#Todos os números pares entre 1 e 50
print('TODOS OS PARES ENTRE 1 E 50:')
for c in range(1, 51):
if c % 2 == 0:
print('\033[34m{}\033[m'.format(c), end=' ')
| 28.333333
| 52
| 0.576471
| 34
| 170
| 2.882353
| 0.647059
| 0.142857
| 0.22449
| 0.244898
| 0.285714
| 0
| 0
| 0
| 0
| 0
| 0
| 0.147287
| 0.241176
| 170
| 6
| 52
| 28.333333
| 0.612403
| 0.205882
| 0
| 0
| 0
| 0
| 0.333333
| 0
| 0
| 0
| 0
| 0.166667
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0.5
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
|
0
| 5
|
50784d54fd3949d0e7849502072e2633d4e53c9b
| 150
|
py
|
Python
|
fastapi_mailman/globals.py
|
daniel-herrero/fastapi-mailman
|
a174d0ec777d3330dc5464f71fafa7829db07bf1
|
[
"MIT"
] | 6
|
2021-10-08T10:20:37.000Z
|
2022-03-30T08:56:10.000Z
|
fastapi_mailman/globals.py
|
daniel-herrero/fastapi-mailman
|
a174d0ec777d3330dc5464f71fafa7829db07bf1
|
[
"MIT"
] | 2
|
2021-11-11T11:44:29.000Z
|
2022-03-08T06:54:54.000Z
|
fastapi_mailman/globals.py
|
daniel-herrero/fastapi-mailman
|
a174d0ec777d3330dc5464f71fafa7829db07bf1
|
[
"MIT"
] | 1
|
2022-03-04T14:43:22.000Z
|
2022-03-04T14:43:22.000Z
|
import typing as t
if t.TYPE_CHECKING:
from . import Mail
Mailman = t.TypeVar("Mailman", bound=Mail)
MAILMAN: t.Optional["Mailman"] = None
| 16.666667
| 46
| 0.686667
| 22
| 150
| 4.636364
| 0.636364
| 0.215686
| 0.235294
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.193333
| 150
| 8
| 47
| 18.75
| 0.842975
| 0
| 0
| 0
| 0
| 0
| 0.093333
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.4
| 0
| 0.4
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
50e1090fad0bbbe503dbbb07eedeccdf9de66b66
| 164
|
py
|
Python
|
queen.py
|
ChreSyr/iratus
|
bf4cfa514f5f8c59781af0c9c69bf65dea3bb873
|
[
"MIT"
] | null | null | null |
queen.py
|
ChreSyr/iratus
|
bf4cfa514f5f8c59781af0c9c69bf65dea3bb873
|
[
"MIT"
] | null | null | null |
queen.py
|
ChreSyr/iratus
|
bf4cfa514f5f8c59781af0c9c69bf65dea3bb873
|
[
"MIT"
] | null | null | null |
from piece import RollingPiece
class Queen(RollingPiece):
LETTER = "q"
moves = ((-1, -1), (-1, 1), (1, 1), (1, -1), (1, 0), (0, 1), (-1, 0), (0, -1))
| 14.909091
| 82
| 0.469512
| 26
| 164
| 2.961538
| 0.423077
| 0.233766
| 0.272727
| 0.311688
| 0.207792
| 0.116883
| 0.116883
| 0.116883
| 0
| 0
| 0
| 0.130081
| 0.25
| 164
| 10
| 83
| 16.4
| 0.495935
| 0
| 0
| 0
| 0
| 0
| 0.006211
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.25
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
50f60fd5d4eeb6c0ee5edd93f247a67dcb7b7a3a
| 48
|
py
|
Python
|
neo/Network/core/exceptions.py
|
volekerb/neo-python
|
5bdded2c339219355cf1d31ae58653b0f94c6e51
|
[
"MIT"
] | 387
|
2017-07-17T18:25:54.000Z
|
2021-11-18T06:19:47.000Z
|
neo/Network/core/exceptions.py
|
volekerb/neo-python
|
5bdded2c339219355cf1d31ae58653b0f94c6e51
|
[
"MIT"
] | 967
|
2017-08-19T15:48:03.000Z
|
2021-06-01T21:42:39.000Z
|
neo/Network/core/exceptions.py
|
volekerb/neo-python
|
5bdded2c339219355cf1d31ae58653b0f94c6e51
|
[
"MIT"
] | 286
|
2017-07-17T03:44:36.000Z
|
2021-11-18T06:19:32.000Z
|
class DeserializationError(Exception):
pass
| 16
| 38
| 0.791667
| 4
| 48
| 9.5
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.145833
| 48
| 2
| 39
| 24
| 0.926829
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.5
| 0
| 0
| 0.5
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
|
0
| 5
|
50fee9464d7606cd3192a09c000952bf246e0ef9
| 142
|
py
|
Python
|
src/django_mysql/exceptions.py
|
Juh10/django-mysql
|
d997be1321086e2b2c46574bc7882a2737a5c43c
|
[
"MIT"
] | null | null | null |
src/django_mysql/exceptions.py
|
Juh10/django-mysql
|
d997be1321086e2b2c46574bc7882a2737a5c43c
|
[
"MIT"
] | null | null | null |
src/django_mysql/exceptions.py
|
Juh10/django-mysql
|
d997be1321086e2b2c46574bc7882a2737a5c43c
|
[
"MIT"
] | null | null | null |
from __future__ import annotations
class TimeoutError(Exception):
"""
Indicates a database operation timed out in some way.
"""
| 17.75
| 57
| 0.711268
| 16
| 142
| 6.0625
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.21831
| 142
| 7
| 58
| 20.285714
| 0.873874
| 0.373239
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
0fd2fdf8fc416d6086f4b68873ad97069e1c0da0
| 228
|
py
|
Python
|
src/fecc_object/SemicolonObject.py
|
castor91/fecc
|
bc46059c0d7a428d15b95050b70dec374b4bea28
|
[
"MIT"
] | 1
|
2018-02-04T14:48:15.000Z
|
2018-02-04T14:48:15.000Z
|
src/fecc_object/SemicolonObject.py
|
castor91/fecc
|
bc46059c0d7a428d15b95050b70dec374b4bea28
|
[
"MIT"
] | null | null | null |
src/fecc_object/SemicolonObject.py
|
castor91/fecc
|
bc46059c0d7a428d15b95050b70dec374b4bea28
|
[
"MIT"
] | null | null | null |
from AbstractObject import *
class SemicolonObject(AbstractObject):
def __init__(self, value):
super(SemicolonObject, self).__init__(value)
def generate(self, out_code): pass
def __str__(self): return ''
| 20.727273
| 52
| 0.710526
| 25
| 228
| 5.96
| 0.64
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.188596
| 228
| 10
| 53
| 22.8
| 0.805405
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| false
| 0.166667
| 0.166667
| 0.166667
| 0.833333
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
|
0
| 5
|
0fe42d1cbe70bbf9fb217edb565437b4f5814242
| 46
|
py
|
Python
|
tf/about/usefunc.py
|
ancient-data/text-fabric
|
c1ccd4a4dc451e94a789f138576576c5d7f13474
|
[
"MIT"
] | 10
|
2017-10-30T22:38:00.000Z
|
2018-12-12T06:10:10.000Z
|
tf/about/usefunc.py
|
dirkroorda/text-fabric
|
c0a49f092ceda3e7bab91fd0f1aa84e2dc029cf4
|
[
"MIT"
] | 37
|
2017-10-19T12:06:54.000Z
|
2018-12-13T10:18:23.000Z
|
tf/about/usefunc.py
|
dirkroorda/text-fabric
|
c0a49f092ceda3e7bab91fd0f1aa84e2dc029cf4
|
[
"MIT"
] | 3
|
2018-02-28T12:37:21.000Z
|
2018-06-23T08:32:54.000Z
|
"""
.. include:: ../docs/about/usefunc.md
"""
| 11.5
| 37
| 0.543478
| 5
| 46
| 5
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.108696
| 46
| 3
| 38
| 15.333333
| 0.609756
| 0.804348
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
ba08cc69799e83df0030dd0b6c586f291e109e64
| 124
|
py
|
Python
|
sugar_mole/api/apis/my_fox.py
|
Alexis-Jacob/sugar-mole
|
390b977aa1440a4551cf445cd0f62a6201467f81
|
[
"BSD-3-Clause"
] | null | null | null |
sugar_mole/api/apis/my_fox.py
|
Alexis-Jacob/sugar-mole
|
390b977aa1440a4551cf445cd0f62a6201467f81
|
[
"BSD-3-Clause"
] | null | null | null |
sugar_mole/api/apis/my_fox.py
|
Alexis-Jacob/sugar-mole
|
390b977aa1440a4551cf445cd0f62a6201467f81
|
[
"BSD-3-Clause"
] | null | null | null |
from IAPI import IAPI
class NetAtmo(IAPI):
def __init__(self):
self.name = "my fox"
def name(self):
return self.name
| 15.5
| 22
| 0.701613
| 20
| 124
| 4.15
| 0.6
| 0.192771
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.185484
| 124
| 8
| 23
| 15.5
| 0.821782
| 0
| 0
| 0
| 0
| 0
| 0.048
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0.166667
| 0.166667
| 0.833333
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 5
|
ba189b10697b8e16a7afc4c843553a8a511418a5
| 107
|
py
|
Python
|
reddit_data_import_run.py
|
PervasiveWellbeingTech/inquire-web-backend
|
0a078943701472897c288ca1f2683ed749685e92
|
[
"Apache-2.0"
] | 1
|
2020-10-07T09:35:47.000Z
|
2020-10-07T09:35:47.000Z
|
reddit_data_import_run.py
|
PervasiveWellbeingTech/inquire-web-backend
|
0a078943701472897c288ca1f2683ed749685e92
|
[
"Apache-2.0"
] | 1
|
2021-06-02T03:08:57.000Z
|
2021-06-02T03:08:57.000Z
|
reddit_data_import_run.py
|
PervasiveWellbeingTech/inquire-web-backend
|
0a078943701472897c288ca1f2683ed749685e92
|
[
"Apache-2.0"
] | null | null | null |
from scripts.load_reddit_may15_sql import run_full_import
if __name__ == "__main__":
run_full_import()
| 26.75
| 57
| 0.803738
| 16
| 107
| 4.4375
| 0.75
| 0.197183
| 0.366197
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.021277
| 0.121495
| 107
| 4
| 58
| 26.75
| 0.734043
| 0
| 0
| 0
| 0
| 0
| 0.074074
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.666667
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
e85d818ec4ca24f5de147ad9d7dd17dad15329f8
| 355
|
py
|
Python
|
src/utils.py
|
bayuyuhartono-katadata/flask-project
|
000bafa4f593474eec2171b1b1ce392d64c5c47d
|
[
"MIT"
] | 1
|
2019-10-28T06:53:36.000Z
|
2019-10-28T06:53:36.000Z
|
src/utils.py
|
bayuyuhartono-katadata/flask-project
|
000bafa4f593474eec2171b1b1ce392d64c5c47d
|
[
"MIT"
] | 1
|
2019-12-26T22:21:29.000Z
|
2019-12-29T12:47:33.000Z
|
src/utils.py
|
bayuyuhartono-katadata/flask-project
|
000bafa4f593474eec2171b1b1ce392d64c5c47d
|
[
"MIT"
] | 1
|
2019-11-08T02:03:13.000Z
|
2019-11-08T02:03:13.000Z
|
import os, base64
from werkzeug.routing import BaseConverter
#print(base64.b64encode(os.urandom(64)).decode('utf-8'))
def generateSecKey():
return base64.b64encode(os.urandom(64)).decode('utf-8')
class RegexConverter(BaseConverter):
def __init__(self, url, *regx):
super(RegexConverter, self).__init__(url)
self.regex = regx[0]
| 27.307692
| 59
| 0.715493
| 45
| 355
| 5.466667
| 0.577778
| 0.121951
| 0.138211
| 0.195122
| 0.292683
| 0.292683
| 0.292683
| 0.292683
| 0
| 0
| 0
| 0.055738
| 0.140845
| 355
| 12
| 60
| 29.583333
| 0.75082
| 0.15493
| 0
| 0
| 1
| 0
| 0.016722
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| false
| 0
| 0.25
| 0.125
| 0.75
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 5
|
e87587486b66e78f3a8b0824b3ea7e13b73ce6a6
| 220
|
py
|
Python
|
lfs_compropago/admin.py
|
misaelnieto/lfs-compropago
|
6622d5021fb3a9a382d36e1e4e98116a69fbf45a
|
[
"MIT"
] | null | null | null |
lfs_compropago/admin.py
|
misaelnieto/lfs-compropago
|
6622d5021fb3a9a382d36e1e4e98116a69fbf45a
|
[
"MIT"
] | null | null | null |
lfs_compropago/admin.py
|
misaelnieto/lfs-compropago
|
6622d5021fb3a9a382d36e1e4e98116a69fbf45a
|
[
"MIT"
] | 1
|
2016-02-08T17:36:41.000Z
|
2016-02-08T17:36:41.000Z
|
# django imports
from django.contrib import admin
from .models import CompropagoTransaction
from .models import CompropagoWebHookHit
admin.site.register(CompropagoTransaction)
admin.site.register(CompropagoWebHookHit)
| 24.444444
| 42
| 0.859091
| 23
| 220
| 8.217391
| 0.478261
| 0.10582
| 0.169312
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.086364
| 220
| 8
| 43
| 27.5
| 0.940299
| 0.063636
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.6
| 0
| 0.6
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
e8788c9fc7223a38447cc88811a67ef9cd5dd611
| 218
|
py
|
Python
|
World/Object/Unit/Pet/model.py
|
sundayz/idewave-core
|
5bdb88892173c9c3e8c85f431cf9b5dbd9f23941
|
[
"Apache-2.0"
] | 10
|
2019-06-29T19:24:52.000Z
|
2021-02-21T22:45:57.000Z
|
World/Object/Unit/Pet/model.py
|
sundayz/idewave-core
|
5bdb88892173c9c3e8c85f431cf9b5dbd9f23941
|
[
"Apache-2.0"
] | 4
|
2019-08-15T07:03:36.000Z
|
2021-06-02T13:01:25.000Z
|
World/Object/Unit/Pet/model.py
|
sundayz/idewave-core
|
5bdb88892173c9c3e8c85f431cf9b5dbd9f23941
|
[
"Apache-2.0"
] | 8
|
2019-06-30T22:47:48.000Z
|
2021-02-20T19:21:30.000Z
|
from World.Object.Unit.model import Unit
from World.Object.Constants.HighGuid import HighGuid
class Pet(Unit):
def __init__(self):
super().__init__()
self.high_guid = HighGuid.HIGHGUID_PET.value
| 21.8
| 52
| 0.724771
| 29
| 218
| 5.103448
| 0.586207
| 0.121622
| 0.202703
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.178899
| 218
| 9
| 53
| 24.222222
| 0.826816
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.166667
| false
| 0
| 0.333333
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
e88127452f8513b689893088aac6b48c4dbe3287
| 271
|
py
|
Python
|
Online-Judges/CodingBat/Python/Logic-01/Logic_1-04-caught_speeding.py
|
shihab4t/Competitive-Programming
|
e8eec7d4f7d86bfa1c00b7fbbedfd6a1518f19be
|
[
"Unlicense"
] | 3
|
2021-06-15T01:19:23.000Z
|
2022-03-16T18:23:53.000Z
|
Online-Judges/CodingBat/Python/Logic-01/Logic_1-04-caught_speeding.py
|
shihab4t/Competitive-Programming
|
e8eec7d4f7d86bfa1c00b7fbbedfd6a1518f19be
|
[
"Unlicense"
] | null | null | null |
Online-Judges/CodingBat/Python/Logic-01/Logic_1-04-caught_speeding.py
|
shihab4t/Competitive-Programming
|
e8eec7d4f7d86bfa1c00b7fbbedfd6a1518f19be
|
[
"Unlicense"
] | null | null | null |
def caught_speeding(speed, is_birthday):
if speed <= 60 or is_birthday is True and speed <= 65:
return 0
elif speed <= 80 or is_birthday is True and speed <= 85:
return 1
elif speed > 80 or is_birthday is True and speed > 85:
return 2
| 33.875
| 60
| 0.638376
| 45
| 271
| 3.733333
| 0.422222
| 0.238095
| 0.214286
| 0.25
| 0.690476
| 0.690476
| 0.690476
| 0.535714
| 0.535714
| 0.535714
| 0
| 0.078947
| 0.298893
| 271
| 7
| 61
| 38.714286
| 0.805263
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.142857
| false
| 0
| 0
| 0
| 0.571429
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 5
|
e8829e8c852d3d7eb366143651c63280faa26f56
| 8,790
|
py
|
Python
|
Env/ellipsoid_gravity_utils.py
|
Aerospace-AI/Hovering-with-Altimetry
|
be19faa97f14b3ada53217f8941330750d0964c8
|
[
"MIT"
] | 1
|
2021-06-17T11:02:46.000Z
|
2021-06-17T11:02:46.000Z
|
Env/ellipsoid_gravity_utils.py
|
Aerospace-AI/Hovering-with-Altimetry
|
be19faa97f14b3ada53217f8941330750d0964c8
|
[
"MIT"
] | null | null | null |
Env/ellipsoid_gravity_utils.py
|
Aerospace-AI/Hovering-with-Altimetry
|
be19faa97f14b3ada53217f8941330750d0964c8
|
[
"MIT"
] | null | null | null |
import numpy as np
def ellipsoid_gravity_field(x, a, b , c, rho):
G = 6.6695e-11 # Gravitational constant [kg m**3/s**2]
aa = a**2
bb = b**2
cc = c**2
errtol = 1e-10
# find the greatest root LAM
B = -(x[0]**2 + x[1]**2 + x[2]**2 - aa - bb - cc)
C = -cc*(x[0]**2 + x[1]**2) + bb*(cc - x[0]**2 - x[2]**2) + aa*(bb +cc - x[1]**2 - x[2]**2)
D = -bb*cc*x[0]**2 + aa*(-cc*x[1]**2 + bb*(c - x[2])*(c + x[2]))
poly = np.asarray([1, B, C, D])
LAM = np.max(np.roots(poly))
# find elliptic integrals F1 and E1
phi = np.arcsin(np.sqrt((aa-cc)/(LAM + aa)))# Argument phi, s.t. 0 < phi <= np.pi/2
k = np.sqrt((aa-bb)/(aa-cc)) # Modulus k, s.t. 0 < k < 1
F1 = lellipf(phi, k, errtol)
E1 = lellipe(phi, k, errtol)
fac1 = 4*np.pi*G*rho*a*b*c/np.sqrt(aa-cc)
fac2 = np.sqrt((aa-cc)/((aa+LAM)*(bb+LAM)*(cc+LAM)))
# Components of attraction.
X = fac1/(aa-bb)*(E1-F1)
Y = fac1*((-aa+cc)*E1/((aa-bb)*(bb-cc)) + F1/(aa-bb)+ (cc+LAM)*fac2/(bb-cc))
Z = fac1*(-E1 + (bb+LAM)*fac2)/(cc-bb)
return np.asarray([X,Y,Z]), LAM
#
# lellipe(phi, k, errtol)
#
# Inputs:
#
# phi Input angle vector size 1xN.
# k Input parameter vector size 1 or 1xN.
# errtol Error tolerance for Carlson's algorithms.
#
# Matlab function to compute Legendre's (incomplete) elliptic integral
# E(phi, k). Uses a vectorized implementation of Carlson's Duplication Algorithms
# for symmetric elliptic integrals as found in "Computing Elliptic
# Integrals by Duplication," by B. C. Carlson, Numer. Math. 33, 1-16 (1979)
# and also found in ACM TOMS Algorithm 577. Section 4 in the paper cited
# here describes how to convert between the symmetric elliptic integrals
# and Legendre's elliptic integrals.
#
# Returns NaN's for any argument values outside input range.
#
def lellipe(phi, k, errtol):
# Argument checking for vectorization:
phivec = phi
kvec = k
snphi = np.sin(phivec)
csphi = np.cos(phivec)
snphi2 = snphi**2
csphi2 = csphi**2
k2 = kvec**2
y = 1.0 - k2*snphi2
onesvec = 1
f = snphi * rf(csphi2, y, onesvec, errtol) - k2 * snphi * snphi2 * rd(csphi2, y, onesvec, errtol)/3.0
return f
#
# lellipf(phi, k, errtol)
#
# Inputs:
#
# phi Input angle vector size 1 or 1xN.
# k Input parameter vector size 1 or 1xN.
# errtol Error tolerance for Carlson's algorithms.
#
# Matlab function to compute Legendre's (incomplete) elliptic integral
# F(phi, k). Uses a vectorized implementation of Carlson's Duplication Algorithms
# for symmetric elliptic integrals as found in "Computing Elliptic
# Integrals by Duplication," by B. C. Carlson, Numer. Math. 33, 1-16 (1979)
# and also found in ACM TOMS Algorithm 577. Section 4 in the paper cited
# here describes how to convert between the symmetric elliptic integrals
# and Legendre's elliptic integrals.
#
# Returns NaN's for any argument values outside input range.
#
def lellipf(phi, k, errtol):
    """Legendre's incomplete elliptic integral F(phi, k).

    Uses Carlson's symmetric integral Rf via the standard reduction
    F(phi, k) = sin(phi) * Rf(cos^2(phi), 1 - k^2 sin^2(phi), 1).
    """
    sn = np.sin(phi)
    cs = np.cos(phi)
    cs2 = cs * cs
    y = 1 - k * k * sn * sn
    return sn * rf(cs2, y, 1, errtol)
# rf(x, y, z, errtol)
#
# Function to compute Carlson's symmetric elliptic integral Rf.
# Implementation of Carlson's Duplication Algorithm in "Computing
# Elliptic Integrals by Duplication," by B. C. Carlson, Numer. Math.
# 33, 1-16 (1979).
#
# Returns NaN's for any argument values outside input range.
#
# Algorithm is also from Carlson's ACM TOMS Algorithm 577.
#
# This code is a complete rewrite of the algorithm in vectorized form.
# It was not produced by running a FORTRAN to Matlab converter.
#
# The following text is copied from ACM TOMS Algorithm 577 FORTRAN code:
#
# X AND Y ARE THE VARIABLES IN THE INTEGRAL RC(X,Y).
#
# ERRTOL IS SET TO THE DESIRED ERROR TOLERANCE.
# RELATIVE ERROR DUE TO TRUNCATION IS LESS THAN
# 16 * ERRTOL ** 6 / (1 - 2 * ERRTOL).
#
# SAMPLE CHOICES: ERRTOL RELATIVE TRUNCATION
# ERROR LESS THAN
# 1.D-3 3.D-19
# 3.D-3 2.D-16
# 1.D-2 3.D-13
# 3.D-2 2.D-10
# 1.D-1 3.D-7
#
# Note by TRH:
#
# Absolute truncation error when the integrals are order 1 quantities
# is closer to errtol, so be careful if you want high absolute precision.
#
# Thomas R. Hoffend Jr., Ph.D.
# 3M Company
# 3M Center Bldg. 236-GC-26
# St. Paul, MN 55144
# trhoffendjr@mmm.com
#
def rf(x, y, z, errtol):
    """Carlson's symmetric elliptic integral Rf(x, y, z).

    Duplication algorithm from B. C. Carlson, "Computing Elliptic
    Integrals by Duplication," Numer. Math. 33, 1-16 (1979); also ACM
    TOMS Algorithm 577.  Relative truncation error is bounded by
    16 * errtol**6 / (1 - 2*errtol).

    NOTE(review): unlike what the original header comment claimed, no
    NaN is returned for out-of-range arguments -- inputs are not
    validated here (the unused Carlson LoLim/UpLim bounds were dead
    code and have been removed).
    """
    Xn, Yn, Zn = x, y, z
    Mu = (Xn + Yn + Zn) / 3.0
    Xndev = 2.0 - (Mu + Xn) / Mu
    Yndev = 2.0 - (Mu + Yn) / Mu
    Zndev = 2.0 - (Mu + Zn) / Mu
    epslon = np.max(np.abs([Xndev, Yndev, Zndev]))
    # Duplication: quarter the arguments until the relative deviations
    # from the mean fall below the requested tolerance.
    while epslon >= errtol:
        Xnroot = np.sqrt(Xn)
        Ynroot = np.sqrt(Yn)
        Znroot = np.sqrt(Zn)
        lambda1 = Xnroot * (Ynroot + Znroot) + Ynroot * Znroot
        Xn = 0.25 * (Xn + lambda1)
        Yn = 0.25 * (Yn + lambda1)
        Zn = 0.25 * (Zn + lambda1)
        Mu = (Xn + Yn + Zn) / 3.0
        Xndev = 2.0 - (Mu + Xn) / Mu
        Yndev = 2.0 - (Mu + Yn) / Mu
        Zndev = 2.0 - (Mu + Zn) / Mu
        epslon = np.max(np.abs([Xndev, Yndev, Zndev]))
    # Fifth-order Taylor-series correction at the converged point.
    C1 = 1.0 / 24.0
    C2 = 3.0 / 44.0
    C3 = 1.0 / 14.0
    E2 = Xndev * Yndev - Zndev * Zndev
    E3 = Xndev * Yndev * Zndev
    S = 1.0 + (C1 * E2 - 0.1 - C2 * E3) * E2 + C3 * E3
    return S / np.sqrt(Mu)
#
# rd(x, y, z, errtol)
#
# Inputs:
#
# x Input vector size 1xN.
# y Input vector size 1xN.
# z Input vector size 1xN.
# errtol Error tolerance.
#
# Python function (ported from Matlab) to compute Carlson's symmetric elliptic integral Rd.
# Implementation of Carlson's Duplication Algorithm 4 in "Computing
# Elliptic Integrals by Duplication," by B. C. Carlson, Numer. Math.
# 33, 1-16 (1979).
#
# Returns NaN's for any argument values outside input range.
#
# Algorithm is also from Carlson's ACM TOMS Algorithm 577.
#
# This code is a complete rewrite of the algorithm in vectorized form.
# It was not produced by running a FORTRAN to Matlab converter.
#
# The following text is copied from ACM TOMS Algorithm 577 FORTRAN code:
#
# X AND Y ARE THE VARIABLES IN THE INTEGRAL RC(X,Y).
#
# ERRTOL IS SET TO THE DESIRED ERROR TOLERANCE.
# RELATIVE ERROR DUE TO TRUNCATION IS LESS THAN
# 16 * ERRTOL ** 6 / (1 - 2 * ERRTOL).
#
# SAMPLE CHOICES: ERRTOL RELATIVE TRUNCATION
# ERROR LESS THAN
# 1.D-3 3.D-19
# 3.D-3 2.D-16
# 1.D-2 3.D-13
# 3.D-2 2.D-10
# 1.D-1 3.D-7
#
# Note by TRH:
#
# Absolute truncation error when the integrals are order 1 quantities
# is closer to errtol, so be careful if you want high absolute precision.
#
# Thomas R. Hoffend Jr., Ph.D.
# 3M Company
# 3M Center Bldg. 236-GC-26
# St. Paul, MN 55144
# trhoffendjr@mmm.com
#
def rd(x, y, z, errtol):
    """Carlson's symmetric elliptic integral Rd(x, y, z).

    Duplication algorithm from B. C. Carlson, "Computing Elliptic
    Integrals by Duplication," Numer. Math. 33, 1-16 (1979); also ACM
    TOMS Algorithm 577.  Relative truncation error is bounded by
    16 * errtol**6 / (1 - 2*errtol).

    NOTE(review): unlike what the original header comment claimed, no
    NaN is returned for out-of-range arguments -- inputs are not
    validated here (the unused Carlson LoLim/UpLim bounds were dead
    code and have been removed).
    """
    Xn, Yn, Zn = x, y, z
    sigma = 0.0      # accumulated boundary terms of the duplication
    power4 = 1.0     # 4**(-n) scale factor after n duplications
    Mu = (Xn + Yn + 3.0 * Zn) * 0.2
    Xndev = (Mu - Xn) / Mu
    Yndev = (Mu - Yn) / Mu
    Zndev = (Mu - Zn) / Mu
    epslon = np.max(np.abs([Xndev, Yndev, Zndev]))
    while epslon >= errtol:
        Xnroot = np.sqrt(Xn)
        Ynroot = np.sqrt(Yn)
        Znroot = np.sqrt(Zn)
        lambda1 = Xnroot * (Ynroot + Znroot) + Ynroot * Znroot
        sigma = sigma + power4 / (Znroot * (Zn + lambda1))
        power4 = 0.25 * power4
        Xn = 0.25 * (Xn + lambda1)
        Yn = 0.25 * (Yn + lambda1)
        Zn = 0.25 * (Zn + lambda1)
        Mu = (Xn + Yn + 3.0 * Zn) * 0.2
        Xndev = (Mu - Xn) / Mu
        Yndev = (Mu - Yn) / Mu
        Zndev = (Mu - Zn) / Mu
        epslon = np.max(np.abs([Xndev, Yndev, Zndev]))
    # Taylor-series correction at the converged point.
    C1 = 3.0 / 14.0
    C2 = 1.0 / 6.0
    C3 = 9.0 / 22.0
    C4 = 3.0 / 26.0
    EA = Xndev * Yndev
    EB = Zndev * Zndev
    EC = EA - EB
    ED = EA - 6.0 * EB
    EF = ED + EC + EC
    S1 = ED * (-C1 + 0.25 * C3 * ED - 1.50 * C4 * Zndev * EF)
    S2 = Zndev * (C2 * EF + Zndev * (-C3 * EC + Zndev * C4 * EA))
    return 3.0 * sigma + power4 * (1.0 + S1 + S2) / (Mu * np.sqrt(Mu))
| 29.496644
| 106
| 0.572127
| 1,389
| 8,790
| 3.61915
| 0.184305
| 0.014323
| 0.011936
| 0.022678
| 0.754327
| 0.741198
| 0.737816
| 0.722697
| 0.722697
| 0.707181
| 0
| 0.064144
| 0.299431
| 8,790
| 297
| 107
| 29.59596
| 0.752192
| 0.512059
| 0
| 0.551181
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.03937
| false
| 0
| 0.007874
| 0
| 0.086614
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
e89e5b8db3ccc53b7cb13e274bbd9dce1db6ede9
| 173
|
py
|
Python
|
utilities/exceptions.py
|
canberkeh/word-finder
|
23e6c5b76a04d8ebbca5fc6ddd993940ce2d2df5
|
[
"MIT"
] | null | null | null |
utilities/exceptions.py
|
canberkeh/word-finder
|
23e6c5b76a04d8ebbca5fc6ddd993940ce2d2df5
|
[
"MIT"
] | null | null | null |
utilities/exceptions.py
|
canberkeh/word-finder
|
23e6c5b76a04d8ebbca5fc6ddd993940ce2d2df5
|
[
"MIT"
] | null | null | null |
class DatabaseConnectionError(Exception):
    """Raised for failures establishing a database connection."""
    pass
class DBCursorError(Exception):
    """Raised for failures obtaining or using a database cursor."""
    pass
class QueryError(Exception):
    """Raised when a database query fails."""
    pass
class ServiceError(Exception):
    """Raised for errors in the service layer."""
    pass
| 14.416667
| 41
| 0.751445
| 16
| 173
| 8.125
| 0.4375
| 0.4
| 0.415385
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.179191
| 173
| 11
| 42
| 15.727273
| 0.915493
| 0
| 0
| 0.5
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.5
| 0
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
|
0
| 5
|
e8acd0901e443a35c6dfa5bf054bb6e7e1bf906b
| 19
|
py
|
Python
|
email_reply_parser/version.py
|
Mahasweta-usc/email-reply-parser
|
c528e89656347331059d9e485af29fe855c56998
|
[
"MIT"
] | 347
|
2015-01-05T02:11:52.000Z
|
2022-03-31T02:50:20.000Z
|
email_reply_parser/version.py
|
Mahasweta-usc/email-reply-parser
|
c528e89656347331059d9e485af29fe855c56998
|
[
"MIT"
] | 31
|
2015-01-27T13:13:06.000Z
|
2022-03-24T17:21:20.000Z
|
email_reply_parser/version.py
|
Mahasweta-usc/email-reply-parser
|
c528e89656347331059d9e485af29fe855c56998
|
[
"MIT"
] | 91
|
2015-01-24T00:33:20.000Z
|
2022-03-24T11:06:22.000Z
|
# Package version string.
VERSION = '0.5.12'
| 9.5
| 18
| 0.578947
| 4
| 19
| 2.75
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.25
| 0.157895
| 19
| 1
| 19
| 19
| 0.4375
| 0
| 0
| 0
| 0
| 0
| 0.315789
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
e8afd27b5c45ff247b46e1cb240f4e11b512c908
| 48
|
py
|
Python
|
__init__.py
|
leptoid/anime-dl
|
d72825c64ea06e1800e32b16dc95a8f3aee41c9a
|
[
"MIT"
] | 2
|
2019-11-16T01:06:11.000Z
|
2020-07-24T02:34:16.000Z
|
__init__.py
|
leptoid/anime-dl
|
d72825c64ea06e1800e32b16dc95a8f3aee41c9a
|
[
"MIT"
] | null | null | null |
__init__.py
|
leptoid/anime-dl
|
d72825c64ea06e1800e32b16dc95a8f3aee41c9a
|
[
"MIT"
] | null | null | null |
import common
import external
import sites
| 9.6
| 16
| 0.770833
| 6
| 48
| 6.166667
| 0.666667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.229167
| 48
| 4
| 17
| 12
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
2cd00dbd0aeccc705296607ac0d6faeb3ca31b78
| 84
|
py
|
Python
|
libaito/__init__.py
|
martinlatrille/RESTinPy
|
dfe56b87dd83130f60a44a329153e5a43398e5b0
|
[
"Apache-2.0"
] | null | null | null |
libaito/__init__.py
|
martinlatrille/RESTinPy
|
dfe56b87dd83130f60a44a329153e5a43398e5b0
|
[
"Apache-2.0"
] | null | null | null |
libaito/__init__.py
|
martinlatrille/RESTinPy
|
dfe56b87dd83130f60a44a329153e5a43398e5b0
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
import core
import helpers
import printers
import settings
| 12
| 23
| 0.72619
| 11
| 84
| 5.545455
| 0.727273
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.014286
| 0.166667
| 84
| 6
| 24
| 14
| 0.857143
| 0.25
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0.25
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
2cdb4ea8628586e7bc85615a601d095537efecd0
| 100
|
py
|
Python
|
Apps/rsp/test/arg.py
|
zhanghongce/ila-mcm-fmcad18
|
e7045e38e45e758f2b0e0ecc7d4369f5014b8707
|
[
"MIT"
] | null | null | null |
Apps/rsp/test/arg.py
|
zhanghongce/ila-mcm-fmcad18
|
e7045e38e45e758f2b0e0ecc7d4369f5014b8707
|
[
"MIT"
] | null | null | null |
Apps/rsp/test/arg.py
|
zhanghongce/ila-mcm-fmcad18
|
e7045e38e45e758f2b0e0ecc7d4369f5014b8707
|
[
"MIT"
] | null | null | null |
def f0(a, **l):
    # Python 2 syntax (print statement).  Prints only the positional
    # argument a; any extra keyword arguments are collected into l and
    # ignored.
    print a
def f1(a,b,c):
    # Python 2 syntax (print statement).  Prints the sum of the three
    # required arguments.
    print a+b+c
# Keys of d1 match the parameter names, so **-unpacking binds by name:
# f0 receives a=1 (b and c land in its **l); f1 receives a=1, b=1, c=1.
d1 = {'a':1,'b':1,'c':1}
f0(**d1)
f1(**d1)
| 8.333333
| 24
| 0.45
| 26
| 100
| 1.730769
| 0.384615
| 0.266667
| 0.133333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.125
| 0.2
| 100
| 11
| 25
| 9.090909
| 0.4375
| 0
| 0
| 0
| 0
| 0
| 0.03
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0
| null | null | 0.285714
| 1
| 0
| 1
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
2ce334caa3c540df93a562fe752ae3122a0f3306
| 13,067
|
py
|
Python
|
ope_estimators.py
|
MasaKat0/off_policy_evaluation
|
4b54c321eee522e8c18b478fb455e7d144ab2332
|
[
"MIT"
] | null | null | null |
ope_estimators.py
|
MasaKat0/off_policy_evaluation
|
4b54c321eee522e8c18b478fb455e7d144ab2332
|
[
"MIT"
] | null | null | null |
ope_estimators.py
|
MasaKat0/off_policy_evaluation
|
4b54c321eee522e8c18b478fb455e7d144ab2332
|
[
"MIT"
] | null | null | null |
import numpy as np
from sklearn.linear_model import LinearRegression
from sklearn.kernel_ridge import KernelRidge
from sklearn.model_selection import GridSearchCV
from kernel_regression import KernelLogit
import warnings
# Hyper-parameter grid for KernelRidge model selection via GridSearchCV.
KernelRidge_hyp_param = {'alpha': [0.01, 0.1, 1], 'gamma': [0.01, 0.1, 1]}
# Candidate kernel bandwidths and regularization strengths for KernelLogit.
KernelLogit_sigma_list = np.array([0.01, 0.1, 1])
KernelLogit_lda_list = np.array([0.01, 0.1, 1])
def kernel_ridge_estimator(X, Y, Z, cv=2):
    """Fit an RBF kernel ridge regression of Y on X and predict at Z.

    Hyper-parameters (alpha, gamma) are chosen by cv-fold grid search.
    Returns the array of predictions at the query points Z.
    """
    model = KernelRidge(kernel='rbf')
    # Consistency fix: reuse the module-level KernelRidge_hyp_param grid
    # instead of repeating the identical dict literal inline.
    model = GridSearchCV(model, KernelRidge_hyp_param, cv=cv)
    model.fit(X, Y)
    return model.predict(Z)
def kernel_logit_estimator(X, Y, Z, cv=2):
    # Fit a kernel logistic model of labels Y on contexts X and return
    # class-probability predictions at Z.  KernelLogit also returns the
    # precomputed kernel features for X (KX) and Z (KZ), which are what
    # the model is fitted on / predicts from.
    model, KX, KZ = KernelLogit(X, Y, Z, folds=cv, num_basis=100,
                                sigma_list=KernelLogit_sigma_list,
                                lda_list=KernelLogit_lda_list, algorithm='Ridge')
    model.fit(KX, Y)
    return model.predict_proba(KZ)
class OPEestimators():
    """Off-policy evaluation (OPE) estimators for logged bandit feedback.

    Works on logged contexts X (N x dim), one-hot action indicators A
    (N x num_classes), an outcome matrix Y (N x num_classes), and the
    behavior / evaluation policies given as N x num_classes probability
    matrices.  fit() dispatches to one of several IPW / direct-method /
    doubly-robust estimators.
    """

    def __init__(self, classes, pi_evaluation, pi_behavior=None, variance=False):
        # classes: iterable of action indices (columns of A and Y).
        # pi_behavior may be None; ipw() then estimates it from the log.
        # variance=True makes fit() also return the empirical variance.
        self.classes = classes
        self.pi_behavior = pi_behavior
        self.pi_evaluation = pi_evaluation
        self.variance = variance

    def fit(self, X, A, Y_matrix, est_type,
            outcome_estimator=kernel_ridge_estimator,
            policy_estimator=kernel_logit_estimator,
            warning_samples=10):
        """Run the estimator named by est_type.

        est_type is one of: 'ipw', 'dm', 'aipw_ddm', 'aipw', 'a2ipw',
        'adr', 'dr_ddm', 'dr'.  warning_samples is the minimum number of
        observed samples of an action before the sequential outcome
        model is trusted (0 is used below that).  Returns theta, or
        (theta, var) when the instance was built with variance=True.
        """
        self.X = X
        self.N_hst, self.dim = X.shape
        self.A = A
        self.Y = Y_matrix
        self.warning_samples = warning_samples
        # BUG FIX: the caller-supplied estimators were previously
        # ignored (the module-level defaults were hard-coded here).
        self.outcome_estimator = outcome_estimator
        self.policy_estimator = policy_estimator
        warnings.simplefilter('ignore')
        if est_type == 'ipw':
            theta, var = self.ipw()
        elif est_type == 'dm':
            theta, var = self.dm()
        elif est_type == 'aipw_ddm':
            theta, var = self.aipw_ddm()
        elif est_type == 'aipw':
            theta, var = self.aipw()
        elif est_type == 'a2ipw':
            theta, var = self.a2ipw()
        elif est_type == 'adr':
            theta, var = self.adr()
        elif est_type == 'dr_ddm':
            theta, var = self.dr_ddm()
        elif est_type == 'dr':
            theta, var = self.dr()
        if self.variance:
            return theta, var
        return theta

    # ------------------------------------------------------------------
    # Shared building blocks
    # ------------------------------------------------------------------

    def _sequential_f_matrix(self):
        # Outcome model fitted sequentially: the prediction at time t
        # uses only samples observed strictly before t, and falls back
        # to 0 until more than warning_samples draws of action c exist.
        f_matrix = np.zeros(shape=(self.N_hst, len(self.classes)))
        for c in self.classes:
            for t in range(self.N_hst):
                if np.sum(self.A[:t, c] == 1) > self.warning_samples:
                    f_matrix[t, c] = self.outcome_estimator(
                        self.X[:t][self.A[:t, c] == 1],
                        self.Y[:t][:t, c][self.A[:t, c] == 1],
                        [self.X[t]])
                else:
                    f_matrix[t, c] = 0
        return f_matrix

    def _full_f_matrix(self):
        # Outcome model fitted once per action on the whole log.
        f_matrix = np.zeros(shape=(self.N_hst, len(self.classes)))
        for c in self.classes:
            f_matrix[:, c] = self.outcome_estimator(
                self.X[self.A[:, c] == 1],
                self.Y[:, c][self.A[:, c] == 1],
                self.X)
        return f_matrix

    def _dr_score(self, f_matrix, densratio_matrix):
        # Doubly-robust per-sample score: IPW-weighted model residual
        # plus the model-based direct estimate.
        return np.sum(self.A*(self.Y-f_matrix)*densratio_matrix,
                      axis=1) + np.sum(f_matrix*self.pi_evaluation, axis=1)

    # ------------------------------------------------------------------
    # Estimators
    # ------------------------------------------------------------------

    def aipw_ddm(self, folds=2):
        """AIPW estimate with cross-fitted outcome models (known behavior policy)."""
        theta_list = []
        cv_fold = np.arange(folds)
        cv_split0 = np.floor(np.arange(self.N_hst)*folds/self.N_hst)
        cv_index = cv_split0[np.random.permutation(self.N_hst)]
        x_cv = []
        a_cv = []
        y_cv = []
        pi_bhv_cv = []
        pi_evl_cv = []
        for k in cv_fold:
            x_cv.append(self.X[cv_index == k])
            a_cv.append(self.A[cv_index == k])
            y_cv.append(self.Y[cv_index == k])
            pi_bhv_cv.append(self.pi_behavior[cv_index == k])
            pi_evl_cv.append(self.pi_evaluation[cv_index == k])
        for k in range(folds):
            # Fold k is the evaluation set; all other folds are pooled
            # into the training set for the outcome model.
            count = 0
            for j in range(folds):
                if j == k:
                    x_te = x_cv[j]
                    a_te = a_cv[j]
                    y_te = y_cv[j]
                    pi_bhv_te = pi_bhv_cv[j]
                    pi_evl_te = pi_evl_cv[j]
                if j != k:
                    if count == 0:
                        x_tr = x_cv[j]
                        a_tr = a_cv[j]
                        y_tr = y_cv[j]
                        pi_bhv_tr = pi_bhv_cv[j]
                        pi_evl_tr = pi_evl_cv[j]
                        count += 1
                    else:
                        x_tr = np.append(x_tr, x_cv[j], axis=0)
                        a_tr = np.append(a_tr, a_cv[j], axis=0)
                        y_tr = np.append(y_tr, y_cv[j], axis=0)
                        pi_bhv_tr = np.append(pi_bhv_tr, pi_bhv_cv[j], axis=0)
                        pi_evl_tr = np.append(pi_evl_tr, pi_evl_cv[j], axis=0)
            densratio_matrix = pi_evl_te/pi_bhv_te
            f_matrix = np.zeros(shape=(len(x_te), len(self.classes)))
            for c in self.classes:
                f_matrix[:, c] = self.outcome_estimator(
                    x_tr[a_tr[:, c] == 1], y_tr[:, c][a_tr[:, c] == 1], x_te)
            weight = len(a_te)
            theta = np.sum(a_te*(y_te-f_matrix)*densratio_matrix /
                           weight) + np.sum(f_matrix*pi_evl_te/weight)
            theta_list.append(theta)
        theta = np.mean(theta_list)
        # Variance from the sequential doubly-robust score.
        densratio_matrix = self.pi_evaluation/self.pi_behavior
        f_matrix = self._sequential_f_matrix()
        score = self._dr_score(f_matrix, densratio_matrix)
        var = np.mean((score - theta)**2)
        return theta, var

    def dr_ddm(self, folds=2):
        """DR estimate with cross-fitted outcome AND behavior-policy models."""
        theta_list = []
        cv_fold = np.arange(folds)
        cv_split0 = np.floor(np.arange(self.N_hst)*folds/self.N_hst)
        cv_index = cv_split0[np.random.permutation(self.N_hst)]
        x_cv = []
        a_cv = []
        y_cv = []
        pi_evl_cv = []
        for k in cv_fold:
            x_cv.append(self.X[cv_index == k])
            a_cv.append(self.A[cv_index == k])
            y_cv.append(self.Y[cv_index == k])
            pi_evl_cv.append(self.pi_evaluation[cv_index == k])
        for k in range(folds):
            count = 0
            for j in range(folds):
                if j == k:
                    x_te = x_cv[j]
                    a_te = a_cv[j]
                    y_te = y_cv[j]
                    pi_evl_te = pi_evl_cv[j]
                if j != k:
                    if count == 0:
                        x_tr = x_cv[j]
                        a_tr = a_cv[j]
                        y_tr = y_cv[j]
                        pi_evl_tr = pi_evl_cv[j]
                        count += 1
                    else:
                        x_tr = np.append(x_tr, x_cv[j], axis=0)
                        a_tr = np.append(a_tr, a_cv[j], axis=0)
                        y_tr = np.append(y_tr, y_cv[j], axis=0)
                        pi_evl_tr = np.append(pi_evl_tr, pi_evl_cv[j], axis=0)
            # Behavior policy is unknown here: estimate it on the
            # training folds (uses the configured policy estimator).
            a_temp = np.where(a_tr == 1)[1]
            pi_bhv_te = self.policy_estimator(
                x_tr, a_temp, x_te)
            densratio_matrix = pi_evl_te/pi_bhv_te
            f_matrix = np.zeros(shape=(len(x_te), len(self.classes)))
            for c in self.classes:
                f_matrix[:, c] = self.outcome_estimator(
                    x_tr[a_tr[:, c] == 1], y_tr[:, c][a_tr[:, c] == 1], x_te)
            weight = len(a_te)
            theta = np.sum(a_te*(y_te-f_matrix)*densratio_matrix /
                           weight) + np.sum(f_matrix*pi_evl_te/weight)
            theta_list.append(theta)
        theta = np.mean(theta_list)
        # Variance from the sequential doubly-robust score with a
        # behavior policy estimated on the full log.
        a_temp = np.where(self.A == 1)[1]
        pi_behavior = self.policy_estimator(self.X, a_temp, self.X)
        densratio_matrix = self.pi_evaluation/pi_behavior
        f_matrix = self._sequential_f_matrix()
        score = self._dr_score(f_matrix, densratio_matrix)
        var = np.mean((score - theta)**2)
        return theta, var

    def a2ipw(self):
        """Adaptive AIPW: sequential outcome model, known behavior policy."""
        densratio_matrix = self.pi_evaluation/self.pi_behavior
        f_matrix = self._sequential_f_matrix()
        score = self._dr_score(f_matrix, densratio_matrix)
        theta = np.mean(score)
        var = np.mean((score - theta)**2)
        return theta, var

    def adr(self):
        """Adaptive DR: sequentially estimated behavior policy and outcome model."""
        pi_behavior = np.copy(self.pi_evaluation)
        # Uniform (0.5) prior until enough samples of every action exist.
        pi_behavior[:] = 0.5
        for t in range(1, self.N_hst):
            if all(np.sum(self.A[:t, :] == 1, axis=0) > self.warning_samples):
                a_temp = np.where(self.A[:t] == 1)[1]
                pi_behavior[t, :] = self.policy_estimator(
                    self.X[:t], a_temp, np.array([self.X[t]]))
            else:
                pi_behavior[t, :] = 0.5
        densratio_matrix = self.pi_evaluation/pi_behavior
        f_matrix = self._sequential_f_matrix()
        score = self._dr_score(f_matrix, densratio_matrix)
        theta = np.mean(score)
        var = np.mean((score - theta)**2)
        return theta, var

    def ipw(self):
        """Inverse probability weighting; estimates pi_behavior when absent."""
        if self.pi_behavior is None:
            a_temp = np.where(self.A == 1)[1]
            self.pi_behavior = self.policy_estimator(self.X, a_temp, self.X)
        densratio = self.pi_evaluation/self.pi_behavior
        score = np.sum(self.A*self.Y*densratio, axis=1)
        theta = np.mean(score)
        var = np.mean((score - theta)**2)
        return theta, var

    def dr(self):
        """Doubly robust: estimated behavior policy, full-data outcome model."""
        a_temp = np.where(self.A == 1)[1]
        pi_behavior = self.policy_estimator(self.X, a_temp, self.X)
        densratio = self.pi_evaluation/pi_behavior
        f_matrix = self._full_f_matrix()
        score = self._dr_score(f_matrix, densratio)
        theta = np.mean(score)
        var = np.mean((score - theta)**2)
        return theta, var

    def aipw(self):
        """AIPW: known behavior policy, full-data outcome model."""
        densratio = self.pi_evaluation/self.pi_behavior
        f_matrix = self._full_f_matrix()
        score = self._dr_score(f_matrix, densratio)
        theta = np.mean(score)
        var = np.mean((score - theta)**2)
        return theta, var

    def dm(self, method='Ridge'):
        """Direct method: purely model-based estimate (method arg is unused)."""
        f_matrix = self._full_f_matrix()
        score = np.sum(f_matrix*self.pi_evaluation, axis=1)
        theta = np.mean(score)
        var = np.mean((score - theta)**2)
        return theta, var
| 34.206806
| 86
| 0.507385
| 1,894
| 13,067
| 3.291447
| 0.064414
| 0.032082
| 0.024382
| 0.024062
| 0.780879
| 0.747514
| 0.740456
| 0.726821
| 0.718319
| 0.718319
| 0
| 0.016762
| 0.356241
| 13,067
| 381
| 87
| 34.296588
| 0.724322
| 0.047907
| 0
| 0.690647
| 0
| 0
| 0.005792
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.043165
| false
| 0
| 0.021583
| 0
| 0.111511
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
fa2ba789ab8bf3933288157a2f3dbc2cad5ba2b5
| 92
|
py
|
Python
|
enthought/units/geo_units.py
|
enthought/etsproxy
|
4aafd628611ebf7fe8311c9d1a0abcf7f7bb5347
|
[
"BSD-3-Clause"
] | 3
|
2016-12-09T06:05:18.000Z
|
2018-03-01T13:00:29.000Z
|
enthought/units/geo_units.py
|
enthought/etsproxy
|
4aafd628611ebf7fe8311c9d1a0abcf7f7bb5347
|
[
"BSD-3-Clause"
] | 1
|
2020-12-02T00:51:32.000Z
|
2020-12-02T08:48:55.000Z
|
enthought/units/geo_units.py
|
enthought/etsproxy
|
4aafd628611ebf7fe8311c9d1a0abcf7f7bb5347
|
[
"BSD-3-Clause"
] | null | null | null |
# proxy module
from __future__ import absolute_import
from scimath.units.geo_units import *
| 23
| 38
| 0.836957
| 13
| 92
| 5.461538
| 0.692308
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.119565
| 92
| 3
| 39
| 30.666667
| 0.876543
| 0.130435
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
fa41eebf29bc158bda67e5a659c178e331050e78
| 68
|
py
|
Python
|
pose_trackers/lighttrack/graph/unit_test/__init__.py
|
rcourivaud/video-to-pose3D
|
b908014fe2c531c075c11cee72bb798120f970c2
|
[
"MIT"
] | 574
|
2019-07-12T08:35:18.000Z
|
2022-03-28T06:37:44.000Z
|
pose_trackers/lighttrack/graph/unit_test/__init__.py
|
rcourivaud/video-to-pose3D
|
b908014fe2c531c075c11cee72bb798120f970c2
|
[
"MIT"
] | 55
|
2019-07-11T11:31:16.000Z
|
2022-03-11T23:54:54.000Z
|
pose_trackers/lighttrack/graph/unit_test/__init__.py
|
rcourivaud/video-to-pose3D
|
b908014fe2c531c075c11cee72bb798120f970c2
|
[
"MIT"
] | 123
|
2019-09-06T07:08:40.000Z
|
2022-03-26T21:50:28.000Z
|
import os
import sys
# Make modules in the sibling "../utils/" directory importable.  Note
# os.path.abspath resolves relative to the current working directory,
# not this file -- TODO confirm callers run from the expected directory.
sys.path.append(os.path.abspath("../utils/"))
| 13.6
| 45
| 0.705882
| 11
| 68
| 4.363636
| 0.636364
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.088235
| 68
| 4
| 46
| 17
| 0.774194
| 0
| 0
| 0
| 0
| 0
| 0.132353
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.666667
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
fa44ccefc9d9f47fbd3fe5e3e6e4301a6113318e
| 60
|
py
|
Python
|
main/controllers/__init__.py
|
billtrn/Comment-Sentiment-Detector
|
3cacca439cf8ada10da021ca620008d8320eeacd
|
[
"MIT"
] | 10
|
2021-05-19T11:24:19.000Z
|
2022-01-07T16:27:23.000Z
|
main/controllers/__init__.py
|
billtrn/Comment_Sentiment_Analysis
|
3cacca439cf8ada10da021ca620008d8320eeacd
|
[
"MIT"
] | 1
|
2021-05-18T15:55:52.000Z
|
2021-05-18T15:55:52.000Z
|
main/controllers/__init__.py
|
billtrn/Comment_Sentiment_Analysis
|
3cacca439cf8ada10da021ca620008d8320eeacd
|
[
"MIT"
] | null | null | null |
def init_routes():
    # Import the controller modules so their module-level code executes;
    # presumably each registers its routes at import time -- TODO confirm
    # against the application factory.
    from . import api, index, prediction
| 20
| 40
| 0.7
| 8
| 60
| 5.125
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.2
| 60
| 2
| 41
| 30
| 0.854167
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| true
| 0
| 0.5
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
fa5d4b9d08e8605dad0aceece94fed5dd3c5f867
| 76
|
py
|
Python
|
pyMR/__init__.py
|
k4rth33k/pyMR
|
6d5caab73b7712e719716cf14c24c92c41d7c347
|
[
"MIT"
] | 2
|
2020-07-19T05:37:26.000Z
|
2021-09-03T10:36:01.000Z
|
pyMR/__init__.py
|
k4rth33k/pyMR
|
6d5caab73b7712e719716cf14c24c92c41d7c347
|
[
"MIT"
] | null | null | null |
pyMR/__init__.py
|
k4rth33k/pyMR
|
6d5caab73b7712e719716cf14c24c92c41d7c347
|
[
"MIT"
] | null | null | null |
from .main import Master
from .chunk import Chunks
from .utils import Queue
| 19
| 25
| 0.802632
| 12
| 76
| 5.083333
| 0.666667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.157895
| 76
| 3
| 26
| 25.333333
| 0.953125
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
d72e24819bf2b965ae56363664d961a0a010f0e2
| 1,326
|
py
|
Python
|
tests/test_fp_data.py
|
hsamshod/hikvision-isapi-wrapper
|
7d4f9185baa503f53477d9ec39cf246c13eff41a
|
[
"MIT"
] | 8
|
2020-12-23T09:10:31.000Z
|
2022-03-28T20:13:54.000Z
|
tests/test_fp_data.py
|
hsamshod/hikvision-isapi-wrapper
|
7d4f9185baa503f53477d9ec39cf246c13eff41a
|
[
"MIT"
] | null | null | null |
tests/test_fp_data.py
|
hsamshod/hikvision-isapi-wrapper
|
7d4f9185baa503f53477d9ec39cf246c13eff41a
|
[
"MIT"
] | 4
|
2021-01-18T18:36:37.000Z
|
2022-03-01T06:08:53.000Z
|
from types import SimpleNamespace
import hikvision_isapi_wrapper as api
import vcr
@vcr.use_cassette('tests/vcr_cassettes/fp-data-add.yml', filter_headers=['Authorization'])
def test_fp_data_add():
    # Replays the recorded HTTP exchange from the cassette (Authorization
    # header filtered from the recording) and checks a face-data "add".
    fp_lib_instance = api.FaceData()
    response = fp_lib_instance.face_data_add('blackFD', '1', '4', 'tessst', 'male', '19940226T000000+0500', 'Tashkent', 'https://i.ibb.co/P9rJSTQ/murod.jpg')
    assert isinstance(response, SimpleNamespace)
    assert response.statusString == 'OK', "Successful response should be OK"
@vcr.use_cassette('tests/vcr_cassettes/fp-data-update.yml', filter_headers=['Authorization'])
def test_fp_data_update():
    # Same fixture data as test_fp_data_add, replayed against the
    # "update" endpoint cassette.
    fp_lib_instance = api.FaceData()
    response = fp_lib_instance.face_data_update('blackFD', '1', '4', 'tessst', 'male', '19940226T000000+0500', 'Tashkent', 'https://i.ibb.co/P9rJSTQ/murod.jpg')
    assert isinstance(response, SimpleNamespace)
    assert response.statusString == 'OK', "Successful response should be OK"
@vcr.use_cassette('tests/vcr_cassettes/fp-data-delete.yml', filter_headers=['Authorization'])
def test_fp_data_delete():
    # Deletes the record added above; third argument is a list of ids.
    fp_lib_instance = api.FaceData()
    response = fp_lib_instance.face_data_delete('blackFD', '1', ['4',])
    assert isinstance(response, SimpleNamespace)
    assert response.statusString == 'OK', "Successful response should be OK"
| 47.357143
| 160
| 0.745098
| 174
| 1,326
| 5.45977
| 0.298851
| 0.037895
| 0.082105
| 0.06
| 0.868421
| 0.868421
| 0.868421
| 0.868421
| 0.696842
| 0.696842
| 0
| 0.037288
| 0.110106
| 1,326
| 27
| 161
| 49.111111
| 0.767797
| 0
| 0
| 0.428571
| 0
| 0
| 0.319005
| 0.08371
| 0
| 0
| 0
| 0
| 0.285714
| 1
| 0.142857
| false
| 0
| 0.142857
| 0
| 0.285714
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
d78138a9bda10e91002541895c1451189c1263ef
| 1,196
|
py
|
Python
|
tests/test_sectiongen.py
|
timskovjacobsen/conctools
|
74ef341f76fa49ca705175b6b4e618b847745859
|
[
"MIT"
] | 8
|
2020-02-22T22:41:42.000Z
|
2021-06-14T13:44:31.000Z
|
tests/test_sectiongen.py
|
timskovjacobsen/conctools
|
74ef341f76fa49ca705175b6b4e618b847745859
|
[
"MIT"
] | 4
|
2020-03-06T17:01:13.000Z
|
2020-06-02T12:43:01.000Z
|
tests/test_sectiongen.py
|
timskovjacobsen/conctools
|
74ef341f76fa49ca705175b6b4e618b847745859
|
[
"MIT"
] | 1
|
2020-10-13T22:17:27.000Z
|
2020-10-13T22:17:27.000Z
|
"""Tests for `sectiongen` module."""
import os
import sys
# import numpy as np
# from numpy.testing import assert_almost_equal, assert_array_almost_equal
# Import module to test
import conctools._sectiongen as sg
# TODO Adjust tests below after code to be tested was changed.
# def test_neutral_axis_locs_traverse_upwards():
# # ----- Setup -----
# bounds = (-300, 300)
# n_locations = 7
# desired = np.array([-300, -200, -100, 0, 100, 200, 300])
# # ----- Exercise -----
# locations = sg.neutral_axis_locs(bounds, n_locations, traverse_upwards=True)
# # Unpack generator into array
# actual = np.array([*locations])
# # ----- Verify -----
# assert_array_almost_equal(actual, desired)
# def test_neutral_axis_locs_traverse_downwards():
# # ----- Setup -----
# bounds = (-300, 300)
# n_locations = 7
# desired = np.array([300, 200, 100, 0, -100, -200, -300])
# # ----- Exercise -----
# locations = sg.neutral_axis_locs(bounds, n_locations, traverse_upwards=False)
# # Unpack generator into array
# actual = np.array([*locations])
# # ----- Verify -----
# assert_array_almost_equal(actual, desired)
| 23.92
| 83
| 0.632107
| 145
| 1,196
| 4.993103
| 0.372414
| 0.060773
| 0.082873
| 0.09116
| 0.671271
| 0.671271
| 0.588398
| 0.588398
| 0.588398
| 0.588398
| 0
| 0.055026
| 0.209866
| 1,196
| 49
| 84
| 24.408163
| 0.711111
| 0.882107
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.020408
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
d78c129dfa2f795903d9b8552d0eb5894e83a9ed
| 247
|
py
|
Python
|
src/tlsscout/template_context_processors.py
|
gettis/tlsscout
|
55dd5a1dbc3329aa451bfd82aac9a0f68d52136f
|
[
"BSD-3-Clause"
] | 9
|
2015-03-16T08:40:34.000Z
|
2020-10-13T15:15:38.000Z
|
src/tlsscout/template_context_processors.py
|
gettis/tlsscout
|
55dd5a1dbc3329aa451bfd82aac9a0f68d52136f
|
[
"BSD-3-Clause"
] | 6
|
2015-03-22T19:32:52.000Z
|
2022-02-11T03:39:24.000Z
|
src/tlsscout/template_context_processors.py
|
gettis/tlsscout
|
55dd5a1dbc3329aa451bfd82aac9a0f68d52136f
|
[
"BSD-3-Clause"
] | 8
|
2015-05-02T13:21:40.000Z
|
2020-09-30T17:59:49.000Z
|
from django.conf import settings
def anon_access(request):
    """Template context processor exposing settings.ALLOW_ANONYMOUS_VIEWING."""
    return dict(ALLOW_ANONYMOUS_VIEWING=settings.ALLOW_ANONYMOUS_VIEWING)
def signup_enabled(request):
    """Template context processor mapping 'SIGNUP_ENABLED' to settings.ENABLE_SIGNUP."""
    return dict(SIGNUP_ENABLED=settings.ENABLE_SIGNUP)
| 17.642857
| 67
| 0.704453
| 27
| 247
| 6.148148
| 0.592593
| 0.156627
| 0.253012
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.218623
| 247
| 13
| 68
| 19
| 0.860104
| 0
| 0
| 0.222222
| 0
| 0
| 0.150407
| 0.093496
| 0
| 0
| 0
| 0
| 0
| 1
| 0.222222
| false
| 0
| 0.111111
| 0.222222
| 0.555556
| 0
| 1
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 5
|
ad104614d925bb0fb61481e176a823da78cc50d8
| 402
|
py
|
Python
|
environment.py
|
bathicodes/Distributor
|
c24879ef142798fb0dcdb7fe9ca5e7dbcc3f3168
|
[
"MIT"
] | null | null | null |
environment.py
|
bathicodes/Distributor
|
c24879ef142798fb0dcdb7fe9ca5e7dbcc3f3168
|
[
"MIT"
] | null | null | null |
environment.py
|
bathicodes/Distributor
|
c24879ef142798fb0dcdb7fe9ca5e7dbcc3f3168
|
[
"MIT"
] | null | null | null |
from pathlib import Path
# -------------------- path_osx for mac os -------------------- #
# Current user's home directory as a plain string.
home_osx = str(Path.home())
def path_osx():
    """Path of the Distributor folder on the user's Desktop."""
    return home_osx + "/Desktop/Distributor"
def documents_osx():
    """Path of the user's Documents directory (trailing slash included)."""
    return home_osx + "/Documents/"
def music_osx():
    """Path of the user's Music directory (trailing slash included)."""
    return home_osx + "/Music/"
def pictures_osx():
    """Path of the user's Pictures directory (trailing slash included)."""
    return home_osx + "/Pictures/"
def movies_osx():
    """Path of the user's Movies directory (trailing slash included)."""
    return home_osx + "/Movies/"
| 21.157895
| 65
| 0.606965
| 55
| 402
| 4.218182
| 0.345455
| 0.181034
| 0.215517
| 0.301724
| 0.366379
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.161692
| 402
| 19
| 66
| 21.157895
| 0.688427
| 0.151741
| 0
| 0
| 0
| 0
| 0.313609
| 0.150888
| 0
| 0
| 0
| 0
| 0
| 1
| 0.416667
| false
| 0
| 0.083333
| 0.416667
| 0.916667
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 5
|
ad20d7bff083994381269345ed5c48c7e9c5d508
| 367
|
py
|
Python
|
metu_cafeteria_menu/exceptions.py
|
th0th/metu-cafeteria-menu
|
af62990a27f1250f82b92ab3fb4a848df0dac880
|
[
"MIT"
] | 1
|
2020-12-29T11:57:48.000Z
|
2020-12-29T11:57:48.000Z
|
metu_cafeteria_menu/exceptions.py
|
th0th/metu-cafeteria-menu
|
af62990a27f1250f82b92ab3fb4a848df0dac880
|
[
"MIT"
] | null | null | null |
metu_cafeteria_menu/exceptions.py
|
th0th/metu-cafeteria-menu
|
af62990a27f1250f82b92ab3fb4a848df0dac880
|
[
"MIT"
] | null | null | null |
class RequestException(Exception):
    """Error raised for a failed request; wraps the underlying exception.

    Attributes:
        parent_exception: the lower-level exception that triggered this one.
        message: human-readable description, also used as str(self).
    """

    def __init__(self, parent_exception, message):
        # Forward the message to Exception so that self.args and repr()
        # are populated (the original left args empty, breaking repr/pickling).
        super().__init__(message)
        self.parent_exception = parent_exception
        self.message = message

    def __str__(self):
        return self.message
class DateException(Exception):
    """Error type that renders as its ``message`` string.

    Attributes:
        message: human-readable description, also used as str(self).
    """

    def __init__(self, message):
        # Forward the message to Exception so that self.args and repr()
        # are populated (the original left args empty, breaking repr/pickling).
        super().__init__(message)
        self.message = message

    def __str__(self):
        return self.message
| 24.466667
| 50
| 0.683924
| 39
| 367
| 5.948718
| 0.282051
| 0.237069
| 0.137931
| 0.172414
| 0.387931
| 0.387931
| 0.387931
| 0.387931
| 0.387931
| 0
| 0
| 0
| 0.237057
| 367
| 15
| 51
| 24.466667
| 0.828571
| 0
| 0
| 0.545455
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.363636
| false
| 0
| 0
| 0.181818
| 0.727273
| 0
| 0
| 0
| 0
| null | 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 5
|
ad500f072701d02195cf2e35de508971670fd1cf
| 766
|
py
|
Python
|
novice/02-02/pytest/test_sample.py
|
fakihAlim/zimera
|
69271dbcfe9d8f9b2ef72e6f6c8ce0ae4c57a9c9
|
[
"MIT"
] | null | null | null |
novice/02-02/pytest/test_sample.py
|
fakihAlim/zimera
|
69271dbcfe9d8f9b2ef72e6f6c8ce0ae4c57a9c9
|
[
"MIT"
] | null | null | null |
novice/02-02/pytest/test_sample.py
|
fakihAlim/zimera
|
69271dbcfe9d8f9b2ef72e6f6c8ce0ae4c57a9c9
|
[
"MIT"
] | null | null | null |
def func(x):
    """Return *x* incremented by one."""
    return 1 + x


def test_answer():
    """func applied to 3 must yield 4."""
    expected = 4
    assert func(3) == expected
# --- RESULT (captured pytest output) ---
# (py39-nlp) C:\Users\DeLL\My Documents\github\zimera\novice\02-02\pytest>pytest test_sample.py
# ================================================= test session starts =================================================
# platform win32 -- Python 3.9.7, pytest-6.2.5, py-1.11.0, pluggy-1.0.0
# rootdir: C:\Users\DeLL\My Documents\github\zimera\novice\02-02\pytest
# plugins: anyio-2.2.0, hypothesis-6.32.1
# collected 1 item
# test_sample.py . [100%]
# ================================================== 1 passed in 0.03s ==================================================
| 36.47619
| 121
| 0.412533
| 85
| 766
| 3.682353
| 0.576471
| 0.038339
| 0.063898
| 0.076677
| 0.313099
| 0.313099
| 0.313099
| 0.313099
| 0.313099
| 0.313099
| 0
| 0.072148
| 0.221932
| 766
| 20
| 122
| 38.3
| 0.45302
| 0.870757
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.25
| 1
| 0.5
| false
| 0
| 0
| 0.25
| 0.75
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 5
|
ad59e744139b2ed89cfb03e9ff98043640993360
| 1,061
|
py
|
Python
|
mysite/myapp/migrations/0010_auto_20190416_0314.py
|
Pdhenson/QuestLog
|
8cfe7061fa7ec6b7cf18cea8800763d35a852f79
|
[
"MIT"
] | null | null | null |
mysite/myapp/migrations/0010_auto_20190416_0314.py
|
Pdhenson/QuestLog
|
8cfe7061fa7ec6b7cf18cea8800763d35a852f79
|
[
"MIT"
] | null | null | null |
mysite/myapp/migrations/0010_auto_20190416_0314.py
|
Pdhenson/QuestLog
|
8cfe7061fa7ec6b7cf18cea8800763d35a852f79
|
[
"MIT"
] | null | null | null |
# Generated by Django 2.2 on 2019-04-16 03:14
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: redefine the five ``step_*`` fields on Steps."""

    dependencies = [
        ('myapp', '0009_steps_quest'),
    ]

    # All five step columns receive the identical CharField definition, so
    # build the AlterField operations from the field-name suffixes, kept in
    # the order the original generator emitted them.
    operations = [
        migrations.AlterField(
            model_name='steps',
            name='step_' + suffix,
            field=models.CharField(default='', max_length=255),
        )
        for suffix in ('five', 'four', 'one', 'three', 'two')
    ]
| 27.205128
| 63
| 0.548539
| 102
| 1,061
| 5.539216
| 0.372549
| 0.176991
| 0.221239
| 0.256637
| 0.716814
| 0.716814
| 0.716814
| 0.573451
| 0.573451
| 0.573451
| 0
| 0.046025
| 0.324222
| 1,061
| 38
| 64
| 27.921053
| 0.74198
| 0.040528
| 0
| 0.625
| 1
| 0
| 0.088583
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.03125
| 0
| 0.125
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
ad88b386a9b6cf7da7ffeec1bca533e736da0ba4
| 109
|
py
|
Python
|
app/web/__main__.py
|
art-solopov/zodb_book_mgmt
|
d3ab28911168dff097125f374ef720059b5acbd4
|
[
"MIT"
] | null | null | null |
app/web/__main__.py
|
art-solopov/zodb_book_mgmt
|
d3ab28911168dff097125f374ef720059b5acbd4
|
[
"MIT"
] | null | null | null |
app/web/__main__.py
|
art-solopov/zodb_book_mgmt
|
d3ab28911168dff097125f374ef720059b5acbd4
|
[
"MIT"
] | null | null | null |
import bottle
from .base import base_app
# Development entry point: serve ``base_app`` on localhost:8080 with
# bottle's debug mode enabled (verbose tracebacks).
# NOTE(review): port is passed as the string '8080'; bottle accepts it,
# but an int is the conventional type — confirm before changing.
bottle.run(app=base_app, host='localhost', port='8080', debug=True)
| 27.25
| 67
| 0.770642
| 18
| 109
| 4.555556
| 0.666667
| 0.170732
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.040404
| 0.091743
| 109
| 3
| 68
| 36.333333
| 0.787879
| 0
| 0
| 0
| 0
| 0
| 0.119266
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.666667
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
d1097cda901f0bb3fcfddf658f900dcec49d36d4
| 252
|
py
|
Python
|
jobportal/tests.py
|
zobeltran/webdevtproject
|
5c00c726863b1411e85fca48e44883cefe62b9dd
|
[
"Apache-2.0"
] | null | null | null |
jobportal/tests.py
|
zobeltran/webdevtproject
|
5c00c726863b1411e85fca48e44883cefe62b9dd
|
[
"Apache-2.0"
] | null | null | null |
jobportal/tests.py
|
zobeltran/webdevtproject
|
5c00c726863b1411e85fca48e44883cefe62b9dd
|
[
"Apache-2.0"
] | null | null | null |
from django.test import TestCase
# Create your tests here.
#
# class EmployeeRegistrationTest(TestCase):
# def test_index(self):
# resp = self.client.get('accounts/employee/register.html')
# self.assertEqual(resp.status_code, 200)
| 28
| 67
| 0.710317
| 30
| 252
| 5.9
| 0.833333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.014423
| 0.174603
| 252
| 8
| 68
| 31.5
| 0.836538
| 0.813492
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
d13935aee39e7625eb8390645147f92fb938f99e
| 200
|
py
|
Python
|
example_test.py
|
Alsrec/EC-500A2
|
0572b8ad7bff2b202a639ddb30c7f0c4e24d6f55
|
[
"MIT"
] | null | null | null |
example_test.py
|
Alsrec/EC-500A2
|
0572b8ad7bff2b202a639ddb30c7f0c4e24d6f55
|
[
"MIT"
] | null | null | null |
example_test.py
|
Alsrec/EC-500A2
|
0572b8ad7bff2b202a639ddb30c7f0c4e24d6f55
|
[
"MIT"
] | null | null | null |
from example import*
def test_add3():
    """add3 should combine three arguments, numeric or string."""
    cases = [
        ((1, 2, 3), 6),
        ((5, 5, 5), 15),
        (("EA", "Z", "Y"), "EAZY"),
    ]
    for args, expected in cases:
        assert add3(*args) == expected


def test_numpyaround():
    """numpyaround should round 1.222 to one decimal place."""
    expected = 1.2
    assert numpyaround(1.222, 1) == expected
| 20
| 39
| 0.595
| 34
| 200
| 3.441176
| 0.558824
| 0.25641
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.125
| 0.2
| 200
| 9
| 40
| 22.222222
| 0.60625
| 0
| 0
| 0
| 0
| 0
| 0.04
| 0
| 0
| 0
| 0
| 0
| 0.571429
| 1
| 0.285714
| true
| 0
| 0.142857
| 0
| 0.428571
| 0
| 0
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
d156751c8c97c1f04813a94da89ac9952b28a502
| 363
|
py
|
Python
|
parser/team28/models/objects/columns_select.py
|
webdev188/tytus
|
847071edb17b218f51bb969d335a8ec093d13f94
|
[
"MIT"
] | 35
|
2020-12-07T03:11:43.000Z
|
2021-04-15T17:38:16.000Z
|
parser/team28/models/objects/columns_select.py
|
webdev188/tytus
|
847071edb17b218f51bb969d335a8ec093d13f94
|
[
"MIT"
] | 47
|
2020-12-09T01:29:09.000Z
|
2021-01-13T05:37:50.000Z
|
parser/team28/models/objects/columns_select.py
|
webdev188/tytus
|
847071edb17b218f51bb969d335a8ec093d13f94
|
[
"MIT"
] | 556
|
2020-12-07T03:13:31.000Z
|
2021-06-17T17:41:10.000Z
|
class ColumnsSelect(object):
    """Holds the column list of a SELECT clause behind a ``values`` property."""

    def __init__(self, values):
        # Backing store for the ``values`` property.
        self._values = values

    @property
    def values(self):
        """The wrapped column list."""
        return self._values

    @values.setter
    def values(self, new_values):
        self._values = new_values

    def __repr__(self):
        return str(vars(self))
# column = ColumnsSelect([1,2,34,51,2])
# print(column.values)
| 22.6875
| 39
| 0.614325
| 43
| 363
| 4.930233
| 0.44186
| 0.235849
| 0.226415
| 0.188679
| 0.245283
| 0
| 0
| 0
| 0
| 0
| 0
| 0.026316
| 0.267218
| 363
| 16
| 40
| 22.6875
| 0.770677
| 0.15978
| 0
| 0.181818
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.363636
| false
| 0
| 0
| 0.181818
| 0.636364
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
|
0
| 5
|
0f39f8313e77473f36e6a452d3c204193ddac78c
| 82
|
py
|
Python
|
engine/models/models.py
|
LloydTao/ecm3423-fur-effect
|
fefa73665b459dfd1648dca97a95e8313cf53dd5
|
[
"MIT"
] | null | null | null |
engine/models/models.py
|
LloydTao/ecm3423-fur-effect
|
fefa73665b459dfd1648dca97a95e8313cf53dd5
|
[
"MIT"
] | null | null | null |
engine/models/models.py
|
LloydTao/ecm3423-fur-effect
|
fefa73665b459dfd1648dca97a95e8313cf53dd5
|
[
"MIT"
] | null | null | null |
import numpy as np
import pygame
from OpenGL.GL import *
class Model:
    """Empty placeholder for model types — no behavior implemented yet."""
    pass
| 9.111111
| 23
| 0.719512
| 13
| 82
| 4.538462
| 0.846154
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.243902
| 82
| 8
| 24
| 10.25
| 0.951613
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.2
| 0.6
| 0
| 0.8
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 0
|
0
| 5
|
0f4498c69b394978b204d57728ba7b082d792cec
| 52
|
py
|
Python
|
booster/pipeline/__init__.py
|
vlievin/booster-pytorch
|
a8f447160c30224112731a25f90f6f97126a34b2
|
[
"MIT"
] | 4
|
2019-12-10T06:41:29.000Z
|
2021-08-06T13:34:59.000Z
|
booster/pipeline/__init__.py
|
vlievin/booster-pytorch
|
a8f447160c30224112731a25f90f6f97126a34b2
|
[
"MIT"
] | null | null | null |
booster/pipeline/__init__.py
|
vlievin/booster-pytorch
|
a8f447160c30224112731a25f90f6f97126a34b2
|
[
"MIT"
] | 1
|
2020-08-20T16:12:53.000Z
|
2020-08-20T16:12:53.000Z
|
from .pipeline import Pipeline, DataParallelPipeline
| 52
| 52
| 0.884615
| 5
| 52
| 9.2
| 0.8
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.076923
| 52
| 1
| 52
| 52
| 0.958333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
0f7a2d452ecd4eaa893409c49dc4736ef33dc536
| 134
|
py
|
Python
|
roman/__init__.py
|
drMJ2/roman
|
51469b3b1e76a92cc9986106a642bcd2ef3365ad
|
[
"MIT"
] | null | null | null |
roman/__init__.py
|
drMJ2/roman
|
51469b3b1e76a92cc9986106a642bcd2ef3365ad
|
[
"MIT"
] | 1
|
2020-09-18T21:13:24.000Z
|
2020-09-18T21:13:24.000Z
|
roman/__init__.py
|
drMJ2/roman
|
51469b3b1e76a92cc9986106a642bcd2ef3365ad
|
[
"MIT"
] | null | null | null |
from .robot import *
from .ur import arm
from .ur.arm import Joints, Tool
from .rq import hand
from .rq.hand import GraspMode, Finger
| 22.333333
| 38
| 0.761194
| 23
| 134
| 4.434783
| 0.478261
| 0.117647
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.164179
| 134
| 5
| 39
| 26.8
| 0.910714
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.