hexsha
string | size
int64 | ext
string | lang
string | max_stars_repo_path
string | max_stars_repo_name
string | max_stars_repo_head_hexsha
string | max_stars_repo_licenses
list | max_stars_count
int64 | max_stars_repo_stars_event_min_datetime
string | max_stars_repo_stars_event_max_datetime
string | max_issues_repo_path
string | max_issues_repo_name
string | max_issues_repo_head_hexsha
string | max_issues_repo_licenses
list | max_issues_count
int64 | max_issues_repo_issues_event_min_datetime
string | max_issues_repo_issues_event_max_datetime
string | max_forks_repo_path
string | max_forks_repo_name
string | max_forks_repo_head_hexsha
string | max_forks_repo_licenses
list | max_forks_count
int64 | max_forks_repo_forks_event_min_datetime
string | max_forks_repo_forks_event_max_datetime
string | content
string | avg_line_length
float64 | max_line_length
int64 | alphanum_fraction
float64 | qsc_code_num_words_quality_signal
int64 | qsc_code_num_chars_quality_signal
float64 | qsc_code_mean_word_length_quality_signal
float64 | qsc_code_frac_words_unique_quality_signal
float64 | qsc_code_frac_chars_top_2grams_quality_signal
float64 | qsc_code_frac_chars_top_3grams_quality_signal
float64 | qsc_code_frac_chars_top_4grams_quality_signal
float64 | qsc_code_frac_chars_dupe_5grams_quality_signal
float64 | qsc_code_frac_chars_dupe_6grams_quality_signal
float64 | qsc_code_frac_chars_dupe_7grams_quality_signal
float64 | qsc_code_frac_chars_dupe_8grams_quality_signal
float64 | qsc_code_frac_chars_dupe_9grams_quality_signal
float64 | qsc_code_frac_chars_dupe_10grams_quality_signal
float64 | qsc_code_frac_chars_replacement_symbols_quality_signal
float64 | qsc_code_frac_chars_digital_quality_signal
float64 | qsc_code_frac_chars_whitespace_quality_signal
float64 | qsc_code_size_file_byte_quality_signal
float64 | qsc_code_num_lines_quality_signal
float64 | qsc_code_num_chars_line_max_quality_signal
float64 | qsc_code_num_chars_line_mean_quality_signal
float64 | qsc_code_frac_chars_alphabet_quality_signal
float64 | qsc_code_frac_chars_comments_quality_signal
float64 | qsc_code_cate_xml_start_quality_signal
float64 | qsc_code_frac_lines_dupe_lines_quality_signal
float64 | qsc_code_cate_autogen_quality_signal
float64 | qsc_code_frac_lines_long_string_quality_signal
float64 | qsc_code_frac_chars_string_length_quality_signal
float64 | qsc_code_frac_chars_long_word_length_quality_signal
float64 | qsc_code_frac_lines_string_concat_quality_signal
float64 | qsc_code_cate_encoded_data_quality_signal
float64 | qsc_code_frac_chars_hex_words_quality_signal
float64 | qsc_code_frac_lines_prompt_comments_quality_signal
float64 | qsc_code_frac_lines_assert_quality_signal
float64 | qsc_codepython_cate_ast_quality_signal
float64 | qsc_codepython_frac_lines_func_ratio_quality_signal
float64 | qsc_codepython_cate_var_zero_quality_signal
bool | qsc_codepython_frac_lines_pass_quality_signal
float64 | qsc_codepython_frac_lines_import_quality_signal
float64 | qsc_codepython_frac_lines_simplefunc_quality_signal
float64 | qsc_codepython_score_lines_no_logic_quality_signal
float64 | qsc_codepython_frac_lines_print_quality_signal
float64 | qsc_code_num_words
int64 | qsc_code_num_chars
int64 | qsc_code_mean_word_length
int64 | qsc_code_frac_words_unique
null | qsc_code_frac_chars_top_2grams
int64 | qsc_code_frac_chars_top_3grams
int64 | qsc_code_frac_chars_top_4grams
int64 | qsc_code_frac_chars_dupe_5grams
int64 | qsc_code_frac_chars_dupe_6grams
int64 | qsc_code_frac_chars_dupe_7grams
int64 | qsc_code_frac_chars_dupe_8grams
int64 | qsc_code_frac_chars_dupe_9grams
int64 | qsc_code_frac_chars_dupe_10grams
int64 | qsc_code_frac_chars_replacement_symbols
int64 | qsc_code_frac_chars_digital
int64 | qsc_code_frac_chars_whitespace
int64 | qsc_code_size_file_byte
int64 | qsc_code_num_lines
int64 | qsc_code_num_chars_line_max
int64 | qsc_code_num_chars_line_mean
int64 | qsc_code_frac_chars_alphabet
int64 | qsc_code_frac_chars_comments
int64 | qsc_code_cate_xml_start
int64 | qsc_code_frac_lines_dupe_lines
int64 | qsc_code_cate_autogen
int64 | qsc_code_frac_lines_long_string
int64 | qsc_code_frac_chars_string_length
int64 | qsc_code_frac_chars_long_word_length
int64 | qsc_code_frac_lines_string_concat
null | qsc_code_cate_encoded_data
int64 | qsc_code_frac_chars_hex_words
int64 | qsc_code_frac_lines_prompt_comments
int64 | qsc_code_frac_lines_assert
int64 | qsc_codepython_cate_ast
int64 | qsc_codepython_frac_lines_func_ratio
int64 | qsc_codepython_cate_var_zero
int64 | qsc_codepython_frac_lines_pass
int64 | qsc_codepython_frac_lines_import
int64 | qsc_codepython_frac_lines_simplefunc
int64 | qsc_codepython_score_lines_no_logic
int64 | qsc_codepython_frac_lines_print
int64 | effective
string | hits
int64 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
55df714bfde34f9996cfbef7f0c912395303eb4b
| 54,594
|
py
|
Python
|
environments/launchers.py
|
aowen87/ppo_and_friends
|
085c91d9bf2702d5c63a0b81e325144a8ccc2c2c
|
[
"MIT"
] | 1
|
2022-01-27T00:29:20.000Z
|
2022-01-27T00:29:20.000Z
|
environments/launchers.py
|
aowen87/ppo_and_friends
|
085c91d9bf2702d5c63a0b81e325144a8ccc2c2c
|
[
"MIT"
] | 8
|
2022-01-27T23:40:00.000Z
|
2022-03-31T16:27:50.000Z
|
environments/launchers.py
|
aowen87/ppo_and_friends
|
085c91d9bf2702d5c63a0b81e325144a8ccc2c2c
|
[
"MIT"
] | null | null | null |
"""
A home for environment "launchers", defined as simple functions
that initialize training for a specific environment.
"""
import gym
from ppo_and_friends.ppo import PPO
from ppo_and_friends.testing import test_policy
from ppo_and_friends.networks.actor_critic_networks import FeedForwardNetwork, AtariPixelNetwork
from ppo_and_friends.networks.actor_critic_networks import SplitObsNetwork
from ppo_and_friends.networks.actor_critic_networks import LSTMNetwork
from ppo_and_friends.networks.icm import ICM
from ppo_and_friends.networks.encoders import LinearObservationEncoder
from .gym_wrappers import *
import torch.nn as nn
from ppo_and_friends.utils.iteration_mappers import *
from mpi4py import MPI
# Per-process MPI bookkeeping. Launchers below use num_procs to scale
# rollout sizes with the number of ranks (e.g. ts_per_rollout = num_procs * 1024).
comm      = MPI.COMM_WORLD
rank      = comm.Get_rank()
num_procs = comm.Get_size()
def run_ppo(env_generator,
            ac_network,
            device,
            random_seed,
            is_multi_agent     = False,
            envs_per_proc      = 1,
            icm_network        = ICM,
            batch_size         = 256,
            ts_per_rollout     = 1024,
            epochs_per_iter    = 10,
            target_kl          = 100.,
            lr                 = 3e-4,
            min_lr             = 1e-4,
            lr_dec             = None,
            entropy_weight     = 0.01,
            min_entropy_weight = 0.01,
            entropy_dec        = None,
            max_ts_per_ep      = 200,
            use_gae            = True,
            use_icm            = False,
            save_best_only     = False,
            icm_beta           = 0.8,
            ext_reward_weight  = 1.0,
            intr_reward_weight = 1.0,
            actor_kw_args      = None,
            critic_kw_args     = None,
            icm_kw_args        = None,
            gamma              = 0.99,
            lambd              = 0.95,
            surr_clip          = 0.2,
            bootstrap_clip     = (-10.0, 10.0),
            dynamic_bs_clip    = False,
            gradient_clip      = 0.5,
            mean_window_size   = 100,
            normalize_adv      = True,
            normalize_obs      = True,
            normalize_rewards  = True,
            normalize_values   = True,
            obs_clip           = None,
            reward_clip        = None,
            render             = False,
            render_gif         = False,
            load_state         = False,
            state_path         = "./",
            num_timesteps      = 1,
            test               = False,
            pickle_class       = False,
            use_soft_resets    = True,
            obs_augment        = False,
            num_test_runs      = 1):
    """
    Construct a PPO instance for the given environment and either train
    it for num_timesteps or run test episodes.

    Parameters mirror the PPO constructor; see that class for details.
    env_generator is a zero-argument callable returning a fresh env.
    When test is True, test_policy is run for num_test_runs episodes
    instead of training.

    Bug fix: actor_kw_args, critic_kw_args, and icm_kw_args previously
    defaulted to shared mutable {} literals, which are created once at
    definition time and shared across every call (the classic Python
    mutable-default pitfall). They now default to None and are replaced
    with fresh dicts per call; explicit callers see identical behavior.
    """
    actor_kw_args  = {} if actor_kw_args  is None else actor_kw_args
    critic_kw_args = {} if critic_kw_args is None else critic_kw_args
    icm_kw_args    = {} if icm_kw_args    is None else icm_kw_args

    # NOTE(review): epochs_per_iter and icm_beta are accepted here but
    # never forwarded to PPO below, even though callers (e.g. the
    # breakout launchers) pass epochs_per_iter = 30 expecting it to take
    # effect. Confirm against the PPO signature and forward them if
    # supported.
    ppo = PPO(env_generator      = env_generator,
              ac_network         = ac_network,
              icm_network        = icm_network,
              device             = device,
              is_multi_agent     = is_multi_agent,
              random_seed        = random_seed,
              batch_size         = batch_size,
              envs_per_proc      = envs_per_proc,
              ts_per_rollout     = ts_per_rollout,
              lr                 = lr,
              target_kl          = target_kl,
              min_lr             = min_lr,
              lr_dec             = lr_dec,
              max_ts_per_ep      = max_ts_per_ep,
              use_gae            = use_gae,
              use_icm            = use_icm,
              save_best_only     = save_best_only,
              ext_reward_weight  = ext_reward_weight,
              intr_reward_weight = intr_reward_weight,
              entropy_weight     = entropy_weight,
              min_entropy_weight = min_entropy_weight,
              entropy_dec        = entropy_dec,
              icm_kw_args        = icm_kw_args,
              actor_kw_args      = actor_kw_args,
              critic_kw_args     = critic_kw_args,
              gamma              = gamma,
              lambd              = lambd,
              surr_clip          = surr_clip,
              bootstrap_clip     = bootstrap_clip,
              dynamic_bs_clip    = dynamic_bs_clip,
              gradient_clip      = gradient_clip,
              normalize_adv      = normalize_adv,
              normalize_obs      = normalize_obs,
              normalize_rewards  = normalize_rewards,
              normalize_values   = normalize_values,
              obs_clip           = obs_clip,
              reward_clip        = reward_clip,
              mean_window_size   = mean_window_size,
              render             = render,
              load_state         = load_state,
              state_path         = state_path,
              pickle_class       = pickle_class,
              use_soft_resets    = use_soft_resets,
              obs_augment        = obs_augment,
              test_mode          = test)

    if test:
        test_policy(ppo,
                    render_gif,
                    num_test_runs,
                    device)
    else:
        ppo.learn(num_timesteps)
###############################################################################
# Classic Control #
###############################################################################
def cartpole_ppo(state_path,
                 load_state,
                 render,
                 render_gif,
                 num_timesteps,
                 device,
                 envs_per_proc,
                 random_seed,
                 test          = False,
                 num_test_runs = 1):
    """
    Launch PPO on CartPole-v0: train by default, or run test episodes
    when test is True.
    """
    def env_generator():
        return gym.make('CartPole-v0')

    # Actor and critic use the same settings here; the copy keeps the
    # two dicts independent while sharing the activation instance.
    actor_kw_args  = {"activation" : nn.LeakyReLU()}
    critic_kw_args = dict(actor_kw_args)

    # Effectively a constant learning rate (max == min).
    lr     = 0.0002
    min_lr = 0.0002

    lr_dec = LinearDecrementer(
        max_iteration = 1,
        max_value     = lr,
        min_value     = min_lr)

    run_ppo(env_generator     = env_generator,
            random_seed       = random_seed,
            batch_size        = 256,
            actor_kw_args     = actor_kw_args,
            critic_kw_args    = critic_kw_args,
            ac_network        = FeedForwardNetwork,
            max_ts_per_ep     = 32,
            use_gae           = True,
            normalize_obs     = True,
            normalize_rewards = True,
            normalize_adv     = True,
            obs_clip          = (-10., 10.),
            reward_clip       = (-10., 10.),
            state_path        = state_path,
            load_state        = load_state,
            render            = render,
            render_gif        = render_gif,
            num_timesteps     = num_timesteps,
            device            = device,
            envs_per_proc     = envs_per_proc,
            lr                = lr,
            min_lr            = min_lr,
            lr_dec            = lr_dec,
            test              = test,
            num_test_runs     = num_test_runs)
def pendulum_ppo(state_path,
                 load_state,
                 render,
                 render_gif,
                 num_timesteps,
                 device,
                 envs_per_proc,
                 random_seed,
                 test          = False,
                 num_test_runs = 1):
    """
    Launch PPO on Pendulum-v1: train by default, or run test episodes
    when test is True.
    """
    def env_generator():
        return gym.make('Pendulum-v1')

    # Both networks share the activation; the critic gets a wider
    # hidden layer than the actor.
    shared_kw      = {"activation" : nn.LeakyReLU()}
    actor_kw_args  = dict(shared_kw, hidden_size = 32)
    critic_kw_args = dict(shared_kw, hidden_size = 256)

    # Effectively a constant learning rate (max == min).
    lr     = 0.0003
    min_lr = 0.0003

    lr_dec = LinearDecrementer(
        max_iteration = 1000,
        max_value     = lr,
        min_value     = min_lr)

    run_ppo(env_generator     = env_generator,
            random_seed       = random_seed,
            ac_network        = FeedForwardNetwork,
            actor_kw_args     = actor_kw_args,
            critic_kw_args    = critic_kw_args,
            max_ts_per_ep     = 32,
            use_gae           = True,
            normalize_obs     = True,
            normalize_rewards = True,
            dynamic_bs_clip   = True,
            obs_clip          = (-10., 10.),
            reward_clip       = (-10., 10.),
            state_path        = state_path,
            load_state        = load_state,
            render            = render,
            render_gif        = render_gif,
            num_timesteps     = num_timesteps,
            device            = device,
            envs_per_proc     = envs_per_proc,
            lr                = lr,
            min_lr            = min_lr,
            lr_dec            = lr_dec,
            test              = test,
            num_test_runs     = num_test_runs)
def mountain_car_ppo(state_path,
                     load_state,
                     render,
                     render_gif,
                     num_timesteps,
                     device,
                     envs_per_proc,
                     random_seed,
                     test          = False,
                     num_test_runs = 1):
    """
    Launch PPO on MountainCar-v0 (discrete actions).

    Uses ICM for exploration with a down-weighted extrinsic reward and
    no observation/reward normalization; see the NOTE below for the
    env-specific tuning rationale.
    """
    env_generator = lambda : gym.make('MountainCar-v0')

    actor_kw_args = {"activation" : nn.LeakyReLU()}
    actor_kw_args["hidden_size"] = 128

    critic_kw_args = actor_kw_args.copy()
    critic_kw_args["hidden_size"] = 128

    lr     = 0.0003
    min_lr = 0.0001

    # Hold lr at 3e-4 for the first 200 iterations, then drop to 1e-4.
    lr_dec = LinearStepMapper(
        steps        = [200,],
        step_values  = [0.0003,],
        ending_value = 0.0001)

    #
    # NOTE: This environment performs dramatically better when
    # max_ts_per_ep is set to the total timesteps allowed by the
    # environment. It's not 100% clear to me why this is the case.
    # We should probably explore this a bit more. MountainCarContinuous
    # doesn't seem to exhibit this behavior, so it's unlikely an issue
    # with ICM.
    # Also, the extrinsic reward weight fraction is very important
    # for good performance.
    #
    run_ppo(env_generator     = env_generator,
            random_seed       = random_seed,
            ac_network        = FeedForwardNetwork,
            actor_kw_args     = actor_kw_args,
            critic_kw_args    = critic_kw_args,
            dynamic_bs_clip   = True,
            max_ts_per_ep     = 200,
            ts_per_rollout    = 2048,
            ext_reward_weight = 1./100.,
            lr_dec            = lr_dec,
            lr                = lr,
            min_lr            = min_lr,
            use_icm           = True,
            use_gae           = True,
            normalize_obs     = False,
            normalize_rewards = False,
            normalize_values  = False,
            obs_clip          = None,
            reward_clip       = None,
            bootstrap_clip    = (-10, 10),
            state_path        = state_path,
            load_state        = load_state,
            render            = render,
            render_gif        = render_gif,
            num_timesteps     = num_timesteps,
            device            = device,
            envs_per_proc     = envs_per_proc,
            test              = test,
            num_test_runs     = num_test_runs)
def mountain_car_continuous_ppo(state_path,
                                load_state,
                                render,
                                render_gif,
                                num_timesteps,
                                device,
                                envs_per_proc,
                                random_seed,
                                test          = False,
                                num_test_runs = 1):
    """
    Launch PPO on MountainCarContinuous-v0.

    Relies on ICM with a strongly boosted intrinsic reward
    (intr_reward_weight = 50) and a down-weighted extrinsic reward to
    drive exploration; normalization is disabled (see comment below).
    """
    env_generator = lambda : gym.make('MountainCarContinuous-v0')

    #
    # Extra args for the actor critic models.
    #
    actor_kw_args = {}
    actor_kw_args["activation"]  = nn.LeakyReLU()
    actor_kw_args["hidden_size"] = 64

    critic_kw_args = actor_kw_args.copy()
    critic_kw_args["hidden_size"] = 128

    # Effectively a constant learning rate (max == min).
    lr     = 0.0003
    min_lr = 0.0003

    lr_dec = LinearDecrementer(
        max_iteration = 1,
        max_value     = lr,
        min_value     = min_lr)

    #
    # I've noticed that normalizing rewards and observations
    # can slow down learning at times. It's not by much (maybe
    # 10-50 iterations).
    #
    run_ppo(env_generator      = env_generator,
            random_seed        = random_seed,
            ac_network         = FeedForwardNetwork,
            max_ts_per_ep      = 128,
            ts_per_rollout     = 2048,
            batch_size         = 512,
            lr_dec             = lr_dec,
            lr                 = lr,
            min_lr             = min_lr,
            actor_kw_args      = actor_kw_args,
            critic_kw_args     = critic_kw_args,
            use_icm            = True,
            use_gae            = True,
            normalize_obs      = False,
            normalize_rewards  = False,
            normalize_values   = False,
            obs_clip           = None,
            reward_clip        = None,
            normalize_adv      = True,
            bootstrap_clip     = (-10., 10.),
            dynamic_bs_clip    = True,
            ext_reward_weight  = 1./100.,
            intr_reward_weight = 50.,
            state_path         = state_path,
            load_state         = load_state,
            render             = render,
            render_gif         = render_gif,
            num_timesteps      = num_timesteps,
            device             = device,
            envs_per_proc      = envs_per_proc,
            test               = test,
            num_test_runs      = num_test_runs)
def acrobot_ppo(state_path,
                load_state,
                render,
                render_gif,
                num_timesteps,
                device,
                envs_per_proc,
                random_seed,
                test          = False,
                num_test_runs = 1):
    """
    Launch PPO on Acrobot-v1: train by default, or run test episodes
    when test is True.
    """
    def env_generator():
        return gym.make('Acrobot-v1')

    # Separate settings for each network: a wider critic than actor.
    actor_kw_args  = {"hidden_size" : 64}
    critic_kw_args = {"hidden_size" : 128}

    # Decay the learning rate from 3e-4 to 0 over 2000 iterations.
    lr     = 0.0003
    min_lr = 0.0

    lr_dec = LinearDecrementer(
        max_iteration = 2000,
        max_value     = lr,
        min_value     = min_lr)

    run_ppo(env_generator     = env_generator,
            random_seed       = random_seed,
            ac_network        = FeedForwardNetwork,
            max_ts_per_ep     = 32,
            ts_per_rollout    = 1024,
            lr_dec            = lr_dec,
            lr                = lr,
            min_lr            = min_lr,
            use_gae           = True,
            actor_kw_args     = actor_kw_args,
            critic_kw_args    = critic_kw_args,
            normalize_obs     = True,
            normalize_rewards = True,
            obs_clip          = (-10., 10.),
            reward_clip       = (-10., 10.),
            bootstrap_clip    = (-10., 10.),
            state_path        = state_path,
            load_state        = load_state,
            render            = render,
            render_gif        = render_gif,
            num_timesteps     = num_timesteps,
            device            = device,
            envs_per_proc     = envs_per_proc,
            test              = test,
            num_test_runs     = num_test_runs)
###############################################################################
# Box 2D #
###############################################################################
def lunar_lander_ppo(state_path,
                     load_state,
                     render,
                     render_gif,
                     num_timesteps,
                     device,
                     envs_per_proc,
                     random_seed,
                     test          = False,
                     num_test_runs = 1):
    """
    Launch PPO on LunarLander-v2 (discrete actions).

    Bug fix: a second, duplicated `critic_kw_args = actor_kw_args.copy()`
    line used to run AFTER the critic's hidden_size had been set to 256,
    silently reverting the critic to the actor's hidden_size of 64. The
    duplicate has been removed so the critic actually uses
    hidden_size = 256.
    """
    env_generator = lambda : gym.make('LunarLander-v2')

    #
    # Extra args for the actor critic models.
    # I find that leaky relu does much better with the lunar
    # lander env.
    #
    actor_kw_args = {}
    actor_kw_args["activation"]  = nn.LeakyReLU()
    actor_kw_args["hidden_size"] = 64

    critic_kw_args = actor_kw_args.copy()
    critic_kw_args["hidden_size"] = 256

    lr     = 0.0003
    min_lr = 0.0001

    lr_dec = LinearDecrementer(
        max_iteration = 200,
        max_value     = lr,
        min_value     = min_lr)

    #
    # Running with 2 processors works well here.
    #
    ts_per_rollout = num_procs * 1024

    run_ppo(env_generator     = env_generator,
            random_seed       = random_seed,
            ac_network        = FeedForwardNetwork,
            max_ts_per_ep     = 128,
            ts_per_rollout    = ts_per_rollout,
            batch_size        = 512,
            use_gae           = True,
            normalize_obs     = True,
            normalize_rewards = True,
            obs_clip          = (-10., 10.),
            reward_clip       = (-10., 10.),
            bootstrap_clip    = (-10., 10.),
            actor_kw_args     = actor_kw_args,
            critic_kw_args    = critic_kw_args,
            state_path        = state_path,
            load_state        = load_state,
            render            = render,
            render_gif        = render_gif,
            num_timesteps     = num_timesteps,
            device            = device,
            envs_per_proc     = envs_per_proc,
            lr_dec            = lr_dec,
            lr                = lr,
            min_lr            = min_lr,
            test              = test,
            num_test_runs     = num_test_runs)
def lunar_lander_continuous_ppo(state_path,
                                load_state,
                                render,
                                render_gif,
                                num_timesteps,
                                device,
                                envs_per_proc,
                                random_seed,
                                test          = False,
                                num_test_runs = 1):
    """
    Launch PPO on LunarLanderContinuous-v2.

    Same recipe as the discrete lunar lander, plus a tighter
    target_kl = 0.015 early-stopping threshold.
    """
    env_generator = lambda : gym.make('LunarLanderContinuous-v2')

    #
    # Lunar lander observations are organized as follows:
    #    Positions: 2
    #    Positional velocities: 2
    #    Angle: 1
    #    Angular velocities: 1
    #    Leg contact: 2
    #
    actor_kw_args = {}

    #
    # Extra args for the actor critic models.
    # I find that leaky relu does much better with the lunar
    # lander env.
    #
    actor_kw_args["activation"]  = nn.LeakyReLU()
    actor_kw_args["hidden_size"] = 64

    critic_kw_args = actor_kw_args.copy()
    critic_kw_args["hidden_size"] = 256

    lr     = 0.0003
    min_lr = 0.0001

    lr_dec = LinearDecrementer(
        max_iteration = 100,
        max_value     = lr,
        min_value     = min_lr)

    #
    # Running with 2 processors works well here.
    #
    ts_per_rollout = num_procs * 1024

    run_ppo(env_generator     = env_generator,
            random_seed       = random_seed,
            ac_network        = FeedForwardNetwork,
            max_ts_per_ep     = 32,
            ts_per_rollout    = ts_per_rollout,
            batch_size        = 512,
            actor_kw_args     = actor_kw_args,
            critic_kw_args    = critic_kw_args,
            use_gae           = True,
            normalize_obs     = True,
            normalize_rewards = True,
            obs_clip          = (-10., 10.),
            reward_clip       = (-10., 10.),
            bootstrap_clip    = (-10., 10.),
            target_kl         = 0.015,
            state_path        = state_path,
            load_state        = load_state,
            render            = render,
            render_gif        = render_gif,
            num_timesteps     = num_timesteps,
            device            = device,
            envs_per_proc     = envs_per_proc,
            lr_dec            = lr_dec,
            lr                = lr,
            min_lr            = min_lr,
            test              = test,
            num_test_runs     = num_test_runs)
def bipedal_walker_ppo(state_path,
                       load_state,
                       render,
                       render_gif,
                       num_timesteps,
                       device,
                       envs_per_proc,
                       random_seed,
                       test          = False,
                       num_test_runs = 1):
    """
    Launch PPO on BipedalWalker-v3.

    Uses a plain MLP with a lowered std offset and an asymmetric
    reward clip of (-1, 10); see the comments below for rationale.
    """
    env_generator = lambda : gym.make('BipedalWalker-v3')

    #
    # The lidar observations are the last 10.
    #
    actor_kw_args = {}

    #
    # I've found that a lower std offset greatly improves performance
    # stability in this environment. Also, most papers suggest that using Tanh
    # provides the best performance, but I find that LeakyReLU works better
    # here.
    #
    actor_kw_args["std_offset"] = 0.1
    actor_kw_args["activation"] = nn.LeakyReLU()

    #
    # You can also use an LSTM or Split Observation network here,
    # but I've found that a simple MLP learns faster both in terms
    # of iterations and wall-clock time. The LSTM is the slowest
    # of the three options, which I would assume is related to the
    # fact that velocity information is already contained in the
    # observations, but it's a bit surprising that we can't infer
    # extra "history" information from the lidar.
    #
    actor_kw_args["hidden_size"] = 128

    critic_kw_args = actor_kw_args.copy()
    critic_kw_args["hidden_size"] = 256

    lr     = 0.0003
    min_lr = 0.0001

    lr_dec = LinearDecrementer(
        max_iteration = 200,
        max_value     = lr,
        min_value     = min_lr)

    #
    # Thresholding the reward to a low of -1 doesn't drastically
    # change learning, but it does help a bit. Clipping the bootstrap
    # reward to the same range seems to help with stability.
    #
    run_ppo(env_generator     = env_generator,
            random_seed       = random_seed,
            ac_network        = FeedForwardNetwork,
            actor_kw_args     = actor_kw_args,
            critic_kw_args    = critic_kw_args,
            batch_size        = 512,
            max_ts_per_ep     = 32,
            ts_per_rollout    = 1024,
            use_gae           = True,
            normalize_adv     = True,
            normalize_obs     = True,
            normalize_rewards = True,
            obs_clip          = (-10., 10.),
            reward_clip       = (-1., 10.),
            bootstrap_clip    = (-1., 10.),
            lr_dec            = lr_dec,
            lr                = lr,
            min_lr            = min_lr,
            state_path        = state_path,
            load_state        = load_state,
            render            = render,
            render_gif        = render_gif,
            num_timesteps     = num_timesteps,
            device            = device,
            envs_per_proc     = envs_per_proc,
            test              = test,
            num_test_runs     = num_test_runs)
def bipedal_walker_hardcore_ppo(state_path,
                                load_state,
                                render,
                                render_gif,
                                num_timesteps,
                                device,
                                envs_per_proc,
                                random_seed,
                                test          = False,
                                num_test_runs = 1):
    """
    Launch PPO on BipedalWalkerHardcore-v3.

    Uses iteration-based step schedules for the learning rate and for
    the lower bounds of the reward/bootstrap clips; the two-phase
    strategy is documented in detail in the comments below.
    """
    env_generator = lambda : gym.make('BipedalWalkerHardcore-v3')

    actor_kw_args = {}
    actor_kw_args["std_offset"]  = 0.1
    actor_kw_args["activation"]  = nn.LeakyReLU()
    actor_kw_args["hidden_size"] = 256

    critic_kw_args = actor_kw_args.copy()
    critic_kw_args["hidden_size"] = 512

    lr     = 0.0001
    min_lr = 0.00001

    #
    # This environment is a pretty challenging one and can be
    # very finicky. Learning rate and reward clipping have a
    # pretty powerfull impact on results, and it can be tricky
    # to get these right. Here's what I've found works best:
    #
    #   1. First, run the problem with a pretty standard learning
    #      rate (0.0001 works well), and use a conservative reward
    #      clipping of (-1, 10). Clipping the reward at -1 results
    #      in the agent learning a good gait pretty early on.
    #
    #   2. After a while (roughly 7000->8000 iterations), the agent should
    #      have a pretty solid policy. Running tests will show that
    #      it can regularly reach scores over 300, but averaging
    #      over 100 runs will likely be in the 200s. My impression
    #      here is that the -1 reward clipping, while allowing the
    #      agent to learn a good gait quickly, also causes the agent
    #      be less concerned with failures. So, at this point, I find
    #      that adjusting the lower bound of the clip to the standard
    #      -10 value allows the agent to learn that falling is
    #      actually really bad. I also lower the learning rate here
    #      to help with stability in this last phase. This last bit
    #      of learning can take a while (~10,000 -> 11,000 iterations).
    #
    # The above is all automated with the settings used below. I typically
    # run with 4 processors. The resulting policy can regularly reach average
    # scores of 320+ over 100 test runs.
    #
    lr_dec = LinearStepMapper(
        steps        = [3900,],
        step_values  = [0.0001,],
        ending_value = 0.00001)

    # Phase switch: relax the lower reward clip from -1 to -10 after
    # iteration 4000 (see strategy notes above).
    reward_clip_min = LinearStepMapper(
        steps        = [4000,],
        step_values  = [-1.,],
        ending_value = -10.)

    bs_clip_min = LinearStepMapper(
        steps        = [4000,],
        step_values  = [-1.,],
        ending_value = -10.)

    run_ppo(env_generator     = env_generator,
            random_seed       = random_seed,
            ac_network        = FeedForwardNetwork,
            actor_kw_args     = actor_kw_args,
            critic_kw_args    = critic_kw_args,
            batch_size        = 512,
            max_ts_per_ep     = 32,
            ts_per_rollout    = 2048,
            use_gae           = True,
            normalize_obs     = True,
            normalize_rewards = True,
            obs_clip          = (-10., 10.),
            reward_clip       = (reward_clip_min, 10.),
            bootstrap_clip    = (bs_clip_min, 10.),
            lr_dec            = lr_dec,
            lr                = lr,
            min_lr            = min_lr,
            state_path        = state_path,
            load_state        = load_state,
            render            = render,
            render_gif        = render_gif,
            num_timesteps     = num_timesteps,
            device            = device,
            envs_per_proc     = envs_per_proc,
            test              = test,
            use_soft_resets   = True,
            num_test_runs     = num_test_runs)
###############################################################################
# Atari #
###############################################################################
def breakout_pixels_ppo(state_path,
                        load_state,
                        render,
                        render_gif,
                        num_timesteps,
                        device,
                        envs_per_proc,
                        random_seed,
                        test          = False,
                        num_test_runs = 1):
    """
    Launch PPO on Breakout using raw pixel observations.
    """
    #
    # NOTE: we don't want to explicitly call render for atari games.
    # They have more advanced ways of rendering, so translate the
    # render flag into gym.make kwargs and disable it.
    #
    make_kwargs = {"repeat_action_probability" : 0.0,
                   "frameskip"                 : 1}
    if render:
        render = False
        make_kwargs["render_mode"] = 'human'

    env_generator = lambda : gym.make('Breakout-v0', **make_kwargs)

    wrapper_generator = lambda : BreakoutPixelsEnvWrapper(
        env             = env_generator(),
        allow_life_loss = test,
        hist_size       = 4,
        skip_k_frames   = 4)

    # Actor and critic share identical network settings here.
    actor_kw_args  = {"activation" : nn.LeakyReLU()}
    critic_kw_args = dict(actor_kw_args)

    # Decay the learning rate from 3e-4 to 0 over 4000 iterations.
    lr     = 0.0003
    min_lr = 0.0

    lr_dec = LinearDecrementer(
        max_iteration = 4000,
        max_value     = lr,
        min_value     = min_lr)

    run_ppo(env_generator   = wrapper_generator,
            random_seed     = random_seed,
            ac_network      = AtariPixelNetwork,
            actor_kw_args   = actor_kw_args,
            critic_kw_args  = critic_kw_args,
            batch_size      = 512,
            ts_per_rollout  = 2048,
            max_ts_per_ep   = 64,
            epochs_per_iter = 30,
            reward_clip     = (-1., 1.),
            bootstrap_clip  = (-1., 1.),
            target_kl       = 0.2,
            lr_dec          = lr_dec,
            lr              = lr,
            min_lr          = min_lr,
            use_gae         = True,
            state_path      = state_path,
            load_state      = load_state,
            render          = render,
            render_gif      = render_gif,
            num_timesteps   = num_timesteps,
            device          = device,
            envs_per_proc   = envs_per_proc,
            test            = test,
            num_test_runs   = num_test_runs)
def breakout_ram_ppo(state_path,
                     load_state,
                     render,
                     render_gif,
                     num_timesteps,
                     device,
                     envs_per_proc,
                     random_seed,
                     test          = False,
                     num_test_runs = 1):
    """
    Launch PPO on Breakout using RAM observations (feed-forward
    network instead of the pixel CNN).
    """
    if render:
        #
        # NOTE: we don't want to explicitly call render for atari games.
        # They have more advanced ways of rendering.
        #
        render = False

        env_generator = lambda : gym.make(
            'Breakout-ram-v0',
            repeat_action_probability = 0.0,
            frameskip = 1,
            render_mode = 'human')
    else:
        env_generator = lambda : gym.make(
            'Breakout-ram-v0',
            repeat_action_probability = 0.0,
            frameskip = 1)

    # Wrap the env with frame history / frame skipping; life loss is
    # only allowed during testing.
    wrapper_generator = lambda : BreakoutRAMEnvWrapper(
        env             = env_generator(),
        allow_life_loss = test,
        hist_size       = 4,
        skip_k_frames   = 4)

    actor_kw_args = {}
    actor_kw_args["activation"]  = nn.LeakyReLU()
    actor_kw_args["hidden_size"] = 128

    critic_kw_args = actor_kw_args.copy()
    critic_kw_args["hidden_size"] = 256

    # Decay the learning rate from 3e-4 to 0 over 4000 iterations.
    lr     = 0.0003
    min_lr = 0.0

    lr_dec = LinearDecrementer(
        max_iteration = 4000,
        max_value     = lr,
        min_value     = min_lr)

    run_ppo(env_generator   = wrapper_generator,
            random_seed     = random_seed,
            ac_network      = FeedForwardNetwork,
            actor_kw_args   = actor_kw_args,
            critic_kw_args  = critic_kw_args,
            batch_size      = 512,
            ts_per_rollout  = 2048,
            max_ts_per_ep   = 64,
            use_gae         = True,
            epochs_per_iter = 30,
            reward_clip     = (-1., 1.),
            bootstrap_clip  = (-1., 1.),
            target_kl       = 0.2,
            lr_dec          = lr_dec,
            lr              = lr,
            min_lr          = min_lr,
            state_path      = state_path,
            load_state      = load_state,
            render          = render,
            render_gif      = render_gif,
            num_timesteps   = num_timesteps,
            device          = device,
            envs_per_proc   = envs_per_proc,
            test            = test,
            num_test_runs   = num_test_runs)
###############################################################################
# MuJoCo #
###############################################################################
def inverted_pendulum_ppo(state_path,
                          load_state,
                          render,
                          render_gif,
                          num_timesteps,
                          device,
                          envs_per_proc,
                          random_seed,
                          test          = False,
                          num_test_runs = 1):
    """
    Launch PPO on InvertedPendulum-v2 with run_ppo's default
    hyper-parameters (no launcher-specific tuning).
    """
    def env_generator():
        return gym.make('InvertedPendulum-v2')

    run_ppo(env_generator = env_generator,
            random_seed   = random_seed,
            ac_network    = FeedForwardNetwork,
            use_gae       = True,
            use_icm       = False,
            state_path    = state_path,
            load_state    = load_state,
            render        = render,
            render_gif    = render_gif,
            num_timesteps = num_timesteps,
            device        = device,
            envs_per_proc = envs_per_proc,
            test          = test,
            num_test_runs = num_test_runs)
def inverted_double_pendulum_ppo(state_path,
                                 load_state,
                                 render,
                                 render_gif,
                                 num_timesteps,
                                 device,
                                 envs_per_proc,
                                 random_seed,
                                 test          = False,
                                 num_test_runs = 1):
    """
    Launch PPO on InvertedDoublePendulum-v2.

    Entropy regularization is disabled (entropy_weight = 0.0) and the
    learning rate is held constant.
    """
    env_generator = lambda : gym.make('InvertedDoublePendulum-v2')

    #
    # Pendulum observations are organized as follows:
    #    Positions: 1
    #    Angles: 4
    #    Velocities: 3
    #    Contact forces: 3
    #
    actor_kw_args = {}
    actor_kw_args["activation"]  = nn.LeakyReLU()
    actor_kw_args["hidden_size"] = 64

    critic_kw_args = actor_kw_args.copy()
    critic_kw_args["hidden_size"] = 128

    # Effectively a constant learning rate (max == min).
    lr     = 0.0001
    min_lr = 0.0001

    lr_dec = LinearDecrementer(
        max_iteration = 1.,
        max_value     = lr,
        min_value     = min_lr)

    run_ppo(env_generator     = env_generator,
            random_seed       = random_seed,
            ac_network        = FeedForwardNetwork,
            actor_kw_args     = actor_kw_args,
            critic_kw_args    = critic_kw_args,
            batch_size        = 512,
            max_ts_per_ep     = 16,
            ts_per_rollout    = 1024,
            use_gae           = True,
            normalize_obs     = True,
            normalize_rewards = True,
            obs_clip          = (-10., 10.),
            reward_clip       = (-10., 10.),
            bootstrap_clip    = (-10., 10.),
            entropy_weight    = 0.0,
            lr_dec            = lr_dec,
            lr                = lr,
            min_lr            = min_lr,
            state_path        = state_path,
            load_state        = load_state,
            render            = render,
            render_gif        = render_gif,
            num_timesteps     = num_timesteps,
            device            = device,
            envs_per_proc     = envs_per_proc,
            test              = test,
            num_test_runs     = num_test_runs)
def ant_ppo(state_path,
            load_state,
            render,
            render_gif,
            num_timesteps,
            device,
            envs_per_proc,
            random_seed,
            test          = False,
            num_test_runs = 1):
    """
    Launch PPO on Ant-v3.

    Uses Tanh activations and a wider obs_clip of (-30, 30) than the
    other MuJoCo launchers.
    """
    env_generator = lambda : gym.make('Ant-v3')

    #
    # Ant observations are organized as follows:
    #    Positions: 13
    #    Velocities: 14
    #    Contact forces: 84
    #
    actor_kw_args = {}
    actor_kw_args["activation"]  = nn.Tanh()
    actor_kw_args["hidden_size"] = 128

    critic_kw_args = actor_kw_args.copy()
    critic_kw_args["hidden_size"] = 256

    lr     = 0.00025
    min_lr = 0.0001

    lr_dec = LinearDecrementer(
        max_iteration = 100,
        max_value     = lr,
        min_value     = min_lr)

    run_ppo(env_generator     = env_generator,
            random_seed       = random_seed,
            ac_network        = FeedForwardNetwork,
            actor_kw_args     = actor_kw_args,
            critic_kw_args    = critic_kw_args,
            batch_size        = 512,
            max_ts_per_ep     = 64,
            ts_per_rollout    = 2048,
            use_gae           = True,
            normalize_obs     = True,
            normalize_rewards = True,
            obs_clip          = (-30., 30.),
            reward_clip       = (-10., 10.),
            bootstrap_clip    = (-10., 10.),
            lr_dec            = lr_dec,
            lr                = lr,
            min_lr            = min_lr,
            state_path        = state_path,
            load_state        = load_state,
            render            = render,
            render_gif        = render_gif,
            num_timesteps     = num_timesteps,
            device            = device,
            envs_per_proc     = envs_per_proc,
            test              = test,
            num_test_runs     = num_test_runs)
def humanoid_ppo(state_path,
                 load_state,
                 render,
                 render_gif,
                 num_timesteps,
                 device,
                 envs_per_proc,
                 random_seed,
                 test          = False,
                 num_test_runs = 1):
    """
    Launch PPO on Humanoid-v3.

    Uses a bounded action distribution (distribution_min/max) and a
    constant learning rate; no obs_clip is applied (see TODO below).
    """
    env_generator = lambda : gym.make('Humanoid-v3')

    #
    # Humanoid observations are a bit mysterious. See
    # https://github.com/openai/gym/issues/585
    # Here's a best guess:
    #
    #    Positions: 22
    #    Velocities: 23
    #    Center of mass based on inertia (?): 140
    #    Center of mass based on velocity (?): 84
    #    Actuator forces (?): 23
    #    Contact forces: 84
    #
    # UPDATE: more complete information on the observations can be found
    # here:
    # https://github.com/openai/gym/blob/master/gym/envs/mujoco/humanoidstandup.py
    #
    actor_kw_args = {}

    # TODO: the current settings work pretty well, but it
    # takes a while to train. Can we do better? Some things
    # that need more exploring:
    #    std offset: is the default optimal?
    #    activation: How does leaky relu do?
    #    target_kl: we could experiment more with this.
    #    obs_clip: this seems to negatively impact results. Does that hold?
    #    entropy: we could allow entropy reg, but I'm guessing it won't help
    #             too much.
    #
    actor_kw_args["activation"]       = nn.Tanh()
    actor_kw_args["distribution_min"] = -0.4
    actor_kw_args["distribution_max"] = 0.4
    actor_kw_args["hidden_size"]      = 256

    critic_kw_args = actor_kw_args.copy()
    critic_kw_args["hidden_size"] = 512

    # Effectively a constant learning rate (max == min).
    lr     = 0.0001
    min_lr = 0.0001

    lr_dec = LinearDecrementer(
        max_iteration = 1.0,
        max_value     = lr,
        min_value     = min_lr)

    run_ppo(env_generator     = env_generator,
            random_seed       = random_seed,
            ac_network        = FeedForwardNetwork,
            actor_kw_args     = actor_kw_args,
            critic_kw_args    = critic_kw_args,
            batch_size        = 512,
            max_ts_per_ep     = 16,
            ts_per_rollout    = 1024,
            use_gae           = True,
            normalize_obs     = True,
            normalize_rewards = True,
            reward_clip       = (-10., 10.),
            lr_dec            = lr_dec,
            lr                = lr,
            min_lr            = min_lr,
            state_path        = state_path,
            load_state        = load_state,
            render            = render,
            render_gif        = render_gif,
            num_timesteps     = num_timesteps,
            device            = device,
            envs_per_proc     = envs_per_proc,
            test              = test,
            num_test_runs     = num_test_runs)
def humanoid_stand_up_ppo(state_path,
                          load_state,
                          render,
                          render_gif,
                          num_timesteps,
                          device,
                          envs_per_proc,
                          random_seed,
                          test = False,
                          num_test_runs = 1):
    """Train or test PPO on HumanoidStandup-v2.

    NOTE: this is an UNSOLVED environment.
    """
    env_generator = lambda : gym.make('HumanoidStandup-v2')

    #
    # Observation layout (best guess):
    #   positions: 22, velocities: 23,
    #   inertia-based center of mass (?): 140,
    #   velocity-based center of mass (?): 84,
    #   actuator forces (?): 23, contact forces: 84.
    # More complete information on the observations can be found here:
    # https://github.com/openai/gym/blob/master/gym/envs/mujoco/humanoidstandup.py
    #
    actor_kw_args = {
        "activation"       : nn.Tanh(),
        "distribution_min" : -0.4,
        "distribution_max" : 0.4,
        "hidden_size"      : 256,
    }

    # Critic mirrors the actor settings but with a wider hidden layer.
    critic_kw_args = dict(actor_kw_args, hidden_size = 512)

    lr     = 0.0003
    min_lr = 0.0001

    lr_dec = LinearDecrementer(
        max_iteration = 200.0,
        max_value     = lr,
        min_value     = min_lr)

    run_ppo(env_generator      = env_generator,
            random_seed        = random_seed,
            ac_network         = FeedForwardNetwork,
            actor_kw_args      = actor_kw_args,
            critic_kw_args     = critic_kw_args,
            batch_size         = 512,
            max_ts_per_ep      = 32,
            ts_per_rollout     = 1024,
            use_gae            = True,
            normalize_obs      = True,
            normalize_rewards  = True,
            obs_clip           = None,
            reward_clip        = (-10., 10.),
            lr_dec             = lr_dec,
            lr                 = lr,
            min_lr             = min_lr,
            state_path         = state_path,
            load_state         = load_state,
            render             = render,
            render_gif         = render_gif,
            num_timesteps      = num_timesteps,
            device             = device,
            envs_per_proc      = envs_per_proc,
            test               = test,
            num_test_runs      = num_test_runs)
def walker2d_ppo(state_path,
                 load_state,
                 render,
                 render_gif,
                 num_timesteps,
                 device,
                 envs_per_proc,
                 random_seed,
                 test = False,
                 num_test_runs = 1):
    """Train or test PPO on Walker2d-v3."""
    env_generator = lambda : gym.make('Walker2d-v3')

    actor_kw_args = {
        "activation"  : nn.Tanh(),
        "hidden_size" : 64,
    }

    # Critic mirrors the actor settings but with a wider hidden layer.
    critic_kw_args = dict(actor_kw_args, hidden_size = 256)

    lr     = 0.0003
    min_lr = 0.0001

    lr_dec = LinearDecrementer(
        max_iteration = 600,
        max_value     = lr,
        min_value     = min_lr)

    #
    # arXiv:2006.05990v1 suggests that value normalization significantly
    # hurts performance in walker2d. I also find this to be the case.
    #
    run_ppo(env_generator      = env_generator,
            random_seed        = random_seed,
            ac_network         = FeedForwardNetwork,
            actor_kw_args      = actor_kw_args,
            critic_kw_args     = critic_kw_args,
            batch_size         = 512,
            max_ts_per_ep      = 16,
            ts_per_rollout     = 2048,
            use_gae            = True,
            normalize_obs      = True,
            normalize_rewards  = True,
            normalize_values   = False,
            obs_clip           = (-10., 10.),
            reward_clip        = (-10., 10.),
            entropy_weight     = 0.0,
            lr_dec             = lr_dec,
            lr                 = lr,
            min_lr             = min_lr,
            state_path         = state_path,
            load_state         = load_state,
            render             = render,
            render_gif         = render_gif,
            num_timesteps      = num_timesteps,
            device             = device,
            envs_per_proc      = envs_per_proc,
            test               = test,
            num_test_runs      = num_test_runs)
def hopper_ppo(state_path,
               load_state,
               render,
               render_gif,
               num_timesteps,
               device,
               envs_per_proc,
               random_seed,
               test = False,
               num_test_runs = 1):
    """Train or test PPO on Hopper-v3."""
    env_generator = lambda : gym.make('Hopper-v3')

    actor_kw_args = {
        "activation"  : nn.Tanh(),
        "hidden_size" : 64,
    }

    # Critic mirrors the actor settings but with a wider hidden layer.
    critic_kw_args = dict(actor_kw_args, hidden_size = 256)

    lr     = 0.0003
    min_lr = 0.0001

    # Step schedule: hold 0.0003 until iteration 400, then drop to 0.0001.
    lr_dec = LinearStepMapper(
        steps        = [400,],
        step_values  = [0.0003,],
        ending_value = 0.0001)

    #
    # I find that value normalization hurts the hopper environment
    # training. That may be a result of its combination with other
    # settings in here.
    #
    run_ppo(env_generator      = env_generator,
            random_seed        = random_seed,
            ac_network         = FeedForwardNetwork,
            actor_kw_args      = actor_kw_args,
            critic_kw_args     = critic_kw_args,
            batch_size         = 512,
            max_ts_per_ep      = 16,
            ts_per_rollout     = 2048,
            use_gae            = True,
            normalize_obs      = True,
            normalize_rewards  = True,
            normalize_values   = False,
            obs_clip           = (-10., 10.),
            reward_clip        = (-10., 10.),
            entropy_weight     = 0.0,
            lr_dec             = lr_dec,
            lr                 = lr,
            min_lr             = min_lr,
            state_path         = state_path,
            load_state         = load_state,
            render             = render,
            render_gif         = render_gif,
            num_timesteps      = num_timesteps,
            device             = device,
            envs_per_proc      = envs_per_proc,
            test               = test,
            num_test_runs      = num_test_runs)
def half_cheetah_ppo(state_path,
                     load_state,
                     render,
                     render_gif,
                     num_timesteps,
                     device,
                     envs_per_proc,
                     random_seed,
                     test = False,
                     num_test_runs = 1):
    """Train or test PPO on HalfCheetah-v3."""
    env_generator = lambda : gym.make('HalfCheetah-v3')

    actor_kw_args = {
        "activation"  : nn.LeakyReLU(),
        "hidden_size" : 128,
    }

    # Critic mirrors the actor settings but with a wider hidden layer.
    critic_kw_args = dict(actor_kw_args, hidden_size = 256)

    lr     = 0.0001
    min_lr = 0.0001

    lr_dec = LinearDecrementer(
        max_iteration = 1.0,
        max_value     = lr,
        min_value     = min_lr)

    #
    # Normalizing values seems to stabilize results in this env.
    #
    run_ppo(env_generator      = env_generator,
            random_seed        = random_seed,
            ac_network         = FeedForwardNetwork,
            actor_kw_args      = actor_kw_args,
            critic_kw_args     = critic_kw_args,
            batch_size         = 512,
            max_ts_per_ep      = 32,
            ts_per_rollout     = 1024,
            use_gae            = True,
            normalize_obs      = True,
            normalize_rewards  = True,
            obs_clip           = (-10., 10.),
            reward_clip        = (-10., 10.),
            lr_dec             = lr_dec,
            lr                 = lr,
            min_lr             = min_lr,
            state_path         = state_path,
            load_state         = load_state,
            render             = render,
            render_gif         = render_gif,
            num_timesteps      = num_timesteps,
            device             = device,
            envs_per_proc      = envs_per_proc,
            test               = test,
            num_test_runs      = num_test_runs)
def swimmer_ppo(state_path,
                load_state,
                render,
                render_gif,
                num_timesteps,
                device,
                envs_per_proc,
                random_seed,
                test = False,
                num_test_runs = 1):
    """Train or test PPO on Swimmer-v3."""
    env_generator = lambda : gym.make('Swimmer-v3')

    actor_kw_args = {
        "activation"  : nn.LeakyReLU(),
        "hidden_size" : 64,
    }

    # Critic mirrors the actor settings but with a wider hidden layer.
    critic_kw_args = dict(actor_kw_args, hidden_size = 256)

    lr     = 0.0001
    min_lr = 0.0001

    lr_dec = LinearDecrementer(
        max_iteration = 1.0,
        max_value     = lr,
        min_value     = min_lr)

    run_ppo(env_generator      = env_generator,
            random_seed        = random_seed,
            ac_network         = FeedForwardNetwork,
            actor_kw_args      = actor_kw_args,
            critic_kw_args     = critic_kw_args,
            batch_size         = 512,
            max_ts_per_ep      = 32,
            ts_per_rollout     = 2048,
            use_gae            = True,
            normalize_obs      = True,
            normalize_rewards  = True,
            obs_clip           = (-10., 10.),
            reward_clip        = (-10., 10.),
            lr_dec             = lr_dec,
            lr                 = lr,
            min_lr             = min_lr,
            state_path         = state_path,
            load_state         = load_state,
            render             = render,
            render_gif         = render_gif,
            num_timesteps      = num_timesteps,
            device             = device,
            envs_per_proc      = envs_per_proc,
            test               = test,
            num_test_runs      = num_test_runs)
###############################################################################
# Multi-Agent #
###############################################################################
def robot_warehouse_tiny(
    state_path,
    load_state,
    render,
    render_gif,
    num_timesteps,
    device,
    envs_per_proc,
    random_seed,
    test = False,
    num_test_runs = 1):
    """Train or test multi-agent PPO on the tiny 3-agent robot warehouse."""
    env_generator = lambda : gym.make('rware-tiny-3ag-v1')

    actor_kw_args = {
        "activation"  : nn.LeakyReLU(),
        "hidden_size" : 256,
    }

    # Critic mirrors the actor settings but with a wider hidden layer.
    critic_kw_args = dict(actor_kw_args, hidden_size = 512)

    lr     = 0.001
    min_lr = 0.00001

    lr_dec = LinearDecrementer(
        max_iteration = 4000,
        max_value     = lr,
        min_value     = min_lr)

    entropy_weight     = 0.05
    min_entropy_weight = 0.01

    entropy_dec = LinearDecrementer(
        max_iteration = 4000,
        max_value     = entropy_weight,
        min_value     = min_entropy_weight)

    #
    # This environment comes from arXiv:2006.07869v4. It is a very sparse
    # reward environment, and there are series of complex actions that must
    # occur in between rewards. Because of this, using a large maximum
    # timesteps per episode results in faster learning. arXiv:2103.01955v2
    # suggests using smaller epoch counts for complex environments and
    # large batch sizes (single batches if possible). Because of the sparse
    # rewards, I've also increased the entropy weight to incentivize
    # exploration. We could also experiment with using ICM here. I've
    # disabled rewards and observation normalization and clipping, mainly
    # because they aren't mentioned in arXiv:2103.01955v2. I've noticed
    # that performance tends to go down a bit when these normalizations
    # are enabled.
    #
    run_ppo(env_generator      = env_generator,
            random_seed        = random_seed,
            ac_network         = FeedForwardNetwork,
            actor_kw_args      = actor_kw_args,
            critic_kw_args     = critic_kw_args,
            batch_size         = 10000,
            epochs_per_iter    = 5,
            max_ts_per_ep      = 512,
            ts_per_rollout     = 2048,
            is_multi_agent     = True,
            use_gae            = True,
            normalize_values   = True,
            normalize_obs      = False,
            obs_clip           = None,
            normalize_rewards  = False,
            reward_clip        = None,
            entropy_weight     = entropy_weight,
            min_entropy_weight = min_entropy_weight,
            entropy_dec        = entropy_dec,
            lr_dec             = lr_dec,
            lr                 = lr,
            min_lr             = min_lr,
            state_path         = state_path,
            load_state         = load_state,
            render             = render,
            render_gif         = render_gif,
            num_timesteps      = num_timesteps,
            device             = device,
            envs_per_proc      = envs_per_proc,
            test               = test,
            num_test_runs      = num_test_runs)
| 35.153896
| 96
| 0.470968
| 5,427
| 54,594
| 4.391008
| 0.100608
| 0.052875
| 0.0577
| 0.031641
| 0.740957
| 0.726521
| 0.712673
| 0.70512
| 0.69958
| 0.686697
| 0
| 0.034338
| 0.451625
| 54,594
| 1,552
| 97
| 35.176546
| 0.761641
| 0.118566
| 0
| 0.858742
| 0
| 0
| 0.021687
| 0.002058
| 0
| 0
| 0
| 0.000644
| 0
| 1
| 0.018949
| false
| 0
| 0.010336
| 0
| 0.029285
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
55e69bf09bf7d572e40e8c9a018b08adde8108c4
| 727
|
py
|
Python
|
clear_path.py
|
williamchangTW/file_correctness
|
276029a7a6ab511f5a6748d041eaa4b4113b91b9
|
[
"MIT"
] | null | null | null |
clear_path.py
|
williamchangTW/file_correctness
|
276029a7a6ab511f5a6748d041eaa4b4113b91b9
|
[
"MIT"
] | null | null | null |
clear_path.py
|
williamchangTW/file_correctness
|
276029a7a6ab511f5a6748d041eaa4b4113b91b9
|
[
"MIT"
] | null | null | null |
import os, shutil
def clearPATH(data_path, model_path, cor_path):
    """Delete generated artifacts: __pycache__ directories and train/test CSVs.

    Paths are built by plain string concatenation, so ``data_path``,
    ``model_path``, and ``cor_path`` are expected to already end in a
    path separator. Missing files/directories are silently skipped.
    """
    # __pycache__ directories to remove, if present.
    cache_dirs = (
        data_path + "__pycache__",
        data_path + cor_path + "__pycache__",
        model_path + "__pycache__",
        model_path + cor_path + "__pycache__",
    )
    for cache_dir in cache_dirs:
        if os.path.exists(cache_dir):
            shutil.rmtree(cache_dir)

    # Dataset CSVs to remove, if present.
    for csv_name in ("train.csv", "test.csv"):
        csv_path = data_path + cor_path + csv_name
        if os.path.exists(csv_path):
            os.remove(csv_path)
| 45.4375
| 66
| 0.713893
| 109
| 727
| 4.256881
| 0.165138
| 0.155172
| 0.213362
| 0.181034
| 0.892241
| 0.892241
| 0.801724
| 0.540948
| 0.172414
| 0.172414
| 0
| 0
| 0.129298
| 727
| 15
| 67
| 48.466667
| 0.733017
| 0
| 0
| 0
| 0
| 0
| 0.167813
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.071429
| false
| 0
| 0.071429
| 0
| 0.142857
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
3619117c91a1b66297df9608688449e4000c82fe
| 3,219
|
py
|
Python
|
selfdrive/car/ocelot/values.py
|
R3D347HR4Y/ArnePilot
|
5d062690aaa6c6adf08ae3949b7f3df95ee1a204
|
[
"MIT"
] | null | null | null |
selfdrive/car/ocelot/values.py
|
R3D347HR4Y/ArnePilot
|
5d062690aaa6c6adf08ae3949b7f3df95ee1a204
|
[
"MIT"
] | null | null | null |
selfdrive/car/ocelot/values.py
|
R3D347HR4Y/ArnePilot
|
5d062690aaa6c6adf08ae3949b7f3df95ee1a204
|
[
"MIT"
] | 1
|
2022-02-13T23:08:14.000Z
|
2022-02-13T23:08:14.000Z
|
# flake8: noqa
from selfdrive.car import dbc_dict
from cereal import car
Ecu = car.CarParams.Ecu
# Steer torque limits for the lateral controller.
class SteerLimitParams:
  # Maximum steer torque command.
  STEER_MAX = 800
  STEER_DELTA_UP = 2  # ramp-up rate per frame; 1.5s time to peak torque
  STEER_DELTA_DOWN = 2  # always lower than 45 otherwise the Rav4 faults (Prius seems ok with 50)
  STEER_ERROR_MAX = 350  # max delta between torque cmd and torque motor
class CAR:
  """Identifiers for the car models supported by this port."""
  SMART_ROADSTER_COUPE = "SMART ROADSTER COUPE 2003-2006"
# Candidate CAN fingerprints: each entry maps CAN message id -> payload
# length (presumably DLC in bytes — confirm against the fingerprinting
# code). The three variants differ only in the length recorded for
# message 180.
FINGERPRINTS = {
  CAR.SMART_ROADSTER_COUPE: [{
    36: 8, 37: 8, 170: 8, 180: 8, 186: 4, 253:8, 254:8, 255: 8, 426: 6, 452: 8, 464: 8, 466: 8, 467: 8, 512: 6, 513:6, 547: 8, 548: 8, 552: 4, 608: 8, 610: 5, 643: 7, 705: 8, 740: 5, 800: 8, 835: 8, 836: 8, 849: 4, 869: 7, 870: 7, 871: 2, 896: 8, 897: 8, 900: 6, 902: 6, 905: 8, 911: 8, 916: 2, 921: 8, 933: 8, 944: 8, 945: 8, 951: 8, 955: 4, 956: 8, 979: 2, 998: 5, 999: 7, 1000: 8, 1001: 8, 1017: 8, 1041: 8, 1042: 8, 1043: 8, 1044: 8, 1056: 8, 1059: 1, 1114: 8, 1161: 8, 1162: 8, 1163: 8, 1196: 8, 1222: 8, 1224:8, 1227: 8, 1235: 8, 1279: 8, 1552: 8, 1553: 8, 1556: 8, 1557: 8, 1561: 8, 1562: 8, 1568: 8, 1569: 8, 1570: 8, 1571: 8, 1572: 8, 1592: 8, 1596: 8, 1597: 8, 1600: 8, 1664: 8, 1779: 8, 1904: 8, 1912: 8, 1990: 8, 1998: 8, 2016: 8, 2017: 8, 2018: 8, 2019: 8, 2020: 8, 2021: 8, 2022: 8, 2023: 8, 2024: 8
  },
  {
    36: 8, 37: 8, 170: 8, 180: 1, 186: 4, 253:8, 254:8, 255: 8, 426: 6, 452: 8, 464: 8, 466: 8, 467: 8, 512: 6, 513:6, 547: 8, 548: 8, 552: 4, 608: 8, 610: 5, 643: 7, 705: 8, 740: 5, 800: 8, 835: 8, 836: 8, 849: 4, 869: 7, 870: 7, 871: 2, 896: 8, 897: 8, 900: 6, 902: 6, 905: 8, 911: 8, 916: 2, 921: 8, 933: 8, 944: 8, 945: 8, 951: 8, 955: 4, 956: 8, 979: 2, 998: 5, 999: 7, 1000: 8, 1001: 8, 1017: 8, 1041: 8, 1042: 8, 1043: 8, 1044: 8, 1056: 8, 1059: 1, 1114: 8, 1161: 8, 1162: 8, 1163: 8, 1196: 8, 1222: 8, 1224:8, 1227: 8, 1235: 8, 1279: 8, 1552: 8, 1553: 8, 1556: 8, 1557: 8, 1561: 8, 1562: 8, 1568: 8, 1569: 8, 1570: 8, 1571: 8, 1572: 8, 1592: 8, 1596: 8, 1597: 8, 1600: 8, 1664: 8, 1779: 8, 1904: 8, 1912: 8, 1990: 8, 1998: 8, 2016: 8, 2017: 8, 2018: 8, 2019: 8, 2020: 8, 2021: 8, 2022: 8, 2023: 8, 2024: 8
  },
  {
    36: 8, 37: 8, 170: 8, 180: 4, 186: 4, 253:8, 254:8, 255: 8, 426: 6, 452: 8, 464: 8, 466: 8, 467: 8, 512: 6, 513:6, 547: 8, 548: 8, 552: 4, 608: 8, 610: 5, 643: 7, 705: 8, 740: 5, 800: 8, 835: 8, 836: 8, 849: 4, 869: 7, 870: 7, 871: 2, 896: 8, 897: 8, 900: 6, 902: 6, 905: 8, 911: 8, 916: 2, 921: 8, 933: 8, 944: 8, 945: 8, 951: 8, 955: 4, 956: 8, 979: 2, 998: 5, 999: 7, 1000: 8, 1001: 8, 1017: 8, 1041: 8, 1042: 8, 1043: 8, 1044: 8, 1056: 8, 1059: 1, 1114: 8, 1161: 8, 1162: 8, 1163: 8, 1196: 8, 1222: 8, 1224:8, 1227: 8, 1235: 8, 1279: 8, 1552: 8, 1553: 8, 1556: 8, 1557: 8, 1561: 8, 1562: 8, 1568: 8, 1569: 8, 1570: 8, 1571: 8, 1572: 8, 1592: 8, 1596: 8, 1597: 8, 1600: 8, 1664: 8, 1779: 8, 1904: 8, 1912: 8, 1990: 8, 1998: 8, 2016: 8, 2017: 8, 2018: 8, 2019: 8, 2020: 8, 2021: 8, 2022: 8, 2023: 8, 2024: 8
  }]
}
# Don't use these fingerprints for fingerprinting, they are still needed for ECU detection
# Torque level above which the driver is considered to be overriding —
# TODO confirm units against the carstate code.
STEER_THRESHOLD = 100

# DBC file set (powertrain, radar, chassis) per supported car.
DBC = {
  CAR.SMART_ROADSTER_COUPE: dbc_dict('ocelot_can', 'ford_focus_adas', 'ford_transit_connect_2015'),
}
| 84.710526
| 814
| 0.574713
| 687
| 3,219
| 2.660844
| 0.25182
| 0.028446
| 0.039387
| 0.034464
| 0.705689
| 0.705689
| 0.705689
| 0.698578
| 0.698578
| 0.698578
| 0
| 0.533656
| 0.229264
| 3,219
| 37
| 815
| 87
| 0.203144
| 0.082324
| 0
| 0
| 0
| 0
| 0.027146
| 0.008483
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.08
| 0
| 0.36
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
362018fbba00b36e9cdcb2bdb66d0872512d46b2
| 3,285
|
py
|
Python
|
J_Mathematical_Modeling/Section 3/solutionExercise2.py
|
sylvain2002/CBM101
|
4d9dc4264ce81cc2af58ceaff96fd0ed7a570af5
|
[
"MIT"
] | null | null | null |
J_Mathematical_Modeling/Section 3/solutionExercise2.py
|
sylvain2002/CBM101
|
4d9dc4264ce81cc2af58ceaff96fd0ed7a570af5
|
[
"MIT"
] | null | null | null |
J_Mathematical_Modeling/Section 3/solutionExercise2.py
|
sylvain2002/CBM101
|
4d9dc4264ce81cc2af58ceaff96fd0ed7a570af5
|
[
"MIT"
] | null | null | null |
from IPython.display import display, Latex

# NOTE: all Latex(...) strings below are raw strings (r'...'). The original
# used plain strings, in which '\nu' is parsed as a newline followed by 'u',
# corrupting every formula containing \nu (and emitting invalid-escape
# warnings for \p, \B, \d). Raw strings preserve the LaTeX backslashes.

print("The are now 3 random variables, the number of mRNAs that can take all integer values m=0 to infinity, the number of proteins that can take all integer values n=0 to infinity, and the state of the promoter that can take only two values on and off, or 1 and 0.")
print("")
print("A state of the system is now characterized by a triplet of numbers (m,n,p) where m and n are integers and p=0 or 1 is the state of the promoter. All the transitions between states with m,n and m+-1,n+-1 accounted for in the previous model where the promoter is always on are still possible irrespective of the state of the promoter, with one exception though: the mRNA synthesis reaction (terms with rate nu_0) is not possible when the promoter is off. Those reactions do not change the state of the promoter: so they are transitions between states (m,n,p) and (m',n',p') only with p'=p. In addition, there are now elementary reactions that activate , and inactivate the promoter, with respective rates k_on and k_off. Those reactions involve the states (m,n,p) and (m'=m,n'=n,p') where p' is not equal to p.")
print("")
print("Because the random variable that describe the state of the promoter is binary, it is more convenient to write separately the CMEs for the p=0 state and the p=1 state.")
print("According to the statements above, the right hand side of the CME for P_{m,n,p=0} will include all terms from the previous model (which preserve the off state but modify m and n) with the exception of the mRNA transcription term (rate nu_0):")
display(Latex(r'$ \nu_1*m* \displaystyle \Bigg( P_{m,n-1,0}(t)-P_{m,n,0}(t) \Bigg) + d_0* \displaystyle \Bigg( (m+1)*P_{m+1,n,0}(t)-m*P_{m,n,0}(t) \Bigg)+ d_1* \displaystyle \Bigg( (n+1)*P_{m,n+1,0}(t)-n*P_{m,n,0}(t) \Bigg),$'))
print(" + the term corresponding to the inactivation of an already active promoter:")
display(Latex(r'$ k_{off} * P_{m,n,1}(t),$'))
print(" - the term corresponding to the activation of an inactive promoter:")
display(Latex(r'$ - k_{on} * P_{m,n,0}(t),$'))
print("leading to the final form for the CME:")
display(Latex(r'$ \partial P_{m,n,0}(t)/\partial t = k_{off} * P_{m,n,1}(t)- k_{on} * P_{m,n,0}(t)+\nu_1*m* \displaystyle \Bigg( P_{m,n-1,0}(t)-P_{m,n,0}(t) \Bigg) + d_0* \displaystyle \Bigg( (m+1)*P_{m+1,n,0}(t)-m*P_{m,n,0}(t) \Bigg)+ d_1* \displaystyle \Bigg( (n+1)*P_{m,n+1,0}(t)-n*P_{m,n,0}(t) \Bigg),$'))
print("Likewise, the CME for the states (m,n,p) where p=1 (active promoter) will include all terms from the previous model (which preserve the on state but modify m and n), minus the term corresponding to the inactivation of the active promoter and + the term corresponding to the promoter activation: ")
display(Latex(r'$ \partial P_{m,n,1}(t)/\partial t= - k_{off} * P_{m,n,1}(t)+ k_{on} * P_{m,n,0}(t)+ \nu_0* \displaystyle \Bigg( P_{m-1,n,1}(t)-P_{m,n,1}(t) \Bigg)+ \nu_1*m* \displaystyle \Bigg( P_{m,n-1,1}(t)-P_{m,n,1}(t) \Bigg) + d_0* \displaystyle \Bigg( (m+1)*P_{m+1,n,1}(t)-m*P_{m,n,1}(t) \Bigg)+ d_1* \displaystyle \Bigg( (n+1)*P_{m,n+1,1}(t)-n*P_{m,n,1}(t) \Bigg)$'))
print("It is noteworthy that in the particular cases where m=0 and n=0, the terms proportional to P_{m,n-1,p} and P_{m-1,n,p} with p=0 and 1 will disappear from the equations.")
| 109.5
| 817
| 0.697108
| 675
| 3,285
| 3.317037
| 0.198519
| 0.029477
| 0.034837
| 0.026798
| 0.421617
| 0.355069
| 0.261724
| 0.217508
| 0.209915
| 0.199196
| 0
| 0.02669
| 0.144597
| 3,285
| 30
| 818
| 109.5
| 0.770107
| 0
| 0
| 0.117647
| 0
| 0.529412
| 0.920572
| 0.115946
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.058824
| 0
| 0.058824
| 0.647059
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
|
0
| 5
|
3666a673da7c0a237fa683a144bcf100ded70348
| 130
|
py
|
Python
|
scrub.py
|
UnitedBagels/BagelsVsCats
|
e9970be648c5d8f7119eba18d63ff85b8666d066
|
[
"MIT"
] | null | null | null |
scrub.py
|
UnitedBagels/BagelsVsCats
|
e9970be648c5d8f7119eba18d63ff85b8666d066
|
[
"MIT"
] | null | null | null |
scrub.py
|
UnitedBagels/BagelsVsCats
|
e9970be648c5d8f7119eba18d63ff85b8666d066
|
[
"MIT"
] | null | null | null |
class Scrub(object):
    """A simple entity holding a single health value."""

    # Class-level default; overwritten per instance in __init__.
    health = 0

    def __init__(self, health):
        """Record the starting health."""
        self.health = health

    def printHealth(self):
        """Write the current health to stdout."""
        print(self.health)
| 16.25
| 27
| 0.715385
| 18
| 130
| 4.944444
| 0.555556
| 0.337079
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.009174
| 0.161538
| 130
| 8
| 28
| 16.25
| 0.807339
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0
| 0
| 0.666667
| 0.333333
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 5
|
367927e62dc91c0dceeefd06ecd9a5b48caf175b
| 157
|
py
|
Python
|
actions/force_gc.py
|
martezr/stackstorm-nomad
|
0659aef2be2e0b8247e32b85f4f37f16181c1068
|
[
"Apache-2.0"
] | 1
|
2021-12-26T15:43:51.000Z
|
2021-12-26T15:43:51.000Z
|
actions/force_gc.py
|
martezr/stackstorm-nomad
|
0659aef2be2e0b8247e32b85f4f37f16181c1068
|
[
"Apache-2.0"
] | null | null | null |
actions/force_gc.py
|
martezr/stackstorm-nomad
|
0659aef2be2e0b8247e32b85f4f37f16181c1068
|
[
"Apache-2.0"
] | null | null | null |
from lib import action
class NomadForceGcAction(action.NomadBaseAction):
    """StackStorm action that forces a Nomad server garbage collection."""

    def run(self):
        # Delegates to the Nomad client's system endpoint provided by the
        # base action; returns whatever that call returns.
        return self.nomad.system.initiate_garbage_collection()
| 22.428571
| 62
| 0.770701
| 18
| 157
| 6.611111
| 0.888889
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.152866
| 157
| 6
| 63
| 26.166667
| 0.894737
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| false
| 0
| 0.25
| 0.25
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 5
|
36a00b0d8f514bcc513ed4f42ee6540928569ffe
| 188
|
py
|
Python
|
kAI/neural_network/inter_processor/__init__.py
|
theswampire/matura
|
ab193d6ab4e85a7fbed49bdffddf48aa23467c3c
|
[
"MIT"
] | 1
|
2022-01-29T07:58:00.000Z
|
2022-01-29T07:58:00.000Z
|
kAI/neural_network/inter_processor/__init__.py
|
theswampire/matura
|
ab193d6ab4e85a7fbed49bdffddf48aa23467c3c
|
[
"MIT"
] | null | null | null |
kAI/neural_network/inter_processor/__init__.py
|
theswampire/matura
|
ab193d6ab4e85a7fbed49bdffddf48aa23467c3c
|
[
"MIT"
] | null | null | null |
from .base import BaseInterProcessor
from .visualizer import ImpactRealtimeVizInter, RealtimeVizInter

# Explicit public API of the inter_processor package.
__all__ = [
    "BaseInterProcessor", 'ImpactRealtimeVizInter', 'RealtimeVizInter'
]
| 26.857143
| 70
| 0.81383
| 13
| 188
| 11.461538
| 0.615385
| 0.510067
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.111702
| 188
| 6
| 71
| 31.333333
| 0.892216
| 0
| 0
| 0
| 0
| 0
| 0.297872
| 0.117021
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.4
| 0
| 0.4
| 0
| 1
| 0
| 1
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
36bcca87f75999606445cc89faf7f212c2ff3c67
| 1,129
|
py
|
Python
|
tests/test_utils.py
|
rschoon/bert
|
5aeb394dd7c1fcf5995d2f7cd6a25ef3ac81ce13
|
[
"MIT"
] | null | null | null |
tests/test_utils.py
|
rschoon/bert
|
5aeb394dd7c1fcf5995d2f7cd6a25ef3ac81ce13
|
[
"MIT"
] | null | null | null |
tests/test_utils.py
|
rschoon/bert
|
5aeb394dd7c1fcf5995d2f7cd6a25ef3ac81ce13
|
[
"MIT"
] | null | null | null |
import unittest
class TestExpectFileMode(unittest.TestCase):
    """Unit tests for bert.utils.expect_file_mode."""

    def setUp(self):
        # Imported lazily so collection doesn't fail before the test runs.
        from bert.utils import expect_file_mode
        self.expect_file_mode = expect_file_mode

    def test_nil(self):
        # Empty-ish inputs map to None.
        for empty_input in (None, ""):
            self.assertEqual(self.expect_file_mode(empty_input), None)

    def test_int(self):
        # Integer modes pass through unchanged.
        self.assertEqual(self.expect_file_mode(0o422), 0o422)

    def test_invalid(self):
        # Malformed symbolic modes raise ValueError.
        for bad_mode in ("o=u", "x=g", "abc"):
            with self.assertRaises(ValueError):
                self.expect_file_mode(bad_mode)

    def test_str(self):
        # Symbolic modes convert to the expected octal values.
        expected_by_mode = [
            ("o=x", 0o001),
            ("o=rwx", 0o007),
            ("o=rw", 0o006),
            ("u=rwx,g=rx,o=rx", 0o755),
            ("g=rx,u=rwx,o=rx", 0o755),
            ("u=r,g=r,o=r", 0o444),
        ]
        for mode_str, expected in expected_by_mode:
            self.assertEqual(self.expect_file_mode(mode_str), expected)
| 34.212121
| 73
| 0.672276
| 157
| 1,129
| 4.617834
| 0.248408
| 0.206897
| 0.289655
| 0.322759
| 0.666207
| 0.664828
| 0.664828
| 0.311724
| 0.113103
| 0
| 0
| 0.035359
| 0.198406
| 1,129
| 32
| 74
| 35.28125
| 0.765746
| 0
| 0
| 0.125
| 0
| 0
| 0.054965
| 0
| 0
| 0
| 0
| 0
| 0.5
| 1
| 0.208333
| false
| 0
| 0.083333
| 0
| 0.333333
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
36f87d9e3f14d5454824fe2a0247b8e1242d6dc8
| 57
|
py
|
Python
|
src/configs/__init__.py
|
Sum02dean/GCN-STRING
|
a13f60b475cc8a2ae75da2faf8f502a36f866d15
|
[
"MIT"
] | 1
|
2021-12-07T08:31:28.000Z
|
2021-12-07T08:31:28.000Z
|
src/configs/__init__.py
|
Sum02dean/GCN-STRING
|
a13f60b475cc8a2ae75da2faf8f502a36f866d15
|
[
"MIT"
] | 18
|
2022-03-02T12:13:55.000Z
|
2022-03-17T10:58:25.000Z
|
src/configs/__init__.py
|
Sum02dean/GCN-STRING
|
a13f60b475cc8a2ae75da2faf8f502a36f866d15
|
[
"MIT"
] | null | null | null |
# NOTE(review): this package __init__ imports DcaLab only to print it at
# import time — presumably leftover debugging output; confirm before
# removing the print.
from src.utilities.gcn_utills import DcaLab as D
print(D)
| 28.5
| 48
| 0.824561
| 11
| 57
| 4.181818
| 0.909091
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.105263
| 57
| 2
| 49
| 28.5
| 0.901961
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0.5
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 1
|
0
| 5
|
36fd273e31f47673e06b032859cf3fae4c2d46c1
| 1,793
|
py
|
Python
|
tests/test_transaction_validation.py
|
hussain18/PliantChain
|
fac5b998efa8196182b31980b20626a7e98a097f
|
[
"MIT"
] | 2
|
2021-12-28T16:59:28.000Z
|
2022-01-06T12:23:09.000Z
|
tests/test_transaction_validation.py
|
hussain18/PliantChain
|
fac5b998efa8196182b31980b20626a7e98a097f
|
[
"MIT"
] | null | null | null |
tests/test_transaction_validation.py
|
hussain18/PliantChain
|
fac5b998efa8196182b31980b20626a7e98a097f
|
[
"MIT"
] | 3
|
2021-11-11T09:21:10.000Z
|
2021-12-28T05:49:45.000Z
|
import time
import pytest
from brownie import network, config, PLTToken
from scripts.helpful_scripts import LOCAL_BLOCKCHAIN_ENVIRONMENTS, get_account
from scripts.helpful_scripts import get_account
from scripts.chainlink_api_scripts.make_transaction import make_transaction
from web3 import Web3
AMOUNT = Web3.toWei(1, 'ether')
def _assert_transfer(org_address, sender, receiver, jwt_token, expected_delta):
    """Shared body for the transfer tests.

    Skips on local chains, sends AMOUNT from ``sender`` to ``receiver``,
    waits for the transaction to settle, and asserts the sender's balance
    changed by ``expected_delta``. Leading underscore keeps pytest from
    collecting this helper as a test.
    """
    # arrange
    if(config['networks'][network.show_active()] in LOCAL_BLOCKCHAIN_ENVIRONMENTS):
        pytest.skip("This test is only designed for testnets")

    account = get_account(id=sender)
    my_token = PLTToken[-1]
    account_balance = my_token.balanceOf(account.address)

    # act
    make_transaction(org_address, sender, receiver, jwt_token, AMOUNT)
    # Give the testnet time to mine the transaction before re-reading state.
    time.sleep(30)
    transferred_amount = abs(my_token.balanceOf(
        account.address) - account_balance)

    # assert
    assert transferred_amount == expected_delta


def test_valid_transaction(org_address, working_api_sender, working_api_receiver, working_api_jwtToken):
    """A valid, authorized transfer moves exactly AMOUNT."""
    _assert_transfer(org_address, working_api_sender,
                     working_api_receiver, working_api_jwtToken, AMOUNT)


def test_invalid_authority_transfer(org_address, invalid_auth_sender,
                                    invalid_auth_receiver, invalid_auth_jwtToken):
    """A transfer with invalid authority moves nothing."""
    _assert_transfer(org_address, invalid_auth_sender,
                     invalid_auth_receiver, invalid_auth_jwtToken, 0)
| 33.830189
| 104
| 0.742331
| 219
| 1,793
| 5.753425
| 0.283105
| 0.055556
| 0.050794
| 0.073016
| 0.775397
| 0.72619
| 0.72619
| 0.72619
| 0.72619
| 0.72619
| 0
| 0.007529
| 0.185165
| 1,793
| 52
| 105
| 34.480769
| 0.854894
| 0.020636
| 0
| 0.424242
| 0
| 0
| 0.056604
| 0
| 0
| 0
| 0
| 0
| 0.060606
| 1
| 0.060606
| false
| 0
| 0.212121
| 0
| 0.272727
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
7fc62ffaa9ebc7aa3307f43b56b48841f89c6f08
| 86
|
py
|
Python
|
src/actuariat_python/plots/__init__.py
|
Pandinosaurus/actuariat_python
|
77533a75fcc63a5a7ebca664a19a24c9439670ee
|
[
"MIT"
] | 5
|
2017-03-13T15:58:40.000Z
|
2021-02-03T12:52:58.000Z
|
src/actuariat_python/plots/__init__.py
|
Pandinosaurus/actuariat_python
|
77533a75fcc63a5a7ebca664a19a24c9439670ee
|
[
"MIT"
] | 13
|
2015-06-14T22:01:37.000Z
|
2021-01-05T13:57:00.000Z
|
src/actuariat_python/plots/__init__.py
|
Pandinosaurus/actuariat_python
|
77533a75fcc63a5a7ebca664a19a24c9439670ee
|
[
"MIT"
] | 9
|
2017-01-15T15:06:55.000Z
|
2022-01-18T20:42:48.000Z
|
"""
@file
@brief Shortcuts to plots
"""
from .pyramid import plot_population_pyramid
| 12.285714
| 44
| 0.755814
| 11
| 86
| 5.727273
| 0.909091
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.139535
| 86
| 6
| 45
| 14.333333
| 0.851351
| 0.360465
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
7fd47e3aa795acba2b10be78c226d15bea716232
| 249
|
py
|
Python
|
gym_mupen64plus/__init__.py
|
programWhiz/gym-mupen64plus
|
9dedcc2514ac9d70abf46279ce065c4465ef3f37
|
[
"MIT"
] | null | null | null |
gym_mupen64plus/__init__.py
|
programWhiz/gym-mupen64plus
|
9dedcc2514ac9d70abf46279ce065c4465ef3f37
|
[
"MIT"
] | null | null | null |
gym_mupen64plus/__init__.py
|
programWhiz/gym-mupen64plus
|
9dedcc2514ac9d70abf46279ce065c4465ef3f37
|
[
"MIT"
] | null | null | null |
import logging

# Re-export the environment classes at package level.
from gym_mupen64plus.envs.MarioKart64.mario_kart_env import MarioKartEnv
from gym_mupen64plus.envs.Smash.smash_env import SmashEnv
from gym_mupen64plus.envs.Mario64.mario64_env import Mario64_Env

# Package-level logger.
logger = logging.getLogger(__name__)
| 35.571429
| 72
| 0.875502
| 35
| 249
| 5.885714
| 0.485714
| 0.101942
| 0.262136
| 0.320388
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.060606
| 0.072289
| 249
| 6
| 73
| 41.5
| 0.831169
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.8
| 0
| 0.8
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
3d22675fc14d8a888dacc37364dc8f0879394bcc
| 119
|
py
|
Python
|
app/api/__init__.py
|
kay-cottage/Flask_Keras_Xception_API
|
7d54530bc332daf9e956130aa94f06de38f25108
|
[
"MIT"
] | 24
|
2021-03-27T04:08:37.000Z
|
2022-03-30T13:25:48.000Z
|
app/api/__init__.py
|
kay-cottage/Flask_Keras_Xception_API
|
7d54530bc332daf9e956130aa94f06de38f25108
|
[
"MIT"
] | null | null | null |
app/api/__init__.py
|
kay-cottage/Flask_Keras_Xception_API
|
7d54530bc332daf9e956130aa94f06de38f25108
|
[
"MIT"
] | 7
|
2021-03-16T06:33:23.000Z
|
2022-01-12T00:21:12.000Z
|
from flask import Blueprint

# Blueprint serving the v1 API under the /api/v1 URL prefix.
api = Blueprint('api', __name__, url_prefix='/api/v1')

# Imported after `api` is defined so the view module can register routes on
# it — presumably placed at the bottom to avoid a circular import; confirm.
from . import Web_Server_Xception
| 19.833333
| 54
| 0.764706
| 17
| 119
| 4.941176
| 0.705882
| 0.285714
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.009615
| 0.12605
| 119
| 5
| 55
| 23.8
| 0.798077
| 0
| 0
| 0
| 0
| 0
| 0.084034
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.666667
| 0
| 0.666667
| 0.666667
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
|
0
| 5
|
3d389ca5d53b549e9ca67631193bb68a5823cc9c
| 17,099
|
py
|
Python
|
acoustic/ABCDNN.py
|
sailist/ASRFrame
|
2fd022c3c00af1d5178dee4b367b2269241bc73c
|
[
"Apache-2.0"
] | 223
|
2019-07-13T06:31:18.000Z
|
2022-03-11T08:23:01.000Z
|
acoustic/ABCDNN.py
|
mayite/ASRFrame
|
484cf1ee5beec4c39439de683c5b4c1f1ea3a94a
|
[
"Apache-2.0"
] | 7
|
2019-12-27T08:48:42.000Z
|
2021-09-01T09:45:13.000Z
|
acoustic/ABCDNN.py
|
mayite/ASRFrame
|
484cf1ee5beec4c39439de683c5b4c1f1ea3a94a
|
[
"Apache-2.0"
] | 71
|
2019-07-14T13:14:13.000Z
|
2022-03-18T06:58:54.000Z
|
from keras.models import Model
from util.reader import VoiceDatasetList,VoiceLoader
from util.mapmap import PinyinMapper
from feature.mel_feature import *
import os
from keras.layers import Dense, Dropout, Input, Multiply, Conv2D, MaxPooling2D
from keras.layers import Activation
from core import AcousticModel, CTC_Batch_Cost
import keras.backend as K
class DCNN2D(AcousticModel):
    '''Plain 2D convolution + max-pooling stack; works to some extent, but only average.'''

    def compile(self,feature_shape = (256,128,1),label_max_string_length = 32,ms_output_size = 1242):
        '''
        Build the model. Input tensor layout: [batch, times, vector, 1].
        :param feature_shape: audio feature shape [timestamp, feature_vec_dim]
        :param label_max_string_length: maximum label (pinyin sequence) length
        :param ms_output_size: size of the output vocabulary (softmax width)
        :return:
        '''
        ipt = Input(name='audio_input', shape=feature_shape)
        # eb1 = self.encoder_block(audio_ipt,128,128,position_embedding=False)
        # eb2 = self.encoder_block(eb1,128,128,position_embedding = False)
        layer_h1 = Conv2D(32, 3,
                          use_bias=False,
                          activation='relu',
                          padding='same',
                          kernel_initializer='he_normal')(ipt)
        layer_h1 = Dropout(rate=0.05)(layer_h1)
        layer_h2 = Conv2D(32, 3,
                          activation='relu',
                          padding='same',
                          kernel_initializer='he_normal')(layer_h1)
        layer_h2 = MaxPooling2D()(layer_h2)  # pooling layer
        layer_h3 = Dropout(rate=0.05)(layer_h2)
        layer_h4 = Conv2D(64, 3,
                          activation='relu',
                          padding='same',
                          kernel_initializer='he_normal')(layer_h3)  # convolution layer
        layer_h4 = Dropout(rate=0.1)(layer_h4)
        layer_h5 = Conv2D(64, 3,
                          activation='relu',
                          padding='same',
                          kernel_initializer='he_normal')(layer_h4)  # convolution layer
        layer_h5 = MaxPooling2D()(layer_h5)  # pooling layer
        layer_h6 = Dropout(rate=0.1)(layer_h5)
        layer_h7 = Conv2D(128, 3,
                          activation='relu',
                          padding='same',
                          kernel_initializer='he_normal')(layer_h6)  # convolution layer
        layer_h7 = Dropout(rate=0.15)(layer_h7)
        layer_h8 = Conv2D(128, 3,
                          activation='relu',
                          padding='same',
                          kernel_initializer='he_normal')(layer_h7)  # convolution layer
        layer_h8 = MaxPooling2D()(layer_h8)  # pooling layer
        layer_h9 = Dropout(0.15)(layer_h8)
        layer_h10 = Conv2D(128, 3,
                           activation='relu',
                           padding='same',
                           kernel_initializer='he_normal')(layer_h9)  # convolution layer
        layer_h10 = Dropout(0.2)(layer_h10)
        layer_h11 = Conv2D(128, 3,
                           activation='relu',
                           padding='same',
                           kernel_initializer='he_normal')(layer_h10)  # convolution layer
        # NOTE(review): pool_size=1 makes this pooling spatially a no-op — confirm intended.
        layer_h11 = MaxPooling2D(pool_size=1, strides=None, padding="valid")(layer_h11)  # pooling layer
        layer_h12 = Dropout(0.2)(layer_h11)
        layer_h13 = Conv2D(128,3,
                           activation='relu',
                           padding='same',
                           kernel_initializer='he_normal')(layer_h12)  # convolution layer
        layer_h13 = Dropout(0.3)(layer_h13)
        layer_h14 = Conv2D(128,3,
                           activation='relu',
                           padding='same',
                           kernel_initializer='he_normal')(layer_h13)  # convolution layer
        layer_h14 = MaxPooling2D(pool_size=1, strides=None, padding="valid")(layer_h14)  # pooling layer
        # Collapse the 2D feature maps back into a 1D time sequence for the Dense head.
        layer_h16 = self.reshape1dfrom2d(layer_h14)
        layer_h16 = Dropout(0.3)(layer_h16)
        layer_h17 = Dense(128, activation="relu",
                          kernel_initializer='he_normal')(layer_h16)  # fully-connected layer
        layer_h17 = Dropout(0.3)(layer_h17)
        layer_h18 = Dense(ms_output_size,
                          kernel_initializer='he_normal')(layer_h17)  # fully-connected layer
        y_pred = Activation('softmax', name='Activation0')(layer_h18)
        # NOTE(review): model_data is built but never used below — kept from original.
        model_data = Model(inputs=ipt, outputs=y_pred)
        # model_data.summary()
        label_ipt = Input(name='label_inputs', shape=[label_max_string_length], dtype='float32')
        audio_length = Input(name='audio_length', shape=[1], dtype='int64')
        label_length = Input(name='label_length', shape=[1], dtype='int64')
        # Keras doesn't currently support loss funcs with extra parameters
        # so CTC loss is implemented in a lambda layer
        loss_out = CTC_Batch_Cost()([label_ipt,y_pred, audio_length, label_length])
        train_model = Model([ipt, label_ipt, audio_length, label_length], [loss_out])
        # The CTC layer already outputs the loss, so the model loss just passes it through.
        train_model.compile(optimizer="adam", loss={"ctc": lambda y_true, y_pred: y_pred})
        base_model = Model(ipt,y_pred)
        self.built(train_model,base_model)

    @staticmethod
    def train(datagenes: list, load_model=None):
        '''Train on the given data generators; optionally resume from a saved model path.'''
        w, h = 1600, 200
        dataset = VoiceDatasetList()
        x_set, y_set = dataset.merge_load(datagenes)
        pymap = PinyinMapper(sil_mode=-1)
        vloader = VoiceLoader(x_set, y_set,
                              batch_size=16,
                              n_mels=h, feature_pad_len=w, feature_dim=3,
                              pymap=pymap,
                              melf=MelFeature5(),
                              divide_feature_len=8, )
        model_helper = DCNN2D(pymap)
        model_helper.compile(feature_shape=(w, h, 1),
                             ms_output_size=pymap.max_index + 1)  # ctc loss requires: index < num_class-1
        if load_model is not None:
            load_model = os.path.abspath(load_model)
            model_helper.load(load_model)
        model_helper.fit(vloader, epoch=-1, use_ctc=True)
class DCBNN2D(AcousticModel):
    '''from https://github.com/audier/DeepSpeechRecognition
    2D convolution + maxpool + batch-norm; performs worse than the 1D variant
    and also has more parameters than the 1D one.
    '''
    def compile(self,feature_shape = (None,200,1),label_max_string_length = 32,ms_output_size = 1423):
        '''Build the train/base models: stacked cnn2d cells, a two-Dense head, CTC loss.'''
        audio_ipt = Input(name="audio_input",shape=feature_shape)
        layer_h1 = self.cnn2d_cell(32, audio_ipt)
        layer_h2 = self.cnn2d_cell(64, layer_h1)
        layer_h3 = self.cnn2d_cell(128, layer_h2)
        layer_h4 = self.cnn2d_cell(128, layer_h3, pool=False)
        layer_h5 = self.cnn2d_cell(128, layer_h4, pool=False)
        # Collapse 2D feature maps back to a 1D time sequence for the Dense/CTC head.
        layer_h6 = self.reshape1dfrom2d(layer_h5)
        layer_h6 = Dropout(0.2)(layer_h6)
        layer_h7 = Dense(256,activation="relu",kernel_initializer="he_normal")(layer_h6)
        layer_h7 = Dropout(0.2)(layer_h7)
        y_pred = Dense(ms_output_size, activation='softmax')(layer_h7)
        y_true = Input(name='label_inputs', shape=[label_max_string_length], dtype='float32')
        audio_length = Input(name='audio_length', shape=[1], dtype='int64')
        label_length = Input(name='label_length', shape=[1], dtype='int64')
        # CTC loss is computed inside a layer because Keras losses can't take extra inputs.
        loss_out = CTC_Batch_Cost()([y_true, y_pred, audio_length, label_length])
        train_model = Model([audio_ipt, y_true, audio_length, label_length], [loss_out])
        train_model.compile(optimizer="adam", loss={"ctc": lambda y_true, y_pred: y_pred})
        base_model = Model(audio_ipt,y_pred)
        self.built(train_model,base_model)

    @staticmethod
    def train(datagenes:list, load_model = None):
        '''Train on the given data generators; optionally resume from `load_model` path.'''
        w,h = 1600,200
        dataset = VoiceDatasetList()
        x_set, y_set = dataset.merge_load(datagenes)
        pymap = PinyinMapper(sil_mode=-1)
        vloader = VoiceLoader(x_set,y_set,batch_size=16,n_mels=h,feature_pad_len=w,feature_dim=3,cut_sub=32)
        model_helper = DCBNN2D(pymap)
        model_helper.compile(feature_shape=(w,h,1),label_max_string_length=32,ms_output_size=1423)
        if load_model is not None:
            load_model = os.path.abspath(load_model)
            model_helper.load(load_model)
        model_helper.fit(vloader)
class DCBNN1D(AcousticModel):
    '''Best-performing model so far (as of 2019-07-01): 1D convolution + maxpool + batch-norm.'''
    def compile(self,feature_shape = (1024,200),label_max_string_length = 32,ms_output_size = 1423):
        '''Build the train/base models: stacked cnn1d cells, Dense head, CTC loss.'''
        audio_ipt = Input(name="audio_input", shape=feature_shape)
        layer_h1 = self.cnn1d_cell(32, audio_ipt,pool=True,reshape=False)
        layer_h2 = self.cnn1d_cell(32, layer_h1,pool=True,reshape=False)
        layer_h3 = self.cnn1d_cell(64, layer_h2,pool=True,reshape=False)
        layer_h4 = self.cnn1d_cell(64, layer_h3, pool=False)
        layer_h5 = self.cnn1d_cell(128, layer_h4, pool=False)
        layer_h5 = self.cnn1d_cell(128, layer_h5, pool=False)
        layer_h5 = self.cnn1d_cell(128, layer_h5, pool=False)
        layer_h5 = self.cnn1d_cell(128, layer_h5, pool=False)  # TODO: consider stacking a few more layers
        # 64print(layer_h5)
        layer_h6 = Dropout(0.2)(layer_h5)  # KL, dual-Dense head
        layer_h7 = Dense(256, activation="relu", kernel_initializer="he_normal")(layer_h6)  # TODO: consider adding Attention here
        layer_h7 = Dropout(0.2)(layer_h7)
        layer_h8 = Dense(ms_output_size)(layer_h7)
        y_pred = Activation(activation="softmax")(layer_h8)
        y_true = Input(name='label_inputs', shape=[label_max_string_length], dtype='float32')
        audio_length = Input(name='audio_length', shape=[1], dtype='int64')
        label_length = Input(name='label_length', shape=[1], dtype='int64')
        # CTC loss is computed inside a layer because Keras losses can't take extra inputs.
        loss_out = CTC_Batch_Cost()([y_true, y_pred, audio_length, label_length])
        train_model = Model([audio_ipt, y_true, audio_length, label_length], [loss_out])
        train_model.compile(optimizer="adam", loss={"ctc": lambda y_true, y_pred: y_pred})
        base_model = Model(audio_ipt, y_pred)
        self.built(train_model,base_model)

    @staticmethod
    def train(datagenes: list, load_model=None,**kwargs):
        '''Train on the given data generators.

        Optional kwargs: epoch (default -1, i.e. unbounded) and save_step (default 1000).
        '''
        w, h = 1600, 200
        max_label_len = 64
        dataset = VoiceDatasetList()
        x_set, y_set = dataset.merge_load(datagenes)
        pymap = PinyinMapper(sil_mode=-1)
        vloader = VoiceLoader(x_set, y_set,
                              batch_size=16,
                              feature_pad_len=w,
                              n_mels=h,
                              max_label_len=max_label_len,
                              pymap=pymap,
                              melf=MelFeature5(),
                              divide_feature_len=8,
                              all_train=False,
                              )
        model_helper = DCBNN1D(pymap)
        model_helper.compile(feature_shape=(w, h), label_max_string_length=max_label_len,
                             ms_output_size=pymap.max_index + 1)
        if load_model is not None:
            load_model = os.path.abspath(load_model)
            model_helper.load(load_model)
        epoch = kwargs.get("epoch",-1)
        save_step = kwargs.get("save_step",1000)
        model_helper.fit(vloader, epoch=epoch, save_step=save_step, use_ctc=True)
class DCBNN1Dplus(AcousticModel):
    '''Deeper variant of DCBNN1D: four extra 128-filter cnn1d cells before the Dense head.'''
    def compile(self,feature_shape = (1024,200),label_max_string_length = 32,ms_output_size = 1423):
        '''Build the train/base models: stacked cnn1d cells, Dense head, CTC loss.'''
        audio_ipt = Input(name="audio_input", shape=feature_shape)
        layer_h1 = self.cnn1d_cell(32, audio_ipt,pool=True,reshape=False)
        layer_h2 = self.cnn1d_cell(32, layer_h1,pool=True,reshape=False)
        layer_h3 = self.cnn1d_cell(64, layer_h2,pool=True,reshape=False)
        layer_h4 = self.cnn1d_cell(64, layer_h3, pool=False)
        layer_h5 = self.cnn1d_cell(128, layer_h4, pool=False)
        layer_h5 = self.cnn1d_cell(128, layer_h5, pool=False)
        layer_h5 = self.cnn1d_cell(128, layer_h5, pool=False)
        layer_h5 = self.cnn1d_cell(128, layer_h5, pool=False)  # TODO: consider stacking a few more layers
        layer_h5 = self.cnn1d_cell(128, layer_h5, pool=False)
        layer_h5 = self.cnn1d_cell(128, layer_h5, pool=False)
        layer_h5 = self.cnn1d_cell(128, layer_h5, pool=False)
        layer_h5 = self.cnn1d_cell(128, layer_h5, pool=False)
        # 64print(layer_h5)
        layer_h6 = Dropout(0.2)(layer_h5)  # KL, dual-Dense head
        layer_h7 = Dense(256, activation="relu", kernel_initializer="he_normal")(layer_h6)  # TODO: consider adding Attention here
        layer_h7 = Dropout(0.2)(layer_h7)
        layer_h8 = Dense(ms_output_size)(layer_h7)
        y_pred = Activation(activation="softmax")(layer_h8)
        y_true = Input(name='label_inputs', shape=[label_max_string_length], dtype='float32')
        audio_length = Input(name='audio_length', shape=[1], dtype='int64')
        label_length = Input(name='label_length', shape=[1], dtype='int64')
        # CTC loss is computed inside a layer because Keras losses can't take extra inputs.
        loss_out = CTC_Batch_Cost()([y_true, y_pred, audio_length, label_length])
        train_model = Model([audio_ipt, y_true, audio_length, label_length], [loss_out])
        train_model.compile(optimizer="adam", loss={"ctc": lambda y_true, y_pred: y_pred})
        base_model = Model(audio_ipt, y_pred)
        self.built(train_model,base_model)

    @staticmethod
    def train(datagenes:list, load_model = None):
        '''Train on the given data generators; optionally resume from `load_model` path.'''
        w, h = 1600, 200
        max_label_len = 64
        batch_size = 16
        dataset = VoiceDatasetList()
        x_set, y_set = dataset.merge_load(datagenes)
        pymap = PinyinMapper(sil_mode=-1)
        vloader = VoiceLoader(x_set, y_set,
                              batch_size= batch_size,
                              feature_pad_len = w,
                              n_mels=h,
                              max_label_len=max_label_len,
                              pymap=pymap,
                              melf=MelFeature5(),
                              all_train=True,
                              divide_feature_len=8,)
        model_helper = DCBNN1Dplus(pymap)
        model_helper.compile(feature_shape=(w, h), label_max_string_length=max_label_len, ms_output_size=pymap.max_index+1)
        if load_model is not None:
            load_model = os.path.abspath(load_model)
            model_helper.load(load_model)
        # model_helper.fit(vloader,epoch=-1, save_step=len(x_set)//batch_size, use_ctc=True)
        # Save roughly 30 times per epoch.
        model_helper.fit(vloader,epoch=-1, save_step=len(x_set)//batch_size//30, use_ctc=True)
class DCBANN1D(AcousticModel):
    '''DCBNN1D plus an element-wise attention gate before the output layer.'''
    def compile(self,feature_shape = (1024,200),label_max_string_length = 32,ms_output_size = 1423):
        '''Build the train/base models: cnn1d cells, attention-gated Dense head, CTC loss.'''
        audio_ipt = Input(name="audio_input", shape=feature_shape)
        layer_h1 = self.cnn1d_cell(32, audio_ipt,pool=True,reshape=False)
        layer_h2 = self.cnn1d_cell(32, layer_h1,pool=True,reshape=False)
        layer_h3 = self.cnn1d_cell(64, layer_h2,pool=True,reshape=False)
        layer_h4 = self.cnn1d_cell(64, layer_h3, pool=False)
        layer_h5 = self.cnn1d_cell(128, layer_h4, pool=False)
        layer_h5 = self.cnn1d_cell(128, layer_h5, pool=False)
        layer_h5 = self.cnn1d_cell(128, layer_h5, pool=False)
        layer_h5 = self.cnn1d_cell(128, layer_h5, pool=False)  # TODO: consider stacking a few more layers
        # 64print(layer_h5)
        layer_h6 = Dropout(0.2)(layer_h5)  # KL, dual-Dense head
        layer_h7 = Dense(512, activation="relu", kernel_initializer="he_normal")(layer_h6)  # TODO: consider adding Attention here
        layer_h7 = Dropout(0.2)(layer_h7)
        # Attention gate: softmax-normalized weights multiplied element-wise onto the features.
        attention_prob = Dense(units=512, activation='softmax', name='attention_vec')(layer_h7)
        attention_mul = Multiply()([layer_h7, attention_prob])
        layer_h8 = Dense(ms_output_size)(attention_mul)
        y_pred = Activation(activation="softmax")(layer_h8)
        y_true = Input(name='label_inputs', shape=[label_max_string_length], dtype='float32')
        audio_length = Input(name='audio_length', shape=[1], dtype='int64')
        label_length = Input(name='label_length', shape=[1], dtype='int64')
        # CTC loss is computed inside a layer because Keras losses can't take extra inputs.
        loss_out = CTC_Batch_Cost()([y_true, y_pred, audio_length, label_length])
        train_model = Model([audio_ipt, y_true, audio_length, label_length], [loss_out])
        train_model.compile(optimizer="adam", loss={"ctc": lambda y_true, y_pred: y_pred})
        base_model = Model(audio_ipt, y_pred)
        self.built(train_model,base_model)

    @staticmethod
    def train(datagenes: list, load_model=None):
        '''Train on the given data generators; optionally resume from `load_model` path.'''
        w, h = 1600, 200
        max_label_len = 64
        dataset = VoiceDatasetList()
        x_set, y_set = dataset.merge_load(datagenes)
        pymap = PinyinMapper(sil_mode=-1)
        vloader = VoiceLoader(x_set, y_set,
                              batch_size=16,
                              feature_pad_len=w,
                              n_mels=h,
                              max_label_len=max_label_len,
                              pymap=pymap,
                              melf=MelFeature5(),
                              divide_feature_len=8,
                              )
        model_helper = DCBANN1D(pymap)
        model_helper.compile(feature_shape=(w, h), label_max_string_length=max_label_len,
                             ms_output_size=pymap.max_index + 1)
        if load_model is not None:
            load_model = os.path.abspath(load_model)
            model_helper.load(load_model)
        model_helper.fit(vloader, epoch=-1, save_step=1000, use_ctc=True)
| 43.508906
| 123
| 0.610211
| 2,186
| 17,099
| 4.475297
| 0.103843
| 0.029337
| 0.037207
| 0.040887
| 0.791577
| 0.761014
| 0.755494
| 0.740366
| 0.728406
| 0.702852
| 0
| 0.054254
| 0.281011
| 17,099
| 392
| 124
| 43.619898
| 0.7415
| 0.056085
| 0
| 0.648084
| 0
| 0
| 0.04299
| 0
| 0
| 0
| 0
| 0.002551
| 0
| 1
| 0.034843
| false
| 0
| 0.031359
| 0
| 0.083624
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
3d5ed0ad2eb57022142c79ca608893aa7f5b6e53
| 122
|
py
|
Python
|
motor_lib/__init__.py
|
jielness/motor_lib
|
9e152c2d9f84b85671c282274cd16dbad0312b44
|
[
"MIT"
] | null | null | null |
motor_lib/__init__.py
|
jielness/motor_lib
|
9e152c2d9f84b85671c282274cd16dbad0312b44
|
[
"MIT"
] | null | null | null |
motor_lib/__init__.py
|
jielness/motor_lib
|
9e152c2d9f84b85671c282274cd16dbad0312b44
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# Time : 2021/7/2 4:12 PM
# Author : Shaojie Liu
# File : __init__.py.py
from . import motor_lib
| 17.428571
| 25
| 0.622951
| 21
| 122
| 3.380952
| 0.952381
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.103093
| 0.204918
| 122
| 6
| 26
| 20.333333
| 0.628866
| 0.721311
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
e9fa0adaa66b1f0ae9d3a67a54c72f9ad20ae557
| 70
|
py
|
Python
|
doc/integrations/siddhi-celery/_celery/s3_events/tasks.py
|
novium258/cortx-1
|
ce5b939b33b8d24d89b31807ac3bcaa8f24096bc
|
[
"Apache-2.0"
] | 552
|
2020-09-24T18:16:09.000Z
|
2022-03-25T06:21:55.000Z
|
doc/integrations/siddhi-celery/_celery/s3_events/tasks.py
|
novium258/cortx-1
|
ce5b939b33b8d24d89b31807ac3bcaa8f24096bc
|
[
"Apache-2.0"
] | 722
|
2020-09-24T19:48:44.000Z
|
2022-03-31T17:42:41.000Z
|
doc/integrations/siddhi-celery/_celery/s3_events/tasks.py
|
KOSASIH/cortx
|
f247ed319e3139aa0593ecd5aec804c2c1a342c4
|
[
"Apache-2.0"
] | 442
|
2020-09-24T14:24:21.000Z
|
2022-03-25T10:40:16.000Z
|
import app
@app.app.task
def event_test(x, y):
    # Celery task used as a simple smoke test: returns x*y + 15.
    return x * y + 15
| 11.666667
| 21
| 0.628571
| 14
| 70
| 3.071429
| 0.714286
| 0.27907
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.037736
| 0.242857
| 70
| 5
| 22
| 14
| 0.773585
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| false
| 0
| 0.25
| 0.25
| 0.75
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
|
0
| 5
|
e9fbacf3b55cce7e28a8d0ba88683ba9e9c3251e
| 50
|
py
|
Python
|
ois_api_client/v3_0/__init__.py
|
peterkulik/ois_api_client
|
51dabcc9f920f89982c4419bb058f5a88193cee0
|
[
"MIT"
] | 7
|
2020-10-22T08:15:29.000Z
|
2022-01-27T07:59:39.000Z
|
ois_api_client/v3_0/__init__.py
|
peterkulik/ois_api_client
|
51dabcc9f920f89982c4419bb058f5a88193cee0
|
[
"MIT"
] | null | null | null |
ois_api_client/v3_0/__init__.py
|
peterkulik/ois_api_client
|
51dabcc9f920f89982c4419bb058f5a88193cee0
|
[
"MIT"
] | null | null | null |
from .dto import *
from .deserialization import *
| 16.666667
| 30
| 0.76
| 6
| 50
| 6.333333
| 0.666667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.16
| 50
| 2
| 31
| 25
| 0.904762
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
1804668058bb613f6e365305682a614d3daae4f2
| 217
|
py
|
Python
|
xynodeeditor/node_serializable.py
|
XiaYoung/XYNodeEditor
|
0846733874cae499ca4a3851a2a9e52e9592b0d0
|
[
"MIT"
] | null | null | null |
xynodeeditor/node_serializable.py
|
XiaYoung/XYNodeEditor
|
0846733874cae499ca4a3851a2a9e52e9592b0d0
|
[
"MIT"
] | null | null | null |
xynodeeditor/node_serializable.py
|
XiaYoung/XYNodeEditor
|
0846733874cae499ca4a3851a2a9e52e9592b0d0
|
[
"MIT"
] | null | null | null |
class Serializable():
    """Mixin giving subclasses a session-unique id plus a (de)serialization contract."""

    def __init__(self):
        # Use the CPython object id as an identifier unique for this process lifetime.
        self.id = id(self)

    def serialize(self):
        """Return a serializable representation of this object.

        Raises:
            NotImplementedError: always; subclasses must override.
        """
        raise NotImplementedError()

    def deserialize(self, data, hashmap=None):
        """Restore state from `data`, resolving references via `hashmap`.

        `hashmap` defaults to a fresh dict per call. The original signature used a
        mutable default (`hashmap={}`), which is shared across all calls — a classic
        Python pitfall — so it is replaced with a `None` sentinel.

        Raises:
            NotImplementedError: always; subclasses must override.
        """
        if hashmap is None:
            hashmap = {}
        raise NotImplementedError()
| 21.7
| 44
| 0.631336
| 21
| 217
| 6.333333
| 0.571429
| 0.360902
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.253456
| 217
| 9
| 45
| 24.111111
| 0.820988
| 0
| 0
| 0.285714
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.428571
| false
| 0
| 0
| 0
| 0.571429
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 5
|
43e801db290ad10dd2168966ec496ab3790ed05a
| 94
|
py
|
Python
|
api/nivo_api/namespaces/flowcapt/namespace.py
|
RemiDesgrange/nivo
|
e13dcd7c00d1fbc41c23d51c9004901d7704b498
|
[
"MIT"
] | 2
|
2019-05-07T20:23:59.000Z
|
2020-04-26T11:18:38.000Z
|
api/nivo_api/namespaces/flowcapt/namespace.py
|
RemiDesgrange/nivo
|
e13dcd7c00d1fbc41c23d51c9004901d7704b498
|
[
"MIT"
] | 89
|
2019-08-06T12:47:50.000Z
|
2022-03-28T04:03:25.000Z
|
api/nivo_api/namespaces/flowcapt/namespace.py
|
RemiDesgrange/nivo
|
e13dcd7c00d1fbc41c23d51c9004901d7704b498
|
[
"MIT"
] | 1
|
2020-06-23T10:07:38.000Z
|
2020-06-23T10:07:38.000Z
|
from flask_restx import Namespace

# Namespace grouping the flowcapt endpoints; mounted under the /flowcapt path.
flowcapt_api = Namespace("flowcapt-api", path="/flowcapt")
| 23.5
| 58
| 0.787234
| 12
| 94
| 6
| 0.666667
| 0.472222
| 0.555556
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.095745
| 94
| 3
| 59
| 31.333333
| 0.847059
| 0
| 0
| 0
| 0
| 0
| 0.223404
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
a1274546a62d540edb1c037cae99fdbb40fb3b6a
| 177
|
py
|
Python
|
src/diary_bot/const.py
|
chanjunweimy/diary-bot
|
13ba7731663d787333ed43ded946d3b8118d82b1
|
[
"MIT"
] | null | null | null |
src/diary_bot/const.py
|
chanjunweimy/diary-bot
|
13ba7731663d787333ed43ded946d3b8118d82b1
|
[
"MIT"
] | null | null | null |
src/diary_bot/const.py
|
chanjunweimy/diary-bot
|
13ba7731663d787333ed43ded946d3b8118d82b1
|
[
"MIT"
] | 1
|
2018-11-04T04:33:08.000Z
|
2018-11-04T04:33:08.000Z
|
import os
def TOKEN():
    """Return the Telegram bot token read from the TOKEN_TELEGRAM environment variable.

    Raises KeyError if the variable is not set.
    """
    token = os.environ['TOKEN_TELEGRAM']
    return token
def INTENT_START():
    """Return the list of greeting lines sent when a conversation starts."""
    greeting = "Hi, I'm Diary Bot, your personal diary. Nice to meet you. :)"
    return [greeting]
| 14.75
| 70
| 0.610169
| 25
| 177
| 4.24
| 0.8
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.265537
| 177
| 11
| 71
| 16.090909
| 0.815385
| 0
| 0
| 0
| 0
| 0
| 0.418079
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.285714
| true
| 0
| 0.142857
| 0.285714
| 0.714286
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 1
| 1
| 0
|
0
| 5
|
a13c6c720961ef3d031a968ab86868a1cb360504
| 83
|
py
|
Python
|
vivid/backends/__init__.py
|
upura/vivid
|
6139697d60656d4774aceae880f5a07d929124a8
|
[
"BSD-2-Clause-FreeBSD"
] | null | null | null |
vivid/backends/__init__.py
|
upura/vivid
|
6139697d60656d4774aceae880f5a07d929124a8
|
[
"BSD-2-Clause-FreeBSD"
] | null | null | null |
vivid/backends/__init__.py
|
upura/vivid
|
6139697d60656d4774aceae880f5a07d929124a8
|
[
"BSD-2-Clause-FreeBSD"
] | null | null | null |
from .dataframes import DataFrameBackend
from .experiments import ExperimentBackend
| 41.5
| 42
| 0.891566
| 8
| 83
| 9.25
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.084337
| 83
| 2
| 42
| 41.5
| 0.973684
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
a1a03fa7253414de528af60db2afd7bd8c3d47e8
| 226
|
py
|
Python
|
app/schemas.py
|
navroz-lamba/human-rights-first-asylum-ds-a
|
48255de3c15671f1b4859e2bcf7379805f2a3fc8
|
[
"MIT"
] | null | null | null |
app/schemas.py
|
navroz-lamba/human-rights-first-asylum-ds-a
|
48255de3c15671f1b4859e2bcf7379805f2a3fc8
|
[
"MIT"
] | null | null | null |
app/schemas.py
|
navroz-lamba/human-rights-first-asylum-ds-a
|
48255de3c15671f1b4859e2bcf7379805f2a3fc8
|
[
"MIT"
] | null | null | null |
"""To avoid confusion between the SQLAlchemy models and the Pydantic models,
we will have the file models.py with the SQLAlchemy models,
and the file schemas.py with the Pydantic models"""
# from pydantic import BaseModel
| 32.285714
| 77
| 0.783186
| 35
| 226
| 5.057143
| 0.542857
| 0.146893
| 0.214689
| 0.248588
| 0.282486
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.168142
| 226
| 6
| 78
| 37.666667
| 0.941489
| 0.955752
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
a1a949d29c1bf11326aff806282d4994859200d8
| 153
|
py
|
Python
|
boa3_test/test_sc/python_operation_test/TypedTupleIn.py
|
hal0x2328/neo3-boa
|
6825a3533384cb01660773050719402a9703065b
|
[
"Apache-2.0"
] | 25
|
2020-07-22T19:37:43.000Z
|
2022-03-08T03:23:55.000Z
|
boa3_test/test_sc/python_operation_test/TypedTupleIn.py
|
hal0x2328/neo3-boa
|
6825a3533384cb01660773050719402a9703065b
|
[
"Apache-2.0"
] | 419
|
2020-04-23T17:48:14.000Z
|
2022-03-31T13:17:45.000Z
|
boa3_test/test_sc/python_operation_test/TypedTupleIn.py
|
hal0x2328/neo3-boa
|
6825a3533384cb01660773050719402a9703065b
|
[
"Apache-2.0"
] | 15
|
2020-05-21T21:54:24.000Z
|
2021-11-18T06:17:24.000Z
|
from typing import Tuple
from boa3.builtin import public
@public
def main(value: int, some_tuple: Tuple[int]) -> bool:
    # Compiler test fixture: exercises the `in` membership operator on a typed tuple.
    return value in some_tuple
| 17
| 53
| 0.745098
| 24
| 153
| 4.666667
| 0.625
| 0.160714
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.007937
| 0.176471
| 153
| 8
| 54
| 19.125
| 0.880952
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.2
| false
| 0
| 0.4
| 0.2
| 0.8
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
|
0
| 5
|
a1bb6bdb4534760fa82959a44cbc569f863ea7e1
| 539
|
py
|
Python
|
Kmeans_HTVIS_algorithm.py
|
vildursn/Master-thesis
|
d43be72f597f04572e80b3b5441ef508756dbb37
|
[
"MIT"
] | null | null | null |
Kmeans_HTVIS_algorithm.py
|
vildursn/Master-thesis
|
d43be72f597f04572e80b3b5441ef508756dbb37
|
[
"MIT"
] | null | null | null |
Kmeans_HTVIS_algorithm.py
|
vildursn/Master-thesis
|
d43be72f597f04572e80b3b5441ef508756dbb37
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Created on Sun May 19 11:27:35 2019
@author: vildeg
"""
import numpy as np
import cv2
from functions import draw_hough_lines
from functions import voting_scheme
from functions import three_line_RANSAC
from functions import find_cart_line_eq
from functions import perpendicular_polar_line
from functions import lines_approx_parallel
from functions import show_image
from skimage.measure import ransac, LineModelND
from sklearn.cluster import DBSCAN
from functions import lines_approx_parallel
| 28.368421
| 48
| 0.80705
| 78
| 539
| 5.384615
| 0.564103
| 0.247619
| 0.361905
| 0.114286
| 0.180952
| 0.180952
| 0
| 0
| 0
| 0
| 0
| 0.030635
| 0.152134
| 539
| 19
| 49
| 28.368421
| 0.888403
| 0.139147
| 0
| 0.166667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
62f9e0cd2165187d330bdb9d1f77fc07f08139d7
| 23
|
py
|
Python
|
heima/register.py
|
teashell/test
|
2855ec7f757660deb6a309d2d6672aa5d8fbf09a
|
[
"MIT"
] | null | null | null |
heima/register.py
|
teashell/test
|
2855ec7f757660deb6a309d2d6672aa5d8fbf09a
|
[
"MIT"
] | null | null | null |
heima/register.py
|
teashell/test
|
2855ec7f757660deb6a309d2d6672aa5d8fbf09a
|
[
"MIT"
] | null | null | null |
# Announce that version 1.0 is finished (user-facing message kept in original Chinese).
print('版本1.0终于完成了!!!')
| 11.5
| 22
| 0.608696
| 3
| 23
| 4.666667
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.090909
| 0.043478
| 23
| 1
| 23
| 23
| 0.545455
| 0
| 0
| 0
| 0
| 0
| 0.565217
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
|
0
| 5
|
1a0ebd0e27a81e9bcd58b7d364802480de36d37b
| 53
|
py
|
Python
|
app/celery.py
|
erick-rivas/django-reference
|
3195de635419a0c2ac8eee92742bb98365f614d8
|
[
"MIT"
] | null | null | null |
app/celery.py
|
erick-rivas/django-reference
|
3195de635419a0c2ac8eee92742bb98365f614d8
|
[
"MIT"
] | 11
|
2020-02-11T23:57:45.000Z
|
2022-02-17T07:03:39.000Z
|
app/celery.py
|
erick-rivas/django-reference
|
3195de635419a0c2ac8eee92742bb98365f614d8
|
[
"MIT"
] | null | null | null |
# pylint: disable=W0401
from seed.app.celery import *
| 26.5
| 29
| 0.773585
| 8
| 53
| 5.125
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.085106
| 0.113208
| 53
| 2
| 29
| 26.5
| 0.787234
| 0.396226
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
a7e2879e18cadbff0c1030e0e3ea7e3f796b5ee4
| 246
|
py
|
Python
|
api/medicine_api/readers/exceptions.py
|
maxwelld90/medicine-for-ukraine
|
f10152929a3827b9de055d993103b352959f3b33
|
[
"MIT"
] | 2
|
2022-03-21T14:00:27.000Z
|
2022-03-21T17:17:51.000Z
|
api/medicine_api/readers/exceptions.py
|
maxwelld90/medicine-for-ukraine
|
f10152929a3827b9de055d993103b352959f3b33
|
[
"MIT"
] | 19
|
2022-03-21T14:14:26.000Z
|
2022-03-31T07:49:40.000Z
|
api/medicine_api/readers/exceptions.py
|
maxwelld90/medicine-for-ukraine
|
f10152929a3827b9de055d993103b352959f3b33
|
[
"MIT"
] | 4
|
2022-03-02T13:56:30.000Z
|
2022-03-22T10:49:00.000Z
|
class CacheConnectionError(Exception):
    """Signals a failure connecting to the cache backend."""
    pass
class GoogleConnectionError(Exception):
    """Signals a failure connecting to the Google service."""
    pass
class UnknownDocumentError(Exception):
    """Signals a request for a document that is not recognized."""
    pass
class UnknownSheetError(Exception):
    """Signals a request for a sheet that is not recognized."""
    pass
class UnknownDataFrameError(Exception):
pass
| 13.666667
| 39
| 0.768293
| 20
| 246
| 9.45
| 0.4
| 0.343915
| 0.380952
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.170732
| 246
| 18
| 40
| 13.666667
| 0.926471
| 0
| 0
| 0.5
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.5
| 0
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
|
0
| 5
|
c5037d286080ff58585a528163db941f878e1ef4
| 22,281
|
py
|
Python
|
pysnmp/Novell-License-Server-Trap-MIB.py
|
agustinhenze/mibs.snmplabs.com
|
1fc5c07860542b89212f4c8ab807057d9a9206c7
|
[
"Apache-2.0"
] | 11
|
2021-02-02T16:27:16.000Z
|
2021-08-31T06:22:49.000Z
|
pysnmp/Novell-License-Server-Trap-MIB.py
|
agustinhenze/mibs.snmplabs.com
|
1fc5c07860542b89212f4c8ab807057d9a9206c7
|
[
"Apache-2.0"
] | 75
|
2021-02-24T17:30:31.000Z
|
2021-12-08T00:01:18.000Z
|
pysnmp/Novell-License-Server-Trap-MIB.py
|
agustinhenze/mibs.snmplabs.com
|
1fc5c07860542b89212f4c8ab807057d9a9206c7
|
[
"Apache-2.0"
] | 10
|
2019-04-30T05:51:36.000Z
|
2022-02-16T03:33:41.000Z
|
#
# PySNMP MIB module Novell-License-Server-Trap-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/Novell-License-Server-Trap-MIB
# Produced by pysmi-0.3.4 at Mon Apr 29 20:22:39 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
ObjectIdentifier, OctetString, Integer = mibBuilder.importSymbols("ASN1", "ObjectIdentifier", "OctetString", "Integer")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ValueSizeConstraint, ConstraintsIntersection, ValueRangeConstraint, ConstraintsUnion, SingleValueConstraint = mibBuilder.importSymbols("ASN1-REFINEMENT", "ValueSizeConstraint", "ConstraintsIntersection", "ValueRangeConstraint", "ConstraintsUnion", "SingleValueConstraint")
InternationalDisplayString, = mibBuilder.importSymbols("HOST-RESOURCES-MIB", "InternationalDisplayString")
ModuleCompliance, NotificationGroup = mibBuilder.importSymbols("SNMPv2-CONF", "ModuleCompliance", "NotificationGroup")
iso, Counter64, NotificationType, NotificationType, ObjectIdentity, Gauge32, Integer32, Unsigned32, IpAddress, Bits, ModuleIdentity, MibScalar, MibTable, MibTableRow, MibTableColumn, enterprises, MibIdentifier, TimeTicks, Counter32 = mibBuilder.importSymbols("SNMPv2-SMI", "iso", "Counter64", "NotificationType", "NotificationType", "ObjectIdentity", "Gauge32", "Integer32", "Unsigned32", "IpAddress", "Bits", "ModuleIdentity", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "enterprises", "MibIdentifier", "TimeTicks", "Counter32")
DisplayString, TextualConvention = mibBuilder.importSymbols("SNMPv2-TC", "DisplayString", "TextualConvention")
novell = MibIdentifier((1, 3, 6, 1, 4, 1, 23))
mibDoc = MibIdentifier((1, 3, 6, 1, 4, 1, 23, 2))
nlstrap_mib = MibIdentifier((1, 3, 6, 1, 4, 1, 23, 2, 73)).setLabel("nlstrap-mib")
trapinfo = MibIdentifier((1, 3, 6, 1, 4, 1, 23, 2, 73, 1))
serverName = MibScalar((1, 3, 6, 1, 4, 1, 23, 2, 73, 1, 1), InternationalDisplayString().subtype(subtypeSpec=ValueSizeConstraint(1, 48)))
if mibBuilder.loadTexts: serverName.setStatus('mandatory')
trapTime = MibScalar((1, 3, 6, 1, 4, 1, 23, 2, 73, 1, 2), Integer32())
if mibBuilder.loadTexts: trapTime.setStatus('mandatory')
appName = MibScalar((1, 3, 6, 1, 4, 1, 23, 2, 73, 1, 3), InternationalDisplayString().subtype(subtypeSpec=ValueSizeConstraint(1, 256)))
if mibBuilder.loadTexts: appName.setStatus('mandatory')
userName = MibScalar((1, 3, 6, 1, 4, 1, 23, 2, 73, 1, 4), InternationalDisplayString().subtype(subtypeSpec=ValueSizeConstraint(1, 256)))
if mibBuilder.loadTexts: userName.setStatus('mandatory')
networkAddress = MibScalar((1, 3, 6, 1, 4, 1, 23, 2, 73, 1, 5), OctetString().subtype(subtypeSpec=ValueSizeConstraint(4, 4)).setFixedLength(4))
if mibBuilder.loadTexts: networkAddress.setStatus('mandatory')
macAddress = MibScalar((1, 3, 6, 1, 4, 1, 23, 2, 73, 1, 6), OctetString().subtype(subtypeSpec=ValueSizeConstraint(6, 6)).setFixedLength(6))
if mibBuilder.loadTexts: macAddress.setStatus('mandatory')
lsRequestSuccess = NotificationType((1, 3, 6, 1, 4, 1, 23, 2, 73) + (0,1)).setObjects(("Novell-License-Server-Trap-MIB", "serverName"), ("Novell-License-Server-Trap-MIB", "trapTime"), ("Novell-License-Server-Trap-MIB", "appName"), ("Novell-License-Server-Trap-MIB", "userName"), ("Novell-License-Server-Trap-MIB", "networkAddress"), ("Novell-License-Server-Trap-MIB", "macAddress"))
lsRequestSystemError = NotificationType((1, 3, 6, 1, 4, 1, 23, 2, 73) + (0,2)).setObjects(("Novell-License-Server-Trap-MIB", "serverName"), ("Novell-License-Server-Trap-MIB", "trapTime"), ("Novell-License-Server-Trap-MIB", "appName"), ("Novell-License-Server-Trap-MIB", "userName"), ("Novell-License-Server-Trap-MIB", "networkAddress"), ("Novell-License-Server-Trap-MIB", "macAddress"))
lsRequestResourceError = NotificationType((1, 3, 6, 1, 4, 1, 23, 2, 73) + (0,3)).setObjects(("Novell-License-Server-Trap-MIB", "serverName"), ("Novell-License-Server-Trap-MIB", "trapTime"), ("Novell-License-Server-Trap-MIB", "appName"), ("Novell-License-Server-Trap-MIB", "userName"), ("Novell-License-Server-Trap-MIB", "networkAddress"), ("Novell-License-Server-Trap-MIB", "macAddress"))
lsRequestAuthError = NotificationType((1, 3, 6, 1, 4, 1, 23, 2, 73) + (0,4)).setObjects(("Novell-License-Server-Trap-MIB", "serverName"), ("Novell-License-Server-Trap-MIB", "trapTime"), ("Novell-License-Server-Trap-MIB", "appName"), ("Novell-License-Server-Trap-MIB", "userName"), ("Novell-License-Server-Trap-MIB", "networkAddress"), ("Novell-License-Server-Trap-MIB", "macAddress"))
lsRequestBadArg = NotificationType((1, 3, 6, 1, 4, 1, 23, 2, 73) + (0,5)).setObjects(("Novell-License-Server-Trap-MIB", "serverName"), ("Novell-License-Server-Trap-MIB", "trapTime"), ("Novell-License-Server-Trap-MIB", "appName"), ("Novell-License-Server-Trap-MIB", "userName"), ("Novell-License-Server-Trap-MIB", "networkAddress"), ("Novell-License-Server-Trap-MIB", "macAddress"))
lsRequestInsuffUnits = NotificationType((1, 3, 6, 1, 4, 1, 23, 2, 73) + (0,6)).setObjects(("Novell-License-Server-Trap-MIB", "serverName"), ("Novell-License-Server-Trap-MIB", "trapTime"), ("Novell-License-Server-Trap-MIB", "appName"), ("Novell-License-Server-Trap-MIB", "userName"), ("Novell-License-Server-Trap-MIB", "networkAddress"), ("Novell-License-Server-Trap-MIB", "macAddress"))
lsRequestLicUnavail = NotificationType((1, 3, 6, 1, 4, 1, 23, 2, 73) + (0,7)).setObjects(("Novell-License-Server-Trap-MIB", "serverName"), ("Novell-License-Server-Trap-MIB", "trapTime"), ("Novell-License-Server-Trap-MIB", "appName"), ("Novell-License-Server-Trap-MIB", "userName"), ("Novell-License-Server-Trap-MIB", "networkAddress"), ("Novell-License-Server-Trap-MIB", "macAddress"))
lsRequestNetUnavail = NotificationType((1, 3, 6, 1, 4, 1, 23, 2, 73) + (0,8)).setObjects(("Novell-License-Server-Trap-MIB", "serverName"), ("Novell-License-Server-Trap-MIB", "trapTime"), ("Novell-License-Server-Trap-MIB", "appName"), ("Novell-License-Server-Trap-MIB", "userName"), ("Novell-License-Server-Trap-MIB", "networkAddress"), ("Novell-License-Server-Trap-MIB", "macAddress"))
lsRelSuccess = NotificationType((1, 3, 6, 1, 4, 1, 23, 2, 73) + (0,9)).setObjects(("Novell-License-Server-Trap-MIB", "serverName"), ("Novell-License-Server-Trap-MIB", "trapTime"), ("Novell-License-Server-Trap-MIB", "appName"), ("Novell-License-Server-Trap-MIB", "userName"), ("Novell-License-Server-Trap-MIB", "networkAddress"), ("Novell-License-Server-Trap-MIB", "macAddress"))
lsRelSystemError = NotificationType((1, 3, 6, 1, 4, 1, 23, 2, 73) + (0,10)).setObjects(("Novell-License-Server-Trap-MIB", "serverName"), ("Novell-License-Server-Trap-MIB", "trapTime"), ("Novell-License-Server-Trap-MIB", "appName"), ("Novell-License-Server-Trap-MIB", "userName"), ("Novell-License-Server-Trap-MIB", "networkAddress"), ("Novell-License-Server-Trap-MIB", "macAddress"))
lsRelResouceError = NotificationType((1, 3, 6, 1, 4, 1, 23, 2, 73) + (0,11)).setObjects(("Novell-License-Server-Trap-MIB", "serverName"), ("Novell-License-Server-Trap-MIB", "trapTime"), ("Novell-License-Server-Trap-MIB", "appName"), ("Novell-License-Server-Trap-MIB", "userName"), ("Novell-License-Server-Trap-MIB", "networkAddress"), ("Novell-License-Server-Trap-MIB", "macAddress"))
lsRelAuthError = NotificationType((1, 3, 6, 1, 4, 1, 23, 2, 73) + (0,12)).setObjects(("Novell-License-Server-Trap-MIB", "serverName"), ("Novell-License-Server-Trap-MIB", "trapTime"), ("Novell-License-Server-Trap-MIB", "appName"), ("Novell-License-Server-Trap-MIB", "userName"), ("Novell-License-Server-Trap-MIB", "networkAddress"), ("Novell-License-Server-Trap-MIB", "macAddress"))
lsRelBadArg = NotificationType((1, 3, 6, 1, 4, 1, 23, 2, 73) + (0,13)).setObjects(("Novell-License-Server-Trap-MIB", "serverName"), ("Novell-License-Server-Trap-MIB", "trapTime"), ("Novell-License-Server-Trap-MIB", "appName"), ("Novell-License-Server-Trap-MIB", "userName"), ("Novell-License-Server-Trap-MIB", "networkAddress"), ("Novell-License-Server-Trap-MIB", "macAddress"))
lsRelBadHandle = NotificationType((1, 3, 6, 1, 4, 1, 23, 2, 73) + (0,14)).setObjects(("Novell-License-Server-Trap-MIB", "serverName"), ("Novell-License-Server-Trap-MIB", "trapTime"), ("Novell-License-Server-Trap-MIB", "appName"), ("Novell-License-Server-Trap-MIB", "userName"), ("Novell-License-Server-Trap-MIB", "networkAddress"), ("Novell-License-Server-Trap-MIB", "macAddress"))
lsUpdateSuccess = NotificationType((1, 3, 6, 1, 4, 1, 23, 2, 73) + (0,15)).setObjects(("Novell-License-Server-Trap-MIB", "serverName"), ("Novell-License-Server-Trap-MIB", "trapTime"), ("Novell-License-Server-Trap-MIB", "appName"), ("Novell-License-Server-Trap-MIB", "userName"), ("Novell-License-Server-Trap-MIB", "networkAddress"), ("Novell-License-Server-Trap-MIB", "macAddress"))
lsUpdateSystemError = NotificationType((1, 3, 6, 1, 4, 1, 23, 2, 73) + (0,16)).setObjects(("Novell-License-Server-Trap-MIB", "serverName"), ("Novell-License-Server-Trap-MIB", "trapTime"), ("Novell-License-Server-Trap-MIB", "appName"), ("Novell-License-Server-Trap-MIB", "userName"), ("Novell-License-Server-Trap-MIB", "networkAddress"), ("Novell-License-Server-Trap-MIB", "macAddress"))
lsUpdateResouceError = NotificationType((1, 3, 6, 1, 4, 1, 23, 2, 73) + (0,17)).setObjects(("Novell-License-Server-Trap-MIB", "serverName"), ("Novell-License-Server-Trap-MIB", "trapTime"), ("Novell-License-Server-Trap-MIB", "appName"), ("Novell-License-Server-Trap-MIB", "userName"), ("Novell-License-Server-Trap-MIB", "networkAddress"), ("Novell-License-Server-Trap-MIB", "macAddress"))
lsUpdateAuthError = NotificationType((1, 3, 6, 1, 4, 1, 23, 2, 73) + (0,18)).setObjects(("Novell-License-Server-Trap-MIB", "serverName"), ("Novell-License-Server-Trap-MIB", "trapTime"), ("Novell-License-Server-Trap-MIB", "appName"), ("Novell-License-Server-Trap-MIB", "userName"), ("Novell-License-Server-Trap-MIB", "networkAddress"), ("Novell-License-Server-Trap-MIB", "macAddress"))
lsUpdateBadArg = NotificationType((1, 3, 6, 1, 4, 1, 23, 2, 73) + (0,19)).setObjects(("Novell-License-Server-Trap-MIB", "serverName"), ("Novell-License-Server-Trap-MIB", "trapTime"), ("Novell-License-Server-Trap-MIB", "appName"), ("Novell-License-Server-Trap-MIB", "userName"), ("Novell-License-Server-Trap-MIB", "networkAddress"), ("Novell-License-Server-Trap-MIB", "macAddress"))
lsUpdateBadHandle = NotificationType((1, 3, 6, 1, 4, 1, 23, 2, 73) + (0,20)).setObjects(("Novell-License-Server-Trap-MIB", "serverName"), ("Novell-License-Server-Trap-MIB", "trapTime"), ("Novell-License-Server-Trap-MIB", "appName"), ("Novell-License-Server-Trap-MIB", "userName"), ("Novell-License-Server-Trap-MIB", "networkAddress"), ("Novell-License-Server-Trap-MIB", "macAddress"))
lsUpdateInsuffUnits = NotificationType((1, 3, 6, 1, 4, 1, 23, 2, 73) + (0,21)).setObjects(("Novell-License-Server-Trap-MIB", "serverName"), ("Novell-License-Server-Trap-MIB", "trapTime"), ("Novell-License-Server-Trap-MIB", "appName"), ("Novell-License-Server-Trap-MIB", "userName"), ("Novell-License-Server-Trap-MIB", "networkAddress"), ("Novell-License-Server-Trap-MIB", "macAddress"))
lsUpdateLicUnavail = NotificationType((1, 3, 6, 1, 4, 1, 23, 2, 73) + (0,22)).setObjects(("Novell-License-Server-Trap-MIB", "serverName"), ("Novell-License-Server-Trap-MIB", "trapTime"), ("Novell-License-Server-Trap-MIB", "appName"), ("Novell-License-Server-Trap-MIB", "userName"), ("Novell-License-Server-Trap-MIB", "networkAddress"), ("Novell-License-Server-Trap-MIB", "macAddress"))
lsUpdateLicTerm = NotificationType((1, 3, 6, 1, 4, 1, 23, 2, 73) + (0,23)).setObjects(("Novell-License-Server-Trap-MIB", "serverName"), ("Novell-License-Server-Trap-MIB", "trapTime"), ("Novell-License-Server-Trap-MIB", "appName"), ("Novell-License-Server-Trap-MIB", "userName"), ("Novell-License-Server-Trap-MIB", "networkAddress"), ("Novell-License-Server-Trap-MIB", "macAddress"))
lsUpdateLicExpired = NotificationType((1, 3, 6, 1, 4, 1, 23, 2, 73) + (0,24)).setObjects(("Novell-License-Server-Trap-MIB", "serverName"), ("Novell-License-Server-Trap-MIB", "trapTime"), ("Novell-License-Server-Trap-MIB", "appName"), ("Novell-License-Server-Trap-MIB", "userName"), ("Novell-License-Server-Trap-MIB", "networkAddress"), ("Novell-License-Server-Trap-MIB", "macAddress"))
addAssignSuccess = NotificationType((1, 3, 6, 1, 4, 1, 23, 2, 73) + (0,25)).setObjects(("Novell-License-Server-Trap-MIB", "serverName"), ("Novell-License-Server-Trap-MIB", "trapTime"), ("Novell-License-Server-Trap-MIB", "appName"))
addAssignSystemError = NotificationType((1, 3, 6, 1, 4, 1, 23, 2, 73) + (0,26)).setObjects(("Novell-License-Server-Trap-MIB", "serverName"), ("Novell-License-Server-Trap-MIB", "trapTime"), ("Novell-License-Server-Trap-MIB", "appName"))
addAssignResourceError = NotificationType((1, 3, 6, 1, 4, 1, 23, 2, 73) + (0,27)).setObjects(("Novell-License-Server-Trap-MIB", "serverName"), ("Novell-License-Server-Trap-MIB", "trapTime"), ("Novell-License-Server-Trap-MIB", "appName"))
addAssignAuthError = NotificationType((1, 3, 6, 1, 4, 1, 23, 2, 73) + (0,28)).setObjects(("Novell-License-Server-Trap-MIB", "serverName"), ("Novell-License-Server-Trap-MIB", "trapTime"), ("Novell-License-Server-Trap-MIB", "appName"))
addAssignBadArg = NotificationType((1, 3, 6, 1, 4, 1, 23, 2, 73) + (0,29)).setObjects(("Novell-License-Server-Trap-MIB", "serverName"), ("Novell-License-Server-Trap-MIB", "trapTime"), ("Novell-License-Server-Trap-MIB", "appName"))
removeAssignSuccess = NotificationType((1, 3, 6, 1, 4, 1, 23, 2, 73) + (0,30)).setObjects(("Novell-License-Server-Trap-MIB", "serverName"), ("Novell-License-Server-Trap-MIB", "trapTime"), ("Novell-License-Server-Trap-MIB", "appName"))
removeAssignSystemError = NotificationType((1, 3, 6, 1, 4, 1, 23, 2, 73) + (0,31)).setObjects(("Novell-License-Server-Trap-MIB", "serverName"), ("Novell-License-Server-Trap-MIB", "trapTime"), ("Novell-License-Server-Trap-MIB", "appName"))
removeAssignResourceError = NotificationType((1, 3, 6, 1, 4, 1, 23, 2, 73) + (0,32)).setObjects(("Novell-License-Server-Trap-MIB", "serverName"), ("Novell-License-Server-Trap-MIB", "trapTime"), ("Novell-License-Server-Trap-MIB", "appName"))
removeAssignAuthError = NotificationType((1, 3, 6, 1, 4, 1, 23, 2, 73) + (0,33)).setObjects(("Novell-License-Server-Trap-MIB", "serverName"), ("Novell-License-Server-Trap-MIB", "trapTime"), ("Novell-License-Server-Trap-MIB", "appName"))
removeAssignBadArg = NotificationType((1, 3, 6, 1, 4, 1, 23, 2, 73) + (0,34)).setObjects(("Novell-License-Server-Trap-MIB", "serverName"), ("Novell-License-Server-Trap-MIB", "trapTime"), ("Novell-License-Server-Trap-MIB", "appName"))
installCertifSuccess = NotificationType((1, 3, 6, 1, 4, 1, 23, 2, 73) + (0,35)).setObjects(("Novell-License-Server-Trap-MIB", "serverName"), ("Novell-License-Server-Trap-MIB", "trapTime"), ("Novell-License-Server-Trap-MIB", "appName"))
installCertifSystemError = NotificationType((1, 3, 6, 1, 4, 1, 23, 2, 73) + (0,36)).setObjects(("Novell-License-Server-Trap-MIB", "serverName"), ("Novell-License-Server-Trap-MIB", "trapTime"), ("Novell-License-Server-Trap-MIB", "appName"))
installCertifResourceError = NotificationType((1, 3, 6, 1, 4, 1, 23, 2, 73) + (0,37)).setObjects(("Novell-License-Server-Trap-MIB", "serverName"), ("Novell-License-Server-Trap-MIB", "trapTime"), ("Novell-License-Server-Trap-MIB", "appName"))
installCertifAuthError = NotificationType((1, 3, 6, 1, 4, 1, 23, 2, 73) + (0,38)).setObjects(("Novell-License-Server-Trap-MIB", "serverName"), ("Novell-License-Server-Trap-MIB", "trapTime"), ("Novell-License-Server-Trap-MIB", "appName"))
installCertifBadArg = NotificationType((1, 3, 6, 1, 4, 1, 23, 2, 73) + (0,39)).setObjects(("Novell-License-Server-Trap-MIB", "serverName"), ("Novell-License-Server-Trap-MIB", "trapTime"), ("Novell-License-Server-Trap-MIB", "appName"))
deleteCertifSuccess = NotificationType((1, 3, 6, 1, 4, 1, 23, 2, 73) + (0,40)).setObjects(("Novell-License-Server-Trap-MIB", "serverName"), ("Novell-License-Server-Trap-MIB", "trapTime"), ("Novell-License-Server-Trap-MIB", "appName"))
deleteCertifSystemError = NotificationType((1, 3, 6, 1, 4, 1, 23, 2, 73) + (0,41)).setObjects(("Novell-License-Server-Trap-MIB", "serverName"), ("Novell-License-Server-Trap-MIB", "trapTime"), ("Novell-License-Server-Trap-MIB", "appName"))
deleteCertifResourceError = NotificationType((1, 3, 6, 1, 4, 1, 23, 2, 73) + (0,42)).setObjects(("Novell-License-Server-Trap-MIB", "serverName"), ("Novell-License-Server-Trap-MIB", "trapTime"), ("Novell-License-Server-Trap-MIB", "appName"))
deleteCertifAuthError = NotificationType((1, 3, 6, 1, 4, 1, 23, 2, 73) + (0,43)).setObjects(("Novell-License-Server-Trap-MIB", "serverName"), ("Novell-License-Server-Trap-MIB", "trapTime"), ("Novell-License-Server-Trap-MIB", "appName"))
deleteCertifBadArg = NotificationType((1, 3, 6, 1, 4, 1, 23, 2, 73) + (0,44)).setObjects(("Novell-License-Server-Trap-MIB", "serverName"), ("Novell-License-Server-Trap-MIB", "trapTime"), ("Novell-License-Server-Trap-MIB", "appName"))
transOwnerSuccess = NotificationType((1, 3, 6, 1, 4, 1, 23, 2, 73) + (0,45)).setObjects(("Novell-License-Server-Trap-MIB", "serverName"), ("Novell-License-Server-Trap-MIB", "trapTime"), ("Novell-License-Server-Trap-MIB", "appName"))
transOwnerSystemError = NotificationType((1, 3, 6, 1, 4, 1, 23, 2, 73) + (0,46)).setObjects(("Novell-License-Server-Trap-MIB", "serverName"), ("Novell-License-Server-Trap-MIB", "trapTime"), ("Novell-License-Server-Trap-MIB", "appName"))
transOwnerResourceError = NotificationType((1, 3, 6, 1, 4, 1, 23, 2, 73) + (0,47)).setObjects(("Novell-License-Server-Trap-MIB", "serverName"), ("Novell-License-Server-Trap-MIB", "trapTime"), ("Novell-License-Server-Trap-MIB", "appName"))
transOwnerAuthError = NotificationType((1, 3, 6, 1, 4, 1, 23, 2, 73) + (0,48)).setObjects(("Novell-License-Server-Trap-MIB", "serverName"), ("Novell-License-Server-Trap-MIB", "trapTime"), ("Novell-License-Server-Trap-MIB", "appName"))
transOwnerBadArg = NotificationType((1, 3, 6, 1, 4, 1, 23, 2, 73) + (0,49)).setObjects(("Novell-License-Server-Trap-MIB", "serverName"), ("Novell-License-Server-Trap-MIB", "trapTime"), ("Novell-License-Server-Trap-MIB", "appName"))
addProdInfoSuccess = NotificationType((1, 3, 6, 1, 4, 1, 23, 2, 73) + (0,50)).setObjects(("Novell-License-Server-Trap-MIB", "serverName"), ("Novell-License-Server-Trap-MIB", "trapTime"), ("Novell-License-Server-Trap-MIB", "appName"))
addProdInfoSystemError = NotificationType((1, 3, 6, 1, 4, 1, 23, 2, 73) + (0,51)).setObjects(("Novell-License-Server-Trap-MIB", "serverName"), ("Novell-License-Server-Trap-MIB", "trapTime"), ("Novell-License-Server-Trap-MIB", "appName"))
addProdInfoResourceError = NotificationType((1, 3, 6, 1, 4, 1, 23, 2, 73) + (0,52)).setObjects(("Novell-License-Server-Trap-MIB", "serverName"), ("Novell-License-Server-Trap-MIB", "trapTime"), ("Novell-License-Server-Trap-MIB", "appName"))
addProdInfoAuthError = NotificationType((1, 3, 6, 1, 4, 1, 23, 2, 73) + (0,53)).setObjects(("Novell-License-Server-Trap-MIB", "serverName"), ("Novell-License-Server-Trap-MIB", "trapTime"), ("Novell-License-Server-Trap-MIB", "appName"))
addProdInfoBadArg = NotificationType((1, 3, 6, 1, 4, 1, 23, 2, 73) + (0,54)).setObjects(("Novell-License-Server-Trap-MIB", "serverName"), ("Novell-License-Server-Trap-MIB", "trapTime"), ("Novell-License-Server-Trap-MIB", "appName"))
nlsTrapLoaded = NotificationType((1, 3, 6, 1, 4, 1, 23, 2, 73) + (0,98)).setObjects(("Novell-License-Server-Trap-MIB", "serverName"), ("Novell-License-Server-Trap-MIB", "trapTime"))
nlsTrapUnloaded = NotificationType((1, 3, 6, 1, 4, 1, 23, 2, 73) + (0,99)).setObjects(("Novell-License-Server-Trap-MIB", "serverName"), ("Novell-License-Server-Trap-MIB", "trapTime"))
mibBuilder.exportSymbols("Novell-License-Server-Trap-MIB", installCertifSuccess=installCertifSuccess, addProdInfoResourceError=addProdInfoResourceError, lsUpdateSuccess=lsUpdateSuccess, lsUpdateSystemError=lsUpdateSystemError, userName=userName, lsUpdateBadArg=lsUpdateBadArg, removeAssignSuccess=removeAssignSuccess, transOwnerResourceError=transOwnerResourceError, nlsTrapLoaded=nlsTrapLoaded, addProdInfoBadArg=addProdInfoBadArg, lsRelResouceError=lsRelResouceError, installCertifBadArg=installCertifBadArg, appName=appName, installCertifAuthError=installCertifAuthError, nlsTrapUnloaded=nlsTrapUnloaded, lsRequestAuthError=lsRequestAuthError, removeAssignBadArg=removeAssignBadArg, addProdInfoSuccess=addProdInfoSuccess, lsUpdateBadHandle=lsUpdateBadHandle, installCertifSystemError=installCertifSystemError, lsRequestResourceError=lsRequestResourceError, removeAssignResourceError=removeAssignResourceError, deleteCertifBadArg=deleteCertifBadArg, addAssignAuthError=addAssignAuthError, nlstrap_mib=nlstrap_mib, deleteCertifSuccess=deleteCertifSuccess, lsRequestNetUnavail=lsRequestNetUnavail, deleteCertifAuthError=deleteCertifAuthError, addAssignSuccess=addAssignSuccess, addProdInfoAuthError=addProdInfoAuthError, lsRelSystemError=lsRelSystemError, lsRequestInsuffUnits=lsRequestInsuffUnits, lsUpdateLicTerm=lsUpdateLicTerm, lsRequestBadArg=lsRequestBadArg, addAssignResourceError=addAssignResourceError, installCertifResourceError=installCertifResourceError, lsRequestSuccess=lsRequestSuccess, deleteCertifResourceError=deleteCertifResourceError, mibDoc=mibDoc, addProdInfoSystemError=addProdInfoSystemError, removeAssignSystemError=removeAssignSystemError, lsRelBadHandle=lsRelBadHandle, lsUpdateLicUnavail=lsUpdateLicUnavail, networkAddress=networkAddress, trapinfo=trapinfo, macAddress=macAddress, deleteCertifSystemError=deleteCertifSystemError, lsUpdateInsuffUnits=lsUpdateInsuffUnits, removeAssignAuthError=removeAssignAuthError, lsUpdateAuthError=lsUpdateAuthError, 
serverName=serverName, lsRequestLicUnavail=lsRequestLicUnavail, lsRelAuthError=lsRelAuthError, lsUpdateResouceError=lsUpdateResouceError, transOwnerSystemError=transOwnerSystemError, transOwnerBadArg=transOwnerBadArg, transOwnerSuccess=transOwnerSuccess, lsRelSuccess=lsRelSuccess, transOwnerAuthError=transOwnerAuthError, addAssignBadArg=addAssignBadArg, lsUpdateLicExpired=lsUpdateLicExpired, lsRequestSystemError=lsRequestSystemError, novell=novell, trapTime=trapTime, lsRelBadArg=lsRelBadArg, addAssignSystemError=addAssignSystemError)
| 253.193182
| 2,520
| 0.72968
| 2,711
| 22,281
| 5.995942
| 0.078569
| 0.192741
| 0.281698
| 0.341003
| 0.713257
| 0.692341
| 0.692341
| 0.692341
| 0.691049
| 0.678868
| 0
| 0.047179
| 0.066783
| 22,281
| 87
| 2,521
| 256.103448
| 0.734574
| 0.015888
| 0
| 0
| 0
| 0
| 0.451684
| 0.330322
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.0875
| 0
| 0.0875
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
c53143c96ad6e5bb1a8d139a0e00799f9470eee7
| 20
|
py
|
Python
|
remotepdb_client/__init__.py
|
MartyMacGyver/remote-pdb-client
|
71be4f05694fd2ec7ac8d86dcb6642ab15bd5280
|
[
"MIT"
] | 1
|
2020-07-23T22:19:07.000Z
|
2020-07-23T22:19:07.000Z
|
remotepdb_client/__init__.py
|
MartyMacGyver/remote-pdb-client
|
71be4f05694fd2ec7ac8d86dcb6642ab15bd5280
|
[
"MIT"
] | 3
|
2019-11-05T23:31:03.000Z
|
2020-05-17T03:03:11.000Z
|
remotepdb_client/__init__.py
|
MartyMacGyver/remote-pdb-client
|
71be4f05694fd2ec7ac8d86dcb6642ab15bd5280
|
[
"MIT"
] | null | null | null |
# Init # noqa H104
| 10
| 19
| 0.6
| 3
| 20
| 4
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.214286
| 0.3
| 20
| 1
| 20
| 20
| 0.642857
| 0.8
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
c56dccbd3822d0725ed4774d799104d05e2350ce
| 177
|
py
|
Python
|
hc/providers/customoauth2/urls.py
|
pjaudiomv/healthchecks-oauth-login
|
bbf5f7ccd34a25516959c6980e137ade93266e9b
|
[
"BSD-3-Clause"
] | null | null | null |
hc/providers/customoauth2/urls.py
|
pjaudiomv/healthchecks-oauth-login
|
bbf5f7ccd34a25516959c6980e137ade93266e9b
|
[
"BSD-3-Clause"
] | 7
|
2020-06-05T23:16:36.000Z
|
2022-02-10T08:33:36.000Z
|
hc/providers/customoauth2/urls.py
|
pjaudiomv/healthchecks-oauth-login
|
bbf5f7ccd34a25516959c6980e137ade93266e9b
|
[
"BSD-3-Clause"
] | null | null | null |
from allauth.socialaccount.providers.oauth2.urls import default_urlpatterns
from .provider import CustomOAuth2Provider
urlpatterns = default_urlpatterns(CustomOAuth2Provider)
| 29.5
| 75
| 0.881356
| 17
| 177
| 9.058824
| 0.647059
| 0.233766
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.018293
| 0.073446
| 177
| 5
| 76
| 35.4
| 0.920732
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.666667
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
3dbd527ccd6210bdf9082f94460e5e38efe2d102
| 96
|
py
|
Python
|
ski_conditions/apps/app_scraping/admin.py
|
JKProjects-Org/ski-conditions
|
e7f9350bb3c290853f49f65e30d495ee0aa3b737
|
[
"MIT"
] | 2
|
2019-11-03T16:37:33.000Z
|
2020-01-08T19:05:20.000Z
|
ski_conditions/apps/app_scraping/admin.py
|
JKProjects-Org/ski-conditions
|
e7f9350bb3c290853f49f65e30d495ee0aa3b737
|
[
"MIT"
] | 8
|
2019-11-04T02:49:30.000Z
|
2022-02-10T12:22:15.000Z
|
ski_conditions/apps/app_scraping/admin.py
|
JKProjects-Org/ski-conditions
|
e7f9350bb3c290853f49f65e30d495ee0aa3b737
|
[
"MIT"
] | null | null | null |
from django.contrib import admin
from .models import SkiResort
admin.site.register(SkiResort)
| 16
| 32
| 0.822917
| 13
| 96
| 6.076923
| 0.692308
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.114583
| 96
| 5
| 33
| 19.2
| 0.929412
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.666667
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
3dfc471c8f87842d940abc20e4bc063e9106ba2c
| 46
|
py
|
Python
|
slimmermeten/__init__.py
|
puittenbroek/slimmermeten
|
95769d18f764be0dbc905465b8e420b070dfe531
|
[
"BSD-3-Clause"
] | 1
|
2019-03-28T16:03:23.000Z
|
2019-03-28T16:03:23.000Z
|
slimmermeten/__init__.py
|
puittenbroek/slimmermeten
|
95769d18f764be0dbc905465b8e420b070dfe531
|
[
"BSD-3-Clause"
] | null | null | null |
slimmermeten/__init__.py
|
puittenbroek/slimmermeten
|
95769d18f764be0dbc905465b8e420b070dfe531
|
[
"BSD-3-Clause"
] | null | null | null |
import models
import views
import templatetags
| 15.333333
| 19
| 0.891304
| 6
| 46
| 6.833333
| 0.666667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.108696
| 46
| 3
| 19
| 15.333333
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
9a7ac6b6f53c236da3fe613408068df8d65e568c
| 143
|
py
|
Python
|
example_snippets/multimenus_snippets/Snippets/SciPy/Special functions/Legendre Functions/sph_harm Spherical harmonic of degree $n geq 0$ and order $m leq n$.py
|
kuanpern/jupyterlab-snippets-multimenus
|
477f51cfdbad7409eab45abe53cf774cd70f380c
|
[
"BSD-3-Clause"
] | null | null | null |
example_snippets/multimenus_snippets/Snippets/SciPy/Special functions/Legendre Functions/sph_harm Spherical harmonic of degree $n geq 0$ and order $m leq n$.py
|
kuanpern/jupyterlab-snippets-multimenus
|
477f51cfdbad7409eab45abe53cf774cd70f380c
|
[
"BSD-3-Clause"
] | null | null | null |
example_snippets/multimenus_snippets/Snippets/SciPy/Special functions/Legendre Functions/sph_harm Spherical harmonic of degree $n geq 0$ and order $m leq n$.py
|
kuanpern/jupyterlab-snippets-multimenus
|
477f51cfdbad7409eab45abe53cf774cd70f380c
|
[
"BSD-3-Clause"
] | 1
|
2021-02-04T04:51:48.000Z
|
2021-02-04T04:51:48.000Z
|
# Note: n >= 0 and |m| <= n; azimuthal angle in [0, 2pi) and polar in [0, pi]
special.sph_harm(order_m, degree_n, azimuthal_angle, polar_angle)
| 71.5
| 77
| 0.699301
| 27
| 143
| 3.518519
| 0.592593
| 0.210526
| 0.315789
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.033058
| 0.153846
| 143
| 2
| 78
| 71.5
| 0.752066
| 0.524476
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
9accbd8d9ba052791f89983224f8c825229cf1c2
| 135
|
py
|
Python
|
docker_sdk_api/domain/models/models_info.py
|
BMW-InnovationLab/BMW-Semantic-Segmentation-Training-GUI
|
902f35a7e367e635898f687b16a830db892fbaa5
|
[
"Apache-2.0"
] | 20
|
2021-07-13T13:08:57.000Z
|
2022-03-29T09:38:00.000Z
|
docker_sdk_api/domain/models/models_info.py
|
BMW-InnovationLab/BMW-Semantic-Segmentation-Training-GUI
|
902f35a7e367e635898f687b16a830db892fbaa5
|
[
"Apache-2.0"
] | null | null | null |
docker_sdk_api/domain/models/models_info.py
|
BMW-InnovationLab/BMW-Semantic-Segmentation-Training-GUI
|
902f35a7e367e635898f687b16a830db892fbaa5
|
[
"Apache-2.0"
] | 2
|
2021-07-12T08:42:53.000Z
|
2022-03-04T18:41:25.000Z
|
from typing import List
from pydantic import BaseModel
class ModelsInfo(BaseModel):
networks: List[str]
backbones: List[str]
| 16.875
| 30
| 0.755556
| 17
| 135
| 6
| 0.647059
| 0.137255
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.177778
| 135
| 8
| 31
| 16.875
| 0.918919
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.4
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
9ace726986cc225b9ff15e4c370b6daa4db1bacd
| 7,169
|
py
|
Python
|
tests/test_parser.py
|
gridsmartercities/aws-dynamodb-parser
|
eae2ceed30ada7655922a5202871035db91d9054
|
[
"MIT"
] | 2
|
2021-06-14T08:29:51.000Z
|
2021-07-30T10:29:51.000Z
|
tests/test_parser.py
|
gridsmartercities/aws-dynamodb-parser
|
eae2ceed30ada7655922a5202871035db91d9054
|
[
"MIT"
] | null | null | null |
tests/test_parser.py
|
gridsmartercities/aws-dynamodb-parser
|
eae2ceed30ada7655922a5202871035db91d9054
|
[
"MIT"
] | null | null | null |
from unittest import TestCase
from aws_dynamodb_parser import parse
class ParserTests(TestCase):
    """Tests for ``aws_dynamodb_parser.parse``.

    Each test feeds ``parse`` a DynamoDB-typed structure (the ``{"S": ...}``,
    ``{"N": ...}`` etc. attribute-value format) and asserts the plain-Python
    equivalent comes back.  The base64 literal used throughout decodes to
    ``b"this text is base64-encoded"``.
    """

    def test_parse_string_type_in_dictionary(self):
        # "S" attributes become plain str values.
        given_data = {"String": {"S": "Hello World!"}}
        expected_data = {"String": "Hello World!"}
        self.assertEqual(expected_data, parse(given_data))

    def test_parse_list_of_string_types(self):
        # A top-level list is parsed element-wise.
        given_data = [
            {"first": {"S": "Hello"}},
            {"second": {"S": "World"}}
        ]
        expected_data = [
            {"first": "Hello"},
            {"second": "World"}
        ]
        self.assertEqual(expected_data, parse(given_data))

    def test_parse_does_not_modify_input_object(self):
        # After parsing, the input must still be in DynamoDB form, i.e.
        # not equal to the parsed output.
        given_data = {"String": {"S": "Hello World!"}}
        expected_data = {"String": "Hello World!"}
        self.assertEqual(expected_data, parse(given_data))
        self.assertNotEqual(expected_data, given_data)

    def test_parse_integer_type_in_dictionary(self):
        # "N" with an integral string becomes an int.
        given_data = {"Number": {"N": "1337"}}
        expected_data = {"Number": 1337}
        self.assertEqual(expected_data, parse(given_data))

    def test_parse_float_type_in_dictionary(self):
        # "N" with a decimal point becomes a float.
        given_data = {"Number": {"N": "13.37"}}
        expected_data = {"Number": 13.37}
        self.assertEqual(expected_data, parse(given_data))

    def test_parse_bytes_type_in_dictionary(self):
        # "B" attributes are base64-decoded to bytes.
        given_data = {"Bytes": {"B": "dGhpcyB0ZXh0IGlzIGJhc2U2NC1lbmNvZGVk"}}
        expected_data = {"Bytes": b"this text is base64-encoded"}
        self.assertEqual(expected_data, parse(given_data))

    def test_parse_string_set_in_dictionary(self):
        # "SS" (string set) is returned as a list of str, order preserved.
        given_data = {"StringSet": {"SS": ["Hello", "World", "!"]}}
        expected_data = {"StringSet": ["Hello", "World", "!"]}
        self.assertEqual(expected_data, parse(given_data))

    def test_parse_number_set_in_dictionary(self):
        # "NS" members are individually converted to int/float.
        given_data = {"NumberSet": {"NS": ["1337", "13.37"]}}
        expected_data = {"NumberSet": [1337, 13.37]}
        self.assertEqual(expected_data, parse(given_data))

    def test_parse_byte_set_in_dictionary(self):
        # "BS" members are individually base64-decoded.
        given_data = {"ByteSet": {"BS": ["U3Vubnk=", "UmFpbnk="]}}
        expected_data = {"ByteSet": [b"Sunny", b"Rainy"]}
        self.assertEqual(expected_data, parse(given_data))

    def test_parse_map_in_dictionary(self):
        # "M" (map) values are parsed recursively.
        given_data = {
            "Map": {
                "M": {
                    "String": {"S": "Hello World!"},
                    "Integer": {"N": "1337"},
                    "Float": {"N": "13.37"},
                    "Bytes": {"B": "dGhpcyB0ZXh0IGlzIGJhc2U2NC1lbmNvZGVk"},
                    "StringSet": {"SS": ["Hello", "World", "!"]},
                    "NumberSet": {"NS": ["1337", "13.37"]},
                    "ByteSet": {"BS": ["U3Vubnk=", "UmFpbnk="]}
                }
            }
        }
        expected_data = {
            "Map": {
                "String": "Hello World!",
                "Integer": 1337,
                "Float": 13.37,
                "Bytes": b"this text is base64-encoded",
                "StringSet": ["Hello", "World", "!"],
                "NumberSet": [1337, 13.37],
                "ByteSet": [b"Sunny", b"Rainy"]
            }
        }
        self.assertEqual(expected_data, parse(given_data))

    def test_parse_list_in_dictionary(self):
        # "L" (list) entries are parsed recursively into a flat Python list.
        given_data = {
            "List": {
                "L": [
                    {"S": "Hello World!"},
                    {"N": "1337"},
                    {"N": "13.37"},
                    {"B": "dGhpcyB0ZXh0IGlzIGJhc2U2NC1lbmNvZGVk"},
                    {"SS": ["Hello", "World", "!"]},
                    {"NS": ["1337", "13.37"]},
                    {"BS": ["U3Vubnk=", "UmFpbnk="]}
                ]
            }
        }
        expected_data = {
            "List": ["Hello World!", 1337, 13.37, b"this text is base64-encoded", ["Hello", "World", "!"],
                     [1337, 13.37], [b"Sunny", b"Rainy"]]
        }
        self.assertEqual(expected_data, parse(given_data))

    def test_parse_null_type_in_dictionary(self):
        # "NULL" attributes map to None.
        given_data = {"DoesntExist": {"NULL": True}}
        expected_data = {"DoesntExist": None}
        self.assertEqual(expected_data, parse(given_data))

    def test_parse_bool_type_in_dictionary(self):
        # "BOOL" attributes keep their truth value.
        given_data = {
            "BoolTrue": {"BOOL": True},
            "BoolFalse": {"BOOL": False}
        }
        expected_data = {"BoolTrue": True, "BoolFalse": False}
        self.assertEqual(expected_data, parse(given_data))

    def test_parse_multiple_types_in_dictionary(self):
        # Integration case: every supported type in one document.
        given_data = {
            "String": {"S": "Hello World!"},
            "Integer": {"N": "1337"},
            "Float": {"N": "13.37"},
            "Bytes": {"B": "dGhpcyB0ZXh0IGlzIGJhc2U2NC1lbmNvZGVk"},
            "StringSet": {"SS": ["Hello", "World", "!"]},
            "NumberSet": {"NS": ["1337", "13.37"]},
            "ByteSet": {"BS": ["U3Vubnk=", "UmFpbnk="]},
            "Map": {
                "M": {
                    "String": {"S": "Hello World!"},
                    "Integer": {"N": "1337"},
                    "Float": {"N": "13.37"},
                    "Bytes": {"B": "dGhpcyB0ZXh0IGlzIGJhc2U2NC1lbmNvZGVk"},
                    "StringSet": {"SS": ["Hello", "World", "!"]},
                    "NumberSet": {"NS": ["1337", "13.37"]},
                    "ByteSet": {"BS": ["U3Vubnk=", "UmFpbnk="]}
                }
            },
            "List": {
                "L": [
                    {"S": "Hello World!"},
                    {"N": "1337"},
                    {"N": "13.37"},
                    {"B": "dGhpcyB0ZXh0IGlzIGJhc2U2NC1lbmNvZGVk"},
                    {"SS": ["Hello", "World", "!"]},
                    {"NS": ["1337", "13.37"]},
                    {"BS": ["U3Vubnk=", "UmFpbnk="]}
                ]
            },
            "DoesntExist": {"NULL": True},
            "BoolTrue": {"BOOL": True},
            "BoolFalse": {"BOOL": False}
        }
        expected_data = {
            "String": "Hello World!",
            "Integer": 1337,
            "Float": 13.37,
            "Bytes": b"this text is base64-encoded",
            "StringSet": ["Hello", "World", "!"],
            "NumberSet": [1337, 13.37],
            "ByteSet": [b"Sunny", b"Rainy"],
            "Map": {
                "String": "Hello World!",
                "Integer": 1337,
                "Float": 13.37,
                "Bytes": b"this text is base64-encoded",
                "StringSet": ["Hello", "World", "!"],
                "NumberSet": [1337, 13.37],
                "ByteSet": [b"Sunny", b"Rainy"]
            },
            "List": ["Hello World!", 1337, 13.37, b"this text is base64-encoded", ["Hello", "World", "!"],
                     [1337, 13.37], [b"Sunny", b"Rainy"]],
            "DoesntExist": None,
            "BoolTrue": True,
            "BoolFalse": False
        }
        self.assertEqual(expected_data, parse(given_data))

    def test_parse_unknown_type_in_dictionary(self):
        # Unrecognised attribute-type codes raise TypeError.
        given_data = {
            "Unknown": {"Q": "Quanta"}
        }
        self.assertRaises(TypeError, parse, given_data)
| 35.845
| 106
| 0.477612
| 658
| 7,169
| 4.99696
| 0.120061
| 0.084854
| 0.054745
| 0.114964
| 0.818431
| 0.787409
| 0.711071
| 0.702251
| 0.652372
| 0.641119
| 0
| 0.051096
| 0.344818
| 7,169
| 199
| 107
| 36.025126
| 0.648925
| 0
| 0
| 0.578313
| 0
| 0
| 0.22667
| 0.03013
| 0
| 0
| 0
| 0
| 0.096386
| 1
| 0.090361
| false
| 0
| 0.012048
| 0
| 0.108434
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
9ae355b15140340afc24b0ad50d42449cad2323e
| 122
|
py
|
Python
|
src/main/resources/resource/Clock/clock_6_clock_stopped.py
|
holgerfriedrich/myrobotlab
|
c6d6c9e564cb8e68844918fd713b8921171667d9
|
[
"Apache-2.0"
] | 179
|
2015-01-13T17:39:13.000Z
|
2022-03-29T03:52:15.000Z
|
src/main/resources/resource/Clock/clock_6_clock_stopped.py
|
holgerfriedrich/myrobotlab
|
c6d6c9e564cb8e68844918fd713b8921171667d9
|
[
"Apache-2.0"
] | 499
|
2015-01-11T15:26:23.000Z
|
2022-03-04T14:10:34.000Z
|
src/main/resources/resource/Clock/clock_6_clock_stopped.py
|
holgerfriedrich/myrobotlab
|
c6d6c9e564cb8e68844918fd713b8921171667d9
|
[
"Apache-2.0"
] | 105
|
2015-01-17T14:07:08.000Z
|
2022-03-19T17:43:50.000Z
|
# Register this script's clock_stopped() as the handler for the service's
# "clockStopped" event.  NOTE(review): `clock` is presumably a MyRobotLab
# Clock service injected into the script scope — not defined in this file.
# The callback is wired by name ("clock_stopped"), so the function name
# below must not change.
clock.addListener("clockStopped", "python", "clock_stopped")


def clock_stopped():
    # Invoked by the framework when the clock service stops.
    print("The clock has been stopped")
| 30.5
| 60
| 0.737705
| 15
| 122
| 5.866667
| 0.666667
| 0.272727
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.114754
| 122
| 4
| 61
| 30.5
| 0.814815
| 0
| 0
| 0
| 0
| 0
| 0.463415
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| true
| 0
| 0
| 0
| 0.333333
| 0.333333
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
b16c762b4c09910b3dc8fd915db266f09c4937e5
| 259
|
py
|
Python
|
backend/oauth2/serializers.py
|
projectpai/paipass
|
8b8e70b6808bf026cf957e240c7eed7bfcf4c55d
|
[
"MIT"
] | 3
|
2021-04-17T10:20:26.000Z
|
2022-03-08T07:36:13.000Z
|
backend/oauth2/serializers.py
|
projectpai/paipass
|
8b8e70b6808bf026cf957e240c7eed7bfcf4c55d
|
[
"MIT"
] | null | null | null |
backend/oauth2/serializers.py
|
projectpai/paipass
|
8b8e70b6808bf026cf957e240c7eed7bfcf4c55d
|
[
"MIT"
] | null | null | null |
from rest_framework import serializers
from .models import PaipassApplication
class ApplicationsListSerializer(serializers.ModelSerializer):
    """Serializer for listing PaipassApplication records.

    Exposes only the fields needed for an application directory/list view.
    """

    class Meta:
        model = PaipassApplication
        # Read set limited to identity and display attributes; no secrets.
        fields = ('id', 'logo_url', 'namespace', 'description',)
| 28.777778
| 64
| 0.749035
| 23
| 259
| 8.347826
| 0.782609
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.166023
| 259
| 8
| 65
| 32.375
| 0.888889
| 0
| 0
| 0
| 0
| 0
| 0.116279
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0.333333
| 0.333333
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| 0
|
0
| 5
|
b173ba959f6d60d6203f9d282ff72f2d7661036e
| 19,538
|
py
|
Python
|
calendar_resource/tests/test_calendar_event.py
|
woodbrettm/odoo
|
bd843b901f2c3852fe069eee705692fd21cffa15
|
[
"BSD-Source-Code"
] | null | null | null |
calendar_resource/tests/test_calendar_event.py
|
woodbrettm/odoo
|
bd843b901f2c3852fe069eee705692fd21cffa15
|
[
"BSD-Source-Code"
] | null | null | null |
calendar_resource/tests/test_calendar_event.py
|
woodbrettm/odoo
|
bd843b901f2c3852fe069eee705692fd21cffa15
|
[
"BSD-Source-Code"
] | null | null | null |
# -*- coding: utf-8 -*-
# Copyright 2017 Laslabs Inc.
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
from mock import patch
from odoo import fields
from odoo.exceptions import ValidationError
from .setup import Setup
from .setup import MOCK_FORMATS
class TestCalendarEvent(Setup):
    """Tests for resource double-booking, leave, and working-time checks
    on calendar events.

    Conventions used throughout (provided by the ``Setup`` base class):
    ``_create_event()`` creates a default event; ``_get_datetime_interval``
    takes ``(day_offset, time, day_offset, time)`` and returns a
    ``(start, stop)`` pair.  Fixes applied in review: ``assertEquals`` is a
    deprecated alias removed in Python 3.12 — replaced with ``assertEqual``;
    a "witin" typo in one failure message corrected.
    """

    def test_overlap_left_outside_date_allow_double_book_true(self):
        """ Test overlap date no raise Validation if allow_double_book True """
        self.resource_1.allow_double_book = True
        self._create_event()
        start_stop = self._get_datetime_interval(
            0, '00:00:00',
            2, '00:00:00',
        )
        try:
            self._create_event({
                'start': start_stop[0],
                'stop': start_stop[1],
                'allday': True,
            })
            self.assertTrue(True)
        except ValidationError:
            self.fail(
                'Should not raise ValidationError '
                'if allow_double_book is True'
            )

    def test_overlap_left_outside_time_allow_double_book_true(self):
        """ Test overlap time no raise Validation allow_dbl_book True """
        self.resource_1.allow_double_book = True
        self._create_event()
        start_stop = self._get_datetime_interval(
            1, '11:00:00',
            1, '12:30:00',
        )
        try:
            self._create_event({
                'start': start_stop[0],
                'stop': start_stop[1],
                'allday': True,
            })
            self.assertTrue(True)
        except ValidationError:
            self.fail(
                'Should not raise ValidationError '
                'if allow_double_book is True'
            )

    def test_overlap_left_outside_date(self):
        """ Test left side overlap raise ValidationError """
        self._create_event()
        start_stop = self._get_datetime_interval(
            0, '00:00:00',
            2, '00:00:00',
        )
        with self.assertRaises(ValidationError):
            self._create_event({
                'start': start_stop[0],
                'stop': start_stop[1],
                'allday': True,
            })

    def test_overlap_right_outside_date(self):
        """ Test right side overlap raise ValidationError """
        self._create_event()
        start_stop = self._get_datetime_interval(
            2, '00:00:00',
            4, '00:00:00',
        )
        with self.assertRaises(ValidationError):
            self._create_event({
                'start': start_stop[0],
                'stop': start_stop[1],
                'allday': True,
            })

    def test_match_left_outside_date(self):
        """ Test left side match not ValidationError """
        self._create_event()
        start_stop = self._get_datetime_interval(
            0, '00:00:00',
            1, '12:00:00',
        )
        try:
            self._create_event({
                'start': start_stop[0],
                'stop': start_stop[1],
                'allday': True,
            })
            self.assertTrue(True)
        except ValidationError:
            self.fail(
                'Should not raise ValidationError '
                'if stop datetime same as existing event '
                'start datetime',
            )

    def test_match_right_outside_date(self):
        """ Test date right side match not ValidationError """
        self._create_event()
        start_stop = self._get_datetime_interval(
            3, '14:00:00',
            4, '00:00:00',
        )
        try:
            self._create_event({
                'start': start_stop[0],
                'stop': start_stop[1],
                'allday': True,
            })
            self.assertTrue(True)
        except ValidationError:
            self.fail(
                'Should not raise ValidationError '
                'if start datetime same as existing event '
                'stop datetime',
            )

    def test_overlap_both_inside_time(self):
        """ Test time overlap both inside raise ValidationError """
        self._create_event()
        start_stop = self._get_datetime_interval(
            2, '00:00:00',
            2, '10:00:00',
        )
        with self.assertRaises(ValidationError):
            self._create_event({
                'start': start_stop[0],
                'stop': start_stop[1],
                'allday': True,
            })

    def test_match_both_inside_date(self):
        """ Test date match both inside raise ValidationError """
        self._create_event()
        start_stop = self._get_datetime_interval(
            1, '12:00:00',
            3, '14:00:00',
        )
        with self.assertRaises(ValidationError):
            self._create_event({
                'start': start_stop[0],
                'stop': start_stop[1],
                'allday': True,
            })

    def test_overlap_both_outside_date(self):
        """ Test date overlap both outside raise ValidationError """
        self._create_event()
        start_stop = self._get_datetime_interval(
            0, '00:00:00',
            4, '00:00:00',
        )
        with self.assertRaises(ValidationError):
            self._create_event({
                'start': start_stop[0],
                'stop': start_stop[1],
                'allday': True,
            })

    def test_overlap_left_outside_time(self):
        """ Test time left side overlap raise ValidationError """
        self._create_event()
        start_stop = self._get_datetime_interval(
            1, '00:00:00',
            1, '12:30:00',
        )
        with self.assertRaises(ValidationError):
            self._create_event({
                'start': start_stop[0],
                'stop': start_stop[1],
                'allday': True,
            })

    def test_overlap_right_outside_time(self):
        """ Test time right side overlap raise ValidationError """
        self._create_event()
        start_stop = self._get_datetime_interval(
            3, '13:00:00',
            3, '15:30:00',
        )
        with self.assertRaises(ValidationError):
            self._create_event({
                'start': start_stop[0],
                'stop': start_stop[1],
                'allday': True,
            })

    def test_match_left_outside_time(self):
        """ Test time left side match not ValidationError """
        self._create_event()
        start_stop = self._get_datetime_interval(
            1, '00:00:00',
            1, '12:00:00',
        )
        try:
            self._create_event({
                'start': start_stop[0],
                'stop': start_stop[1],
                'allday': True,
            })
            self.assertTrue(True)
        except ValidationError:
            self.fail(
                'Should not raise ValidationError '
                'if stop time same as existing event '
                'start time',
            )

    def test_match_right_outside_time(self):
        """ Test time date right side match not ValidationError """
        self._create_event()
        start_stop = self._get_datetime_interval(
            3, '14:00:00',
            3, '16:00:00',
        )
        try:
            self._create_event({
                'start': start_stop[0],
                'stop': start_stop[1],
                'allday': True,
            })
            self.assertTrue(True)
        except ValidationError:
            self.fail(
                'Should not raise ValidationError '
                'if start time same as existing event '
                'stop time',
            )

    def test_check_resource_ids_categ_ids_raise_error(self):
        """ Test raise ValidationError if resource not allowed """
        existing_event = self._create_event()
        with self.assertRaises(ValidationError):
            existing_event.write({
                'resource_ids': [(4, [self.resource_2.id])],
                'categ_ids': [(4, [self.event_type_5.id])],
            })

    def test_check_resource_ids_categ_ids_no_error(self):
        """ Test no error if allowed resource added """
        existing_event = self._create_event()
        try:
            existing_event.write({
                'resource_ids': [(4, [self.resource_2.id])],
            })
            self.assertTrue(True)
        except ValidationError:
            self.fail(
                'Should not raise validation error if '
                'eligible resource added.'
            )

    def test_check_resource_ids_categ_ids_no_error_resource(self):
        """ Test no error if allowed resource added when no categ """
        existing_event = self._create_event()
        # (5, 0, 0) is the Odoo one2many/many2many "unlink all" command.
        existing_event.write({
            'categ_ids': [(5, 0, 0)],
            'resource_ids': [(5, 0, 0)],
        })
        try:
            existing_event.write({
                'resource_ids': [(4, [self.resource_2.id])],
            })
            self.assertTrue(True)
        except ValidationError:
            self.fail(
                'Should not raise validation error if '
                'eligible resource added.'
            )

    def test_check_resource_ids_categ_ids_no_error_categ(self):
        """ Test no error if allowed categ added when no resource """
        existing_event = self._create_event()
        existing_event.write({
            'categ_ids': [(5, 0, 0)],
            'resource_ids': [(5, 0, 0)],
        })
        try:
            existing_event.write({
                'categ_ids': [(4, [self.event_type_4.id])],
            })
            self.assertTrue(True)
        except ValidationError:
            self.fail(
                'Should not raise validation error if '
                'eligible categ added.'
            )

    def test_event_in_past_true(self):
        """ Test returns true if event in past """
        event = self._create_event({
            'start': '2016-06-01 00:00:00',
            'stop': '2016-06-02 00:00:00',
        })
        self.assertTrue(
            event._event_in_past()
        )

    def test_event_in_past_false(self):
        """ Test returns false if event in future """
        event = self._create_event()
        self.assertFalse(
            event._event_in_past()
        )

    def test_check_resource_leaves_datetime_in_past(self):
        """ Test no validationerror if event in the past """
        self.leave_1.write({
            'date_from': '2015-04-10 12:00:00',
            'date_to': '2015-05-12 14:00:00',
        })
        try:
            self._create_event({
                'start': '2015-04-10 12:00:00',
                'stop': '2015-05-12 12:00:00',
                'allday': True,
            })
            self.assertTrue(True)
        except ValidationError:
            self.fail(
                'Should not raise ValidationError if event in past'
            )

    def test_check_resource_leaves_resource_no_calendar(self):
        """ Test no validationerror if resource has no calendar_id """
        self.resource_1.calendar_id = None
        start_stop = self._get_datetime_interval(
            0, '12:00:00',
            6, '20:00:00'
        )
        self.leave_1.write({
            'date_from': start_stop[0],
            'date_to': start_stop[1],
        })
        try:
            self._create_event({
                'start': start_stop[0],
                'stop': start_stop[1],
            })
            self.assertTrue(True)
        except ValidationError:
            self.fail(
                'Should not raise ValidationError if resource '
                'has no calendar_id'
            )

    def test_check_resource_leaves(self):
        """ Test raise ValidationError if conflicting leave """
        start_stop = self._get_datetime_interval(
            1, '12:00:00',
            3, '12:00:00'
        )
        self.leave_1.write({
            'date_from': start_stop[0],
            'date_to': start_stop[1],
        })
        with self.assertRaises(ValidationError):
            self._create_event({
                'start': start_stop[0],
                'stop': start_stop[1],
            })

    @patch(MOCK_FORMATS)
    def test_format_datetime_intervals_to_str(self, datetime_format):
        """ Test returns correct string """
        datetime_format.return_value = ('%Y-%m-%d', '%H:%M:%S')
        intervals = [
            ('2017-03-07 00:00:00', '2017-03-07 16:00:00'),
            ('2017-03-07 12:00:00', '2017-03-07 20:00:00'),
        ]
        intervals_dt = self._intervals_to_dt([
            ('2017-03-07 00:00:00', '2017-03-07 16:00:00'),
            ('2017-03-07 12:00:00', '2017-03-07 20:00:00'),
        ])
        # NOTE(review): the 'z'-prefixed keys look intentional kwargs of
        # _get_display_time — confirm against its implementation.
        args = {
            'start': intervals[0][0],
            'stop': intervals[0][1],
            'zallday': False,
            'zduration': 24,
        }
        intervals[0] = self.Event._get_display_time(**args)
        args.update({
            'start': intervals[1][0],
            'stop': intervals[1][1],
        })
        intervals[1] = self.Event._get_display_time(**args)
        exp = '%s\n\n%s' % (intervals[0], intervals[1])
        res = self.Event._format_datetime_intervals_to_str(intervals_dt)
        # assertEquals is a deprecated alias (removed in Python 3.12).
        self.assertEqual(
            exp, res,
        )

    def test_check_resource_ids_working_times_past(self):
        """ Test no validationerror if event in past """
        self.resource_1.calendar_id = self.calendar_1
        try:
            self._create_event({
                'start': '2017-03-06 00:00:00',
                'stop': '2017-03-12 00:00:00',
                'allday': False,
            })
            self.assertTrue(True)
        except ValidationError:
            self.fail(
                'Should not fail if event in past'
            )

    def test_check_resource_ids_working_times_overlap_left(self):
        """ Test ValidationError if event overlapping unavailable times """
        self.resource_1.calendar_id = self.calendar_1
        start_stop = self._get_datetime_interval(
            0, '23:00:00',
            2, '20:00:00',
        )
        with self.assertRaises(ValidationError):
            self._create_event({
                'start': start_stop[0],
                'stop': start_stop[1],
                'allday': False,
            })

    def test_check_resource_ids_working_times_match_left(self):
        """ Test no Error if event stop is unavailable time start """
        self.resource_1.calendar_id = self.calendar_1
        start_stop = self._get_datetime_interval(
            1, '00:00:00',
            2, '16:00:00',
        )
        try:
            self._create_event({
                'start': start_stop[0],
                'stop': start_stop[1],
                'allday': False,
            })
            self.assertTrue(True)
        except ValidationError:
            self.fail(
                'Should not raise Error if event stop matches '
                'unavailable time start'
            )

    def test_check_resource_ids_working_times_inside_left(self):
        """ Test no Error if event within working times """
        self.resource_1.calendar_id = self.calendar_1
        start_stop = self._get_datetime_interval(
            1, '00:00:00',
            2, '10:00:00',
        )
        try:
            self._create_event({
                'start': start_stop[0],
                'stop': start_stop[1],
                'allday': False,
            })
            self.assertTrue(True)
        except ValidationError:
            # Typo fixed: "witin" -> "within".
            self.fail(
                'Should not raise Error if event within working times'
            )

    def test_check_resource_ids_working_times_overlap_right(self):
        """ Test ValidationError if event overlapping unavailable times """
        self.resource_1.calendar_id = self.calendar_1
        start_stop = self._get_datetime_interval(
            3, '00:00:00',
            5, '20:00:00',
        )
        with self.assertRaises(ValidationError):
            self._create_event({
                'start': start_stop[0],
                'stop': start_stop[1],
                'allday': False,
            })

    def test_check_resource_ids_working_times_match_right(self):
        """ Test no Error if event stop is unavailable time start """
        self.resource_1.calendar_id = self.calendar_1
        start_stop = self._get_datetime_interval(
            3, '09:00:00',
            4, '00:00:00',
        )
        try:
            self._create_event({
                'start': start_stop[0],
                'stop': start_stop[1],
                'allday': False,
            })
            self.assertTrue(True)
        except ValidationError:
            self.fail(
                'Should not raise Error if event stop matches '
                'unavailable time start'
            )

    def test_check_resource_ids_working_times_right_whole_day(self):
        """ Test ValidationError if event on non-working day """
        self.resource_1.calendar_id = self.calendar_1
        start_stop = self._get_datetime_interval(
            5, '09:00:00',
            6, '00:00:00',
        )
        with self.assertRaises(ValidationError):
            self._create_event({
                'start': start_stop[0],
                'stop': start_stop[1],
                'allday': False,
            })

    def test_check_resource_ids_working_times_right_whole_day_allday(self):
        """ Test ValidationError if allday event on non-working day """
        self.resource_1.calendar_id = self.calendar_1
        start_stop = self._get_datetime_interval(
            5, '00:00:00',
            6, '00:00:00',
        )
        with self.assertRaises(ValidationError):
            self._create_event({
                'start': start_stop[0],
                'stop': start_stop[1],
                'allday': True,
            })

    def test_check_resource_ids_working_times_right_week_allday(self):
        """ Test ValidationError if allday event all week """
        self.resource_1.calendar_id = self.calendar_1
        start_stop = self._get_datetime_interval(
            0, '00:00:00',
            6, '23:59:59',
        )
        with self.assertRaises(ValidationError):
            self._create_event({
                'start': start_stop[0],
                'stop': start_stop[1],
                'allday': True,
            })

    def test_check_resource_ids_working_times_allday_overlap_outside(self):
        """ Test no Error if allday event is on day with 1+ working time """
        self.resource_1.calendar_id = self.calendar_1
        start_stop = self._get_datetime_interval(
            2, '00:00:00',
            4, '00:00:00',
        )
        try:
            self._create_event({
                'start': start_stop[0],
                'stop': start_stop[1],
                'allday': True,
            })
            self.assertTrue(True)
        except ValidationError:
            self.fail(
                'Should not raise Error if event is allday '
                'and there is at least 1 working interval that day '
            )

    def test_get_event_date_list(self):
        """ Test _get_event_date_list returns each started day as datetime """
        event = self._create_event({
            'start': '2016-06-01 00:00:00',
            'stop': '2016-06-03 00:00:00',
        })
        exp = [
            fields.Datetime.from_string('2016-06-01 00:00:00'),
            fields.Datetime.from_string('2016-06-02 00:00:00'),
        ]
        # assertEquals is a deprecated alias (removed in Python 3.12).
        self.assertEqual(
            exp,
            event._get_event_date_list()
        )
| 33.512864
| 79
| 0.524414
| 2,134
| 19,538
| 4.542643
| 0.078725
| 0.040025
| 0.071178
| 0.084588
| 0.856509
| 0.802868
| 0.764906
| 0.720652
| 0.678874
| 0.674954
| 0
| 0.059785
| 0.367335
| 19,538
| 582
| 80
| 33.570447
| 0.724456
| 0.091002
| 0
| 0.690476
| 0
| 0
| 0.13765
| 0
| 0
| 0
| 0
| 0
| 0.06746
| 1
| 0.06746
| false
| 0
| 0.009921
| 0
| 0.079365
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
4933252a987bc00f66bb068aadc83c8b6d6196ae
| 2,377
|
py
|
Python
|
backup_grapa/layers/__init__.py
|
psorus/grapa
|
6af343bb35c466c971ded1876e7a9d00e77cef00
|
[
"MIT"
] | null | null | null |
backup_grapa/layers/__init__.py
|
psorus/grapa
|
6af343bb35c466c971ded1876e7a9d00e77cef00
|
[
"MIT"
] | null | null | null |
backup_grapa/layers/__init__.py
|
psorus/grapa
|
6af343bb35c466c971ded1876e7a9d00e77cef00
|
[
"MIT"
] | null | null | null |
import grapa.layers.gpre4
import grapa.layers.gcomextractdiag
import grapa.layers.gaddzeros
import grapa.layers.gkeepmatcut
import grapa.layers.gcomfullyconnected
import grapa.layers.gliam
import grapa.layers.gsym
import grapa.layers.glam
import grapa.layers.gbuilder
import grapa.layers.gcomgraphand
import grapa.layers.gcomdensediverge
import grapa.layers.gremoveparam
import grapa.layers.gcomgraphand2
import grapa.layers.glom
import grapa.layers.gcomgraphcutter
import grapa.layers.gltk
import grapa.layers.gfeat
import grapa.layers.gpoolgrowth
import grapa.layers.gltknd
import grapa.layers.ggraphstract
import grapa.layers.gpre2
import grapa.layers.gperm
import grapa.layers.gaddbias
import grapa.layers.gcomdex
import grapa.layers.gtlbuilder
import grapa.layers.gcomgraphcombinations
import grapa.layers.gltrivmlp
import grapa.layers.gcomdiagraph
import grapa.layers.gcomreopool
import grapa.layers.gcomdepoollg
import grapa.layers.gcomgraphfrom2param
import grapa.layers.gpartinorm
import grapa.layers.gpre1
import grapa.layers.glim
import grapa.layers.gl
import grapa.layers.glmlp
import grapa.layers.gchooseparam
import grapa.layers.gfromparam
import grapa.layers.glkeep
import grapa.layers.gcomdensemerge
import grapa.layers.kron
import grapa.layers.gshuffle
import grapa.layers.gcutparam
import grapa.layers.glcreate
import grapa.layers.gcomdepool
import grapa.layers.gcomparastract
import grapa.layers.gcompoolmerge
import grapa.layers.gfeatkeep
import grapa.layers.gcomdepoolplus
import grapa.layers.gkeepcutter
import grapa.layers.glbuilder
import grapa.layers.gortho
import grapa.layers.gvaluation
import grapa.layers.gcomparamcombinations
import grapa.layers.ggoparam
import grapa.layers.gcomgraphfromparam
import grapa.layers.gbrokengrowth
import grapa.layers.glm
import grapa.layers.gcutter
import grapa.layers.glacreate
import grapa.layers.gcomjpool
import grapa.layers.gtopk
import grapa.layers.gssort
import grapa.layers.gcomgraphlevel
import grapa.layers.gadd1
import grapa.layers.gecutter
import grapa.layers.gpre3
import grapa.layers.gcompool
import grapa.layers.ghealparam
import grapa.layers.gpre5
import grapa.layers.gcomgraphrepeat
import grapa.layers.gkeepbuilder
import grapa.layers.gtbuilder
import grapa.layers.gpool
import grapa.layers.gmultiply
import grapa.layers.gcomgpool
import grapa.layers.gmake1graph
import grapa.layers.gcomparamlevel
| 30.088608
| 41
| 0.868742
| 312
| 2,377
| 6.61859
| 0.259615
| 0.415496
| 0.642131
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.004052
| 0.065629
| 2,377
| 78
| 42
| 30.474359
| 0.925709
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
49779dc85735a139bc66591f2898df9ccf0ca749
| 182
|
py
|
Python
|
rsden/loader/test.py
|
lidongyv/RSDEN
|
75da87c4981c772dcf4c2a25c8b02bef504ec369
|
[
"Apache-2.0"
] | null | null | null |
rsden/loader/test.py
|
lidongyv/RSDEN
|
75da87c4981c772dcf4c2a25c8b02bef504ec369
|
[
"Apache-2.0"
] | null | null | null |
rsden/loader/test.py
|
lidongyv/RSDEN
|
75da87c4981c772dcf4c2a25c8b02bef504ec369
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# @Author: yulidong
# @Date: 2018-04-06 21:02:16
# @Last Modified by: yulidong
# @Last Modified time: 2018-04-06 21:19:06
import numpy as np
print('fdas')
| 26
| 42
| 0.648352
| 31
| 182
| 3.806452
| 0.741935
| 0.101695
| 0.135593
| 0.169492
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.192053
| 0.17033
| 182
| 7
| 43
| 26
| 0.589404
| 0.763736
| 0
| 0
| 0
| 0
| 0.105263
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0.5
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 1
|
0
| 5
|
4983ddfd1d151a2eec1ebc7697350404c56c489e
| 32
|
py
|
Python
|
setup.py
|
ewjoachim/pypitoken
|
1661574852de179683c57d4236c4a579a6df2856
|
[
"MIT"
] | 5
|
2021-03-12T12:20:34.000Z
|
2021-06-22T13:35:48.000Z
|
setup.py
|
ewjoachim/pypitoken
|
1661574852de179683c57d4236c4a579a6df2856
|
[
"MIT"
] | 58
|
2021-03-03T22:05:11.000Z
|
2022-01-17T09:03:21.000Z
|
setup.py
|
ewjoachim/pypitoken
|
1661574852de179683c57d4236c4a579a6df2856
|
[
"MIT"
] | 1
|
2021-03-09T09:46:42.000Z
|
2021-03-09T09:46:42.000Z
|
# Empty setup.py for dependabot
| 16
| 31
| 0.78125
| 5
| 32
| 5
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.15625
| 32
| 1
| 32
| 32
| 0.925926
| 0.90625
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
499c2ddb538480752a0341e1653ddeff4e57b2bd
| 172
|
py
|
Python
|
src/tentohako/agent/__init__.py
|
Koukyosyumei/TenToHako
|
4007033c2401aa491f6c79e0a670812cd8aa8054
|
[
"MIT"
] | null | null | null |
src/tentohako/agent/__init__.py
|
Koukyosyumei/TenToHako
|
4007033c2401aa491f6c79e0a670812cd8aa8054
|
[
"MIT"
] | null | null | null |
src/tentohako/agent/__init__.py
|
Koukyosyumei/TenToHako
|
4007033c2401aa491f6c79e0a670812cd8aa8054
|
[
"MIT"
] | null | null | null |
from .base import BaseAgent
from .mixmax import AlphaBetaAgent, MinMaxAgent
from .qlearning import QLearningAgent
from .random import RandomAgent
from .uct import UCTAgent
| 28.666667
| 47
| 0.843023
| 21
| 172
| 6.904762
| 0.619048
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.122093
| 172
| 5
| 48
| 34.4
| 0.960265
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
b8dbfe8c72bee095d8f1bb4b1ba904ec10ce407e
| 220
|
py
|
Python
|
application/api/resources/images.py
|
imghack/image_bot
|
d686342afa1862f7fba718e86e6737a57f828e1e
|
[
"MIT"
] | 3
|
2018-01-13T11:57:42.000Z
|
2018-01-14T12:18:05.000Z
|
application/api/resources/images.py
|
imghack/image_bot
|
d686342afa1862f7fba718e86e6737a57f828e1e
|
[
"MIT"
] | 32
|
2018-01-11T22:15:28.000Z
|
2018-03-05T17:09:14.000Z
|
application/api/resources/images.py
|
imghack/image_bot
|
d686342afa1862f7fba718e86e6737a57f828e1e
|
[
"MIT"
] | 1
|
2018-03-13T00:05:57.000Z
|
2018-03-13T00:05:57.000Z
|
from flask_restful import Resource
# TODO: db connection should be one for all blueprints
from application.db.db import get_all_images
class Images(Resource):
    """REST resource returning a single stored image record by id."""

    def get(self, id):
        # `id` indexes/keys into the collection returned by get_all_images();
        # presumably an integer list index — TODO confirm route converter.
        # NOTE(review): no bounds/existence check — a bad id will raise
        # IndexError/KeyError here rather than a clean 404.
        return get_all_images()[id]
| 24.444444
| 54
| 0.754545
| 34
| 220
| 4.735294
| 0.647059
| 0.074534
| 0.149068
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.177273
| 220
| 8
| 55
| 27.5
| 0.889503
| 0.236364
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.125
| 0
| 1
| 0.2
| false
| 0
| 0.4
| 0.2
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
|
0
| 5
|
b8fb30b84dcb0733d8e475694d1a9779fe84dab1
| 119
|
py
|
Python
|
src/Settings.py
|
Lovely-XPP/tkzgeom
|
bf68e139dc05f759542d6611f4dc07f4f2727b92
|
[
"MIT"
] | 41
|
2021-11-24T05:54:08.000Z
|
2022-03-26T10:19:30.000Z
|
src/Settings.py
|
Lovely-XPP/tkzgeom
|
bf68e139dc05f759542d6611f4dc07f4f2727b92
|
[
"MIT"
] | 1
|
2022-02-28T04:34:51.000Z
|
2022-03-07T10:49:27.000Z
|
src/Settings.py
|
Lovely-XPP/tkzgeom
|
bf68e139dc05f759542d6611f4dc07f4f2727b92
|
[
"MIT"
] | 10
|
2021-11-24T07:35:17.000Z
|
2022-03-25T18:42:14.000Z
|
class Settings:
    """Static configuration constants for cursor snapping."""

    # Snap capture radius — presumably in screen pixels; TODO confirm units.
    SNAP_RADIUS = 4
    # Snap-grid dimensions (columns x rows).
    SNAP_GRID_NUM_COLS = 40
    SNAP_GRID_NUM_ROWS = 40
    # Whether snapping to the lattice/grid is enabled by default.
    SNAP_TO_LATTICE = True
| 19.833333
| 27
| 0.705882
| 19
| 119
| 3.947368
| 0.684211
| 0.213333
| 0.293333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.05618
| 0.252101
| 119
| 5
| 28
| 23.8
| 0.786517
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 5
|
770b85177e48ff60409fe802583efe195af81d18
| 153
|
py
|
Python
|
flaskr/NetApp_9_3/__init__.py
|
cdare77/SCAP
|
22942a6794863de28ebc9b2c2c631ccb4b5523c4
|
[
"CNRI-Python"
] | null | null | null |
flaskr/NetApp_9_3/__init__.py
|
cdare77/SCAP
|
22942a6794863de28ebc9b2c2c631ccb4b5523c4
|
[
"CNRI-Python"
] | null | null | null |
flaskr/NetApp_9_3/__init__.py
|
cdare77/SCAP
|
22942a6794863de28ebc9b2c2c631ccb4b5523c4
|
[
"CNRI-Python"
] | null | null | null |
# Package facade: re-export the NetApp SDK entry points so consumers can
# `from flaskr.NetApp_9_3 import NaServer` (etc.).  __all__ restricts what
# a wildcard import of THIS package exposes, even though the star imports
# below pull in every public name from each SDK module.
__all__ = ['NaServer', 'NaElement', 'NaErrno', 'DfmErrno']
from NaServer import *
from NaElement import *
from NaErrno import *
from DfmErrno import *
| 19.125
| 58
| 0.72549
| 17
| 153
| 6.294118
| 0.411765
| 0.280374
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.156863
| 153
| 7
| 59
| 21.857143
| 0.829457
| 0
| 0
| 0
| 0
| 0
| 0.210526
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.8
| 0
| 0.8
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
77369c739180f999a4d4d3cfaf8ef763492051da
| 18
|
py
|
Python
|
project_name/some_file.py
|
freelawproject/test-flake8
|
4e12d59eca4f66b8f8feabafd193d6e9f0945413
|
[
"BSD-2-Clause"
] | null | null | null |
project_name/some_file.py
|
freelawproject/test-flake8
|
4e12d59eca4f66b8f8feabafd193d6e9f0945413
|
[
"BSD-2-Clause"
] | null | null | null |
project_name/some_file.py
|
freelawproject/test-flake8
|
4e12d59eca4f66b8f8feabafd193d6e9f0945413
|
[
"BSD-2-Clause"
] | null | null | null |
a = ['a','b','c']
| 9
| 17
| 0.222222
| 4
| 18
| 1
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.166667
| 18
| 1
| 18
| 18
| 0.266667
| 0
| 0
| 0
| 0
| 0
| 0.166667
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
7756841f3d7dcfa846a17a0b0a06a15f2eb294a4
| 6,486
|
py
|
Python
|
resources/dot_PyCharm/system/python_stubs/-762174762/pandas/_libs/join.py
|
basepipe/developer_onboarding
|
05b6a776f8974c89517868131b201f11c6c2a5ad
|
[
"MIT"
] | 1
|
2020-04-20T02:27:20.000Z
|
2020-04-20T02:27:20.000Z
|
resources/dot_PyCharm/system/python_stubs/cache/8ec8e0c09fd2c9183727312444e7d6a5548c52799ab4a704ad5f81526056d106/pandas/_libs/join.py
|
basepipe/developer_onboarding
|
05b6a776f8974c89517868131b201f11c6c2a5ad
|
[
"MIT"
] | null | null | null |
resources/dot_PyCharm/system/python_stubs/cache/8ec8e0c09fd2c9183727312444e7d6a5548c52799ab4a704ad5f81526056d106/pandas/_libs/join.py
|
basepipe/developer_onboarding
|
05b6a776f8974c89517868131b201f11c6c2a5ad
|
[
"MIT"
] | null | null | null |
# encoding: utf-8
# module pandas._libs.join
# from C:\Python27\lib\site-packages\pandas\_libs\join.pyd
# by generator 1.147
# no doc
# imports
import numpy as np # C:\Python27\lib\site-packages\numpy\__init__.pyc
import __builtin__ as __builtins__ # <module '__builtin__' (built-in)>
from pandas._libs.algos import ensure_platform_int, groupsort_indexer
# functions
def asof_join_backward(*args, **kwargs): # real signature unknown
pass
def asof_join_backward_on_X_by_Y(*args, **kwargs): # real signature unknown
pass
def asof_join_forward(*args, **kwargs): # real signature unknown
pass
def asof_join_forward_on_X_by_Y(*args, **kwargs): # real signature unknown
pass
def asof_join_nearest(*args, **kwargs): # real signature unknown
pass
def asof_join_nearest_on_X_by_Y(*args, **kwargs): # real signature unknown
pass
def ffill_indexer(*args, **kwargs): # real signature unknown
pass
def full_outer_join(*args, **kwargs): # real signature unknown
pass
def inner_join(*args, **kwargs): # real signature unknown
pass
def inner_join_indexer(*args, **kwargs): # real signature unknown
""" Two-pass algorithm for monotonic indexes. Handles many-to-one merges """
pass
def inner_join_indexer_float32(*args, **kwargs): # real signature unknown
""" Two-pass algorithm for monotonic indexes. Handles many-to-one merges """
pass
def inner_join_indexer_float64(*args, **kwargs): # real signature unknown
""" Two-pass algorithm for monotonic indexes. Handles many-to-one merges """
pass
def inner_join_indexer_int32(*args, **kwargs): # real signature unknown
""" Two-pass algorithm for monotonic indexes. Handles many-to-one merges """
pass
def inner_join_indexer_int64(*args, **kwargs): # real signature unknown
""" Two-pass algorithm for monotonic indexes. Handles many-to-one merges """
pass
def inner_join_indexer_object(*args, **kwargs): # real signature unknown
""" Two-pass algorithm for monotonic indexes. Handles many-to-one merges """
pass
def inner_join_indexer_uint64(*args, **kwargs): # real signature unknown
""" Two-pass algorithm for monotonic indexes. Handles many-to-one merges """
pass
def left_join_indexer(*args, **kwargs): # real signature unknown
""" Two-pass algorithm for monotonic indexes. Handles many-to-one merges """
pass
def left_join_indexer_float32(*args, **kwargs): # real signature unknown
""" Two-pass algorithm for monotonic indexes. Handles many-to-one merges """
pass
def left_join_indexer_float64(*args, **kwargs): # real signature unknown
""" Two-pass algorithm for monotonic indexes. Handles many-to-one merges """
pass
def left_join_indexer_int32(*args, **kwargs): # real signature unknown
""" Two-pass algorithm for monotonic indexes. Handles many-to-one merges """
pass
def left_join_indexer_int64(*args, **kwargs): # real signature unknown
""" Two-pass algorithm for monotonic indexes. Handles many-to-one merges """
pass
def left_join_indexer_object(*args, **kwargs): # real signature unknown
""" Two-pass algorithm for monotonic indexes. Handles many-to-one merges """
pass
def left_join_indexer_uint64(*args, **kwargs): # real signature unknown
""" Two-pass algorithm for monotonic indexes. Handles many-to-one merges """
pass
def left_join_indexer_unique(*args, **kwargs): # real signature unknown
pass
def left_join_indexer_unique_float32(*args, **kwargs): # real signature unknown
pass
def left_join_indexer_unique_float64(*args, **kwargs): # real signature unknown
pass
def left_join_indexer_unique_int32(*args, **kwargs): # real signature unknown
pass
def left_join_indexer_unique_int64(*args, **kwargs): # real signature unknown
pass
def left_join_indexer_unique_object(*args, **kwargs): # real signature unknown
pass
def left_join_indexer_unique_uint64(*args, **kwargs): # real signature unknown
pass
def left_outer_join(*args, **kwargs): # real signature unknown
pass
def outer_join_indexer(*args, **kwargs): # real signature unknown
pass
def outer_join_indexer_float32(*args, **kwargs): # real signature unknown
pass
def outer_join_indexer_float64(*args, **kwargs): # real signature unknown
pass
def outer_join_indexer_int32(*args, **kwargs): # real signature unknown
pass
def outer_join_indexer_int64(*args, **kwargs): # real signature unknown
pass
def outer_join_indexer_object(*args, **kwargs): # real signature unknown
pass
def outer_join_indexer_uint64(*args, **kwargs): # real signature unknown
pass
def take_nd(arr, indexer, axis=0, out=None, fill_value=nan, mask_info=None, allow_fill=True): # reliably restored by inspect
"""
Specialized Cython take which sets NaN values in one pass
This dispatches to ``take`` defined on ExtensionArrays. It does not
currently dispatch to ``SparseArray.take`` for sparse ``arr``.
Parameters
----------
arr : array-like
Input array.
indexer : ndarray
1-D array of indices to take, subarrays corresponding to -1 value
indices are filed with fill_value
axis : int, default 0
Axis to take from
out : ndarray or None, default None
Optional output array, must be appropriate type to hold input and
fill_value together, if indexer has any -1 value entries; call
_maybe_promote to determine this type for any fill_value
fill_value : any, default np.nan
Fill value to replace -1 values with
mask_info : tuple of (ndarray, boolean)
If provided, value should correspond to:
(indexer != -1, (indexer != -1).any())
If not provided, it will be computed internally if necessary
allow_fill : boolean, default True
If False, indexer is assumed to contain no -1 values so no filling
will be done. This short-circuits computation of a mask. Result is
undefined if allow_fill == False and -1 is present in indexer.
Returns
-------
subarray : array-like
May be the same type as the input, or cast to an ndarray.
"""
pass
def _get_result_indexer(*args, **kwargs): # real signature unknown
pass
def __pyx_unpickle_Enum(*args, **kwargs): # real signature unknown
pass
# no classes
# variables with complex values
__test__ = {}
| 33.78125
| 124
| 0.702744
| 878
| 6,486
| 5.005695
| 0.202733
| 0.091013
| 0.127418
| 0.209329
| 0.699431
| 0.68851
| 0.679408
| 0.679408
| 0.625256
| 0.611832
| 0
| 0.011408
| 0.20259
| 6,486
| 191
| 125
| 33.958115
| 0.83836
| 0.551958
| 0
| 0.476744
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.476744
| false
| 0.476744
| 0.034884
| 0
| 0.511628
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 1
| 0
|
0
| 5
|
620c1edbc42679219b52e4104a6398281c327fb2
| 25
|
py
|
Python
|
experiments/cnn_comp/mx_models/__init__.py
|
carol-hsu/relay-bench
|
0facffedb3cbb0d5f110769a84bba68718cff72b
|
[
"Apache-2.0"
] | 7
|
2019-10-03T22:41:18.000Z
|
2020-05-31T18:52:15.000Z
|
experiments/cnn_comp/mx_models/__init__.py
|
carol-hsu/relay-bench
|
0facffedb3cbb0d5f110769a84bba68718cff72b
|
[
"Apache-2.0"
] | 14
|
2019-10-18T19:13:53.000Z
|
2021-09-08T01:36:37.000Z
|
experiments/cnn_comp/mx_models/__init__.py
|
carol-hsu/relay-bench
|
0facffedb3cbb0d5f110769a84bba68718cff72b
|
[
"Apache-2.0"
] | 4
|
2019-10-03T21:34:03.000Z
|
2022-02-23T10:29:49.000Z
|
from .mxnet_zoo import *
| 12.5
| 24
| 0.76
| 4
| 25
| 4.5
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.16
| 25
| 1
| 25
| 25
| 0.857143
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
623c9f1c88bfc892bccdff8625961d5d1acd1dc6
| 170
|
py
|
Python
|
pydantic_geojson/point.py
|
farridav/pydantic-geojson
|
693760d32dfd4ce4f5c1a65363527d170363e60f
|
[
"MIT"
] | 5
|
2022-01-18T20:53:01.000Z
|
2022-01-27T16:38:26.000Z
|
pydantic_geojson/point.py
|
farridav/pydantic-geojson
|
693760d32dfd4ce4f5c1a65363527d170363e60f
|
[
"MIT"
] | 10
|
2022-01-19T23:04:53.000Z
|
2022-03-27T22:27:16.000Z
|
pydantic_geojson/point.py
|
farridav/pydantic-geojson
|
693760d32dfd4ce4f5c1a65363527d170363e60f
|
[
"MIT"
] | 1
|
2022-03-19T06:36:42.000Z
|
2022-03-19T06:36:42.000Z
|
from pydantic import BaseModel
from ._base import Coordinates, PointFieldType
class PointModel(BaseModel):
type: str = PointFieldType
coordinates: Coordinates
| 18.888889
| 46
| 0.788235
| 17
| 170
| 7.823529
| 0.647059
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.164706
| 170
| 8
| 47
| 21.25
| 0.93662
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.4
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
6253b2b7c22c3bf35019ea2eb33a334a99c71262
| 134
|
py
|
Python
|
src/panel_components/html/widgets/__init__.py
|
MarcSkovMadsen/panel-components
|
ca176f605006165889cbbd3c775d24cdcc4f964d
|
[
"MIT"
] | 5
|
2020-09-30T03:10:33.000Z
|
2022-02-04T19:54:58.000Z
|
src/panel_components/html/widgets/__init__.py
|
MarcSkovMadsen/panel-components
|
ca176f605006165889cbbd3c775d24cdcc4f964d
|
[
"MIT"
] | null | null | null |
src/panel_components/html/widgets/__init__.py
|
MarcSkovMadsen/panel-components
|
ca176f605006165889cbbd3c775d24cdcc4f964d
|
[
"MIT"
] | 1
|
2021-11-08T19:00:45.000Z
|
2021-11-08T19:00:45.000Z
|
"""# Basic HTML Widgets
See https://www.w3.org/TR/2012/WD-html-markup-20121025/elements.html
"""
from .html_button import HTMLButton
| 22.333333
| 68
| 0.753731
| 21
| 134
| 4.761905
| 0.857143
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.106557
| 0.089552
| 134
| 5
| 69
| 26.8
| 0.713115
| 0.671642
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
6267e0db6c083b2039a024281819ac4e3ac1c7b2
| 186
|
py
|
Python
|
tests/test_exercise12.py
|
surajwate/57-python-exercises
|
3b47c096266fec114300b9340cd852088a00d56e
|
[
"MIT"
] | null | null | null |
tests/test_exercise12.py
|
surajwate/57-python-exercises
|
3b47c096266fec114300b9340cd852088a00d56e
|
[
"MIT"
] | 1
|
2021-07-06T17:34:59.000Z
|
2021-07-06T17:34:59.000Z
|
tests/test_exercise12.py
|
surajwate/57-python-exercises
|
3b47c096266fec114300b9340cd852088a00d56e
|
[
"MIT"
] | null | null | null |
from exercises.exercise12 import simple_interest
def test_simple_interest():
assert simple_interest(1500, 4.3, 4) == """After 4 years at 4.3%, the investment will
be worth $1758."""
| 37.2
| 89
| 0.741935
| 29
| 186
| 4.62069
| 0.724138
| 0.313433
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.1
| 0.139785
| 186
| 5
| 90
| 37.2
| 0.7375
| 0
| 0
| 0
| 0
| 0
| 0.31016
| 0
| 0
| 0
| 0
| 0
| 0.25
| 1
| 0.25
| true
| 0
| 0.25
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
6557e2d0f5651c52b03116104cef71dd315cb4cf
| 49
|
py
|
Python
|
python/Learning Files/1- Basics-Variable Declaration and Memory Allocation.py
|
angelopassaro/Hacktoberfest-1
|
21f90f5d49efba9b1a27f4d9b923f5017ab43f0e
|
[
"Apache-2.0"
] | 1
|
2020-10-06T01:20:07.000Z
|
2020-10-06T01:20:07.000Z
|
python/Learning Files/1- Basics-Variable Declaration and Memory Allocation.py
|
angelopassaro/Hacktoberfest-1
|
21f90f5d49efba9b1a27f4d9b923f5017ab43f0e
|
[
"Apache-2.0"
] | null | null | null |
python/Learning Files/1- Basics-Variable Declaration and Memory Allocation.py
|
angelopassaro/Hacktoberfest-1
|
21f90f5d49efba9b1a27f4d9b923f5017ab43f0e
|
[
"Apache-2.0"
] | null | null | null |
x=3
y=2
x = x %y
x= x%y
y = y %x
IN = 4
print(IN)
| 7
| 9
| 0.469388
| 17
| 49
| 1.352941
| 0.411765
| 0.173913
| 0.26087
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.088235
| 0.306122
| 49
| 7
| 9
| 7
| 0.588235
| 0
| 0
| 0.285714
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0.142857
| 1
| 1
| 1
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
65a377727bbfd7bad98d3a70e1e7d3b90c7d7605
| 18,384
|
py
|
Python
|
lib/model.py
|
rizalord/komikcast-rest-api
|
31c9d384b01fee213843676a975b2f27096cc81d
|
[
"MIT"
] | 3
|
2020-06-28T14:05:38.000Z
|
2020-08-17T19:30:43.000Z
|
lib/model.py
|
rizalord/komikcast-api
|
31c9d384b01fee213843676a975b2f27096cc81d
|
[
"MIT"
] | 1
|
2020-08-17T15:25:25.000Z
|
2020-08-18T03:52:13.000Z
|
lib/model.py
|
rizalord/komikcast-rest-api
|
31c9d384b01fee213843676a975b2f27096cc81d
|
[
"MIT"
] | 1
|
2021-01-30T08:56:28.000Z
|
2021-01-30T08:56:28.000Z
|
from bs4 import BeautifulSoup as bs
from lib.static import urlPath,errorMessage,headers
import requests
from flask import request as req
def getRootData():
newUrl = urlPath if req.args.get('page') is None else urlPath + 'page/' + req.args.get('page') + '/'
page = requests.get(newUrl, headers=headers)
soup = bs(page.text , 'html.parser')
if page.status_code == 200:
# Parsing Data
# Initialization Container
hot_comic = []
project_comic = []
latest_chapter = []
# GET HOT COMIC
for data in soup.find_all('div' , attrs={'class' : 'bs'}):
tmp = {
'title' : data.find('div' , attrs={'class' : 'tt'}).get_text().strip(),
'ch' : data.find('div' , attrs={'class' : 'epxs'}).find('a').get_text().strip().replace('Ch.' , ''),
'rating' : data.find('div' , attrs={'class' : 'rating'}).find('i').get_text().strip(),
'image': data.find('img').get('src').strip(),
'type': data.find('span' , attrs={'class' : 'type'}).get_text().strip(),
'link': data.find('a').get('href') if data.find('a') is not None else None,
'linkId': data.find('a').get('href').replace('https://komikcast.com/komik/' , '') if data.find('a') is not None else None,
}
hot_comic.append(tmp)
# GET UPDATE PROJECT
for data in soup.find('div', attrs={'class' : 'listupd project'}).find_all('div' , attrs={'class': 'utao'}):
chapters = []
for cp in data.find_all('li'):
chapters.append({
'title': cp.find('a').get_text().strip() if cp.find('a') is not None else None ,
'time_uploaded': cp.find('i').get_text().strip() if cp.find('i') is not None else None ,
'link' : cp.find('a').get('href').strip() if cp.find('a') is not None else None ,
'linkId' : cp.find('a').get('href').strip().replace('https://komikcast.com/chapter/' , '') if cp.find('a') is not None else None ,
})
tmp = {
'title' : data.find('h3').get_text().strip(),
'image': data.find('img').get('src').strip(),
'isHot': True if data.find('span' , attrs={'class' : 'hot'}) is not None else False,
'link': data.find('a').get('href') if data.find('a') is not None else None,
'linkId': data.find('a').get('href').replace('https://komikcast.com/komik/' , '') if data.find('a') is not None else None,
'chapters': chapters
}
project_comic.append(tmp)
# GET LATEST COMIC
for data in soup.find_all('div', attrs={'class' : 'listupd'})[2].find_all('div' , attrs={'class': 'utao'}):
chapters = []
for cp in data.find_all('li'):
chapters.append({
'title': cp.find('a').get_text().strip() if cp.find('a') is not None else None ,
'time_uploaded': cp.find('i').get_text().strip() if cp.find('i') is not None else None ,
'link' : cp.find('a').get('href').strip() if cp.find('a') is not None else None ,
'linkId' : cp.find('a').get('href').strip().replace('https://komikcast.com/chapter/' , '') if cp.find('a') is not None else None ,
})
tmp = {
'title' : data.find('h3').get_text().strip(),
'image': data.find('img').get('src').strip(),
'isHot': True if data.find('span' , attrs={'class' : 'hot'}) is not None else False,
'link': data.find('a').get('href') if data.find('a') is not None else None,
'linkId': data.find('a').get('href').replace('https://komikcast.com/komik/' , '') if data.find('a') is not None else None,
'chapters': chapters
}
latest_chapter.append(tmp)
return {
'hot_comic' : hot_comic,
'project_comic': project_comic ,
'latest_chapter': latest_chapter
}
else:
return errorMessage
def getDaftarKomik():
order = '' if req.args.get('order') is None else '?order=' + req.args.get('order')
newUrl = urlPath + 'daftar-komik/' + order if req.args.get('page') is None else urlPath + 'daftar-komik/page/' + req.args.get('page') + '/' + order
pagination_page = int(req.args.get('page')) if req.args.get('page') is not None else 1
page = requests.get(newUrl , headers=headers)
soup = bs(page.text , 'html.parser')
if page.status_code == 200:
# Parsing Data
# Initialization Container
daftar_komik = []
for data in soup.find_all('div' , attrs={'class' : 'bs'}):
daftar_komik.append({
'title' : data.find('div' , attrs={'class': 'tt'}).get_text().strip(),
'chapter': data.find('div' , attrs={'class' : 'epxs'}).find('a').get_text().replace('Ch.' , '').strip(),
'rating' : data.find('div' , attrs={'class': 'rating'}).find('i').get_text().strip(),
'image': data.find('img').get('src').strip(),
'type': data.find('span' , attrs={'class' : 'type'}).get_text().strip(),
'isCompleted': True if data.find('span' , attrs={'class' : 'Completed'}) is not None else False,
'link': data.find('a').get('href') if data.find('a') is not None else None,
'linkId': data.find('a').get('href').replace('https://komikcast.com/komik/' , '')[:-1] if data.find('a') is not None else None,
'linkChapter': data.find('div' , attrs={'class' : 'bigor'}).find('a').get('href') if data.find('div' , attrs={'class' : 'bigor'}).find('a') is not None else None,
'linkIdChapter': data.find('div' , attrs={'class' : 'bigor'}).find('a').get('href').replace('https://komikcast.com/chapter/' , '')[:-1] if data.find('div' , attrs={'class' : 'bigor'}).find('a') is not None else None,
})
return {
'daftar_komik' : daftar_komik,
'page': pagination_page
}
else:
return errorMessage
def getProjectList():
newUrl = urlPath + 'project-list/' if req.args.get('page') is None else urlPath + 'project-list/page/' + req.args.get('page') + '/'
pagination_page = int(req.args.get('page')) if req.args.get('page') is not None else 1
page = requests.get(newUrl , headers=headers)
soup = bs(page.text , 'html.parser')
if page.status_code == 200:
# Parsing Data
# Initialization Container
daftar_komik = []
for data in soup.find_all('div' , attrs={'class' : 'bs'}):
daftar_komik.append({
'title' : data.find('div' , attrs={'class': 'tt'}).get_text().strip(),
'chapter': data.find('div' , attrs={'class' : 'epxs'}).find('a').get_text().replace('Ch.' , '').strip(),
'rating' : data.find('div' , attrs={'class': 'rating'}).find('i').get_text().strip(),
'image': data.find('img').get('src').strip(),
'type': data.find('span' , attrs={'class' : 'type'}).get_text().strip(),
'isCompleted': True if data.find('span' , attrs={'class' : 'Completed'}) is not None else False,
'link': data.find('a').get('href') if data.find('a') is not None else None,
'linkId': data.find('a').get('href').replace('https://komikcast.com/komik/' , '')[:-1] if data.find('a') is not None else None,
'linkChapter': data.find('div' , attrs={'class' : 'bigor'}).find('a').get('href') if data.find('div' , attrs={'class' : 'bigor'}).find('a') is not None else None,
'linkIdChapter': data.find('div' , attrs={'class' : 'bigor'}).find('a').get('href').replace('https://komikcast.com/chapter/' , '')[:-1] if data.find('div' , attrs={'class' : 'bigor'}).find('a') is not None else None,
})
return {
'daftar_komik' : daftar_komik,
'page': pagination_page
}
else:
return errorMessage
def getKomikTamat():
newUrl = urlPath + 'komik-tamat/' if req.args.get('page') is None else urlPath + 'komik-tamat/page/' + req.args.get('page') + '/'
pagination_page = int(req.args.get('page')) if req.args.get('page') is not None else 1
page = requests.get(newUrl , headers=headers)
soup = bs(page.text , 'html.parser')
if page.status_code == 200:
# Parsing Data
# Initialization Container
daftar_komik = []
for data in soup.find_all('div' , attrs={'class' : 'bs'}):
daftar_komik.append({
'title' : data.find('div' , attrs={'class': 'tt'}).get_text().strip(),
'chapter': data.find('div' , attrs={'class' : 'epxs'}).find('a').get_text().replace('Ch.' , '').strip(),
'rating' : data.find('div' , attrs={'class': 'rating'}).find('i').get_text().strip(),
'image': data.find('img').get('src').strip(),
'type': data.find('span' , attrs={'class' : 'type'}).get_text().strip(),
'link': data.find('a').get('href') if data.find('a') is not None else None,
'linkId': data.find('a').get('href').replace('https://komikcast.com/komik/' , '')[:-1] if data.find('a') is not None else None,
'linkChapter': data.find('div' , attrs={'class' : 'bigor'}).find('a').get('href') if data.find('div' , attrs={'class' : 'bigor'}).find('a') is not None else None,
'linkIdChapter': data.find('div' , attrs={'class' : 'bigor'}).find('a').get('href').replace('https://komikcast.com/chapter/' , '')[:-1] if data.find('div' , attrs={'class' : 'bigor'}).find('a') is not None else None,
})
return {
'daftar_komik' : daftar_komik,
'page': pagination_page
}
else:
return errorMessage
def getJadwalUpdate():
newUrl = urlPath + 'jadwal-update-project-harian-komikcast/'
page = requests.get(newUrl , headers=headers)
soup = bs(page.text , 'html.parser')
if page.status_code == 200:
# Parsing Data
# Initialization Container
container = []
# GETTING DATA
for data in soup.find('div' , attrs={'class' , 'text_exposed_show'}).find_all('p'):
if ' ' not in data.get_text():
container.append({
'time' : str(data.get_text()).split('=')[0].replace('–' , '').strip(),
'project': str(data.get_text()).split('=')[-1].strip(),
})
# Remove last element, cause it's useless empty string
container.pop()
return {
'data' : container
}
else:
return errorMessage
def getDataKomik():
idKomik = req.args.get('id')
newUrl = urlPath + 'komik/' + idKomik
page = requests.get(newUrl , headers=headers)
soup = bs(page.text , 'html.parser')
if page.status_code == 200:
# Parsing Data
# Initialization Container
container = {}
# GETTING DATA
genres = []
list_chapter = []
for data in soup.find('div' , {'class' : 'spe'}).find('span').find_all('a'):
genres.append(data.get_text().strip())
for data in soup.find('div' , {'class' : 'cl'}).find_all('li'):
list_chapter.append({
'chapter': data.find('span' , {'class' : 'leftoff'}).find('a').get_text().replace('Chapter' , '').strip(),
'time_release': data.find('span' , {'class' : 'rightoff'}).get_text().strip(),
'link': data.find('span' , {'class' : 'leftoff'}).find('a').get('href').strip(),
'linkId': data.find('span' , {'class' : 'leftoff'}).find('a').get('href').replace('https://komikcast.com/chapter/' , '').strip(),
})
container = {
'image': soup.find('div' , {'class' : 'thumb'}).find('img').get('src'),
'title': soup.find('h1' , {'itemprop' : 'headline'}).get_text().strip(),
'title_other': soup.find('span' , {'class' : 'alter'}).get_text().strip(),
'rating': soup.find('div' , {'class' : 'rating'}).find('strong').get_text().replace('Rating' , '').strip(),
'sinopsis': soup.find('div' , {'itemprop' : 'articleBody'}).find('p').get_text().strip(),
'genres' : genres,
'type': soup.find('div' , {'class': 'spe'}).find_all('span')[4].find('a').get_text().strip(),
'updated_on': soup.find('div' , {'class': 'spe'}).find_all('span')[6].find('time').get_text().strip(),
'status': soup.find('div', {'class': 'spe'}).find_all('span')[1].get_text().replace('Status:','').strip(),
'released': soup.find('div', {'class': 'spe'}).find_all('span')[2].get_text().replace('Released:','').strip(),
'author': soup.find('div', {'class': 'spe'}).find_all('span')[3].get_text().replace('Author:','').strip(),
'total_chapter': soup.find('div', {'class': 'spe'}).find_all('span')[5].get_text().replace('Total Chapter:','').strip(),
'list_chapter': list_chapter
}
return {
'data' : container
}
else:
return errorMessage
def getChapterComic():
idKomik = req.args.get('id')
newUrl = urlPath + 'chapter/' + idKomik
page = requests.get(newUrl , headers=headers)
soup = bs(page.text , 'html.parser')
if page.status_code == 200:
# Parsing Data
# Initialization Container
select_chapter = []
images = []
for data in soup.find('select').find_all('option'):
if data.get_text() != 'Select Chapter Manga':
select_chapter.append({
'text': data.get_text().strip(),
'link': data.get('value').strip(),
'linkId': data.get('value').replace('https://komikcast.com/chapter/' , '').strip(),
})
for data in soup.find('div' , {'id' : 'readerarea'}).find_all('img'):
if data.get('src').strip() != '':
images.append({
'link': data.get('src').strip(),
'width': data.get('width').strip() if data.get('width') is not None else None,
'height': data.get('height').strip() if data.get('height') is not None else None,
})
container = {
'title': soup.find('h1' , {'itemprop' : 'name'}).get_text().strip(),
'chapter': soup.find('h1' , {'itemprop' : 'name'}).get_text().split('Chapter')[1].replace('Bahasa Indonesia' , '').strip(),
'comic_title': soup.find('div' , {'class': 'allc'}).find('a').get_text().strip(),
'comic_link': soup.find('div' , {'class': 'allc'}).find('a').get('href').strip(),
'comic_link_id': soup.find('div' , {'class': 'allc'}).find('a').get('href').replace('https://komikcast.com/komik/' , '').strip(),
'select_chapter': select_chapter,
'prev_link': soup.find('div' , {'class': 'nextprev'}).find('a' , {'rel' : 'prev'}).get('href').strip() if soup.find('div' , {'class': 'nextprev'}).find('a' , {'rel' : 'prev'}) is not None else None,
'prev_link_id': soup.find('div' , {'class': 'nextprev'}).find('a' , {'rel' : 'prev'}).get('href').replace('https://komikcast.com/chapter/' , '').strip() if soup.find('div' , {'class': 'nextprev'}).find('a' , {'rel' : 'prev'}) is not None else None,
'next_link': soup.find('div' , {'class': 'nextprev'}).find('a' , {'rel' : 'next'}).get('href').strip() if soup.find('div' , {'class': 'nextprev'}).find('a' , {'rel' : 'next'}) is not None else None,
'next_link_id': soup.find('div' , {'class': 'nextprev'}).find('a' , {'rel' : 'next'}).get('href').replace('https://komikcast.com/chapter/' , '').strip() if soup.find('div' , {'class': 'nextprev'}).find('a' , {'rel' : 'next'}) is not None else None,
'images': images
}
return {
'data' : container
}
else:
return errorMessage
def getSpecificComic():
keyword = req.args.get('keyword')
newUrl = urlPath + '?s=' + keyword if req.args.get('page') is None else urlPath + 'page/' + req.args.get('page') + '/?s=' + keyword
pagination_page = int(req.args.get('page')) if req.args.get('page') is not None else 1
page = requests.get(newUrl , headers=headers)
soup = bs(page.text , 'html.parser')
if page.status_code == 200:
# Parsing Data
# Initialization Container
daftar_komik = []
for data in soup.find_all('div' , attrs={'class' : 'bs'}):
image = None
try:
image = data.find('img') .get('src').strip()
except:
pass
daftar_komik.append({
'title' : data.find('div' , attrs={'class': 'tt'}).get_text().strip(),
'chapter': data.find('div' , attrs={'class' : 'epxs'}).find('a').get_text().replace('Ch.' , '').strip(),
'rating' : data.find('div' , attrs={'class': 'rating'}).find('i').get_text().strip(),
'image': image,
'type': data.find('span' , attrs={'class' : 'type'}).get_text().strip(),
'isCompleted': True if data.find('span' , attrs={'class' : 'Completed'}) is not None else False,
'link': data.find('a').get('href') if data.find('a') is not None else None,
'linkId': data.find('a').get('href').replace('https://komikcast.com/komik/' , '')[:-1] if data.find('a') is not None else None,
'linkChapter': data.find('div' , attrs={'class' : 'bigor'}).find('a').get('href') if data.find('div' , attrs={'class' : 'bigor'}).find('a') is not None else None,
'linkIdChapter': data.find('div' , attrs={'class' : 'bigor'}).find('a').get('href').replace('https://komikcast.com/chapter/' , '')[:-1] if data.find('div' , attrs={'class' : 'bigor'}).find('a') is not None else None,
})
return {
'results' : daftar_komik,
'page': pagination_page
}
else:
return errorMessage
| 51.932203
| 260
| 0.525946
| 2,223
| 18,384
| 4.288349
| 0.077823
| 0.070492
| 0.042484
| 0.061366
| 0.804678
| 0.773943
| 0.767649
| 0.728942
| 0.691598
| 0.659813
| 0
| 0.003838
| 0.263
| 18,384
| 353
| 261
| 52.07932
| 0.699683
| 0.023499
| 0
| 0.595588
| 0
| 0
| 0.186625
| 0.002175
| 0
| 0
| 0
| 0
| 0
| 1
| 0.029412
| false
| 0.003676
| 0.014706
| 0
| 0.102941
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
65ab23bb66f6b25916335ca834756226fb1bedf3
| 178
|
py
|
Python
|
src/vae/models/__init__.py
|
iserh/data-augmentation
|
1e1e99177ff4256c68cafe043bd7e50d52bf669d
|
[
"MIT"
] | null | null | null |
src/vae/models/__init__.py
|
iserh/data-augmentation
|
1e1e99177ff4256c68cafe043bd7e50d52bf669d
|
[
"MIT"
] | null | null | null |
src/vae/models/__init__.py
|
iserh/data-augmentation
|
1e1e99177ff4256c68cafe043bd7e50d52bf669d
|
[
"MIT"
] | null | null | null |
"""VAE models for various datasets."""
from .base import VAEConfig, VAEModel, VAEOutput # noqa: F401
from .vae_for_data_augmentation import VAEForDataAugmentation # noqa: F401
| 44.5
| 75
| 0.786517
| 22
| 178
| 6.227273
| 0.727273
| 0.116788
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.03871
| 0.129213
| 178
| 3
| 76
| 59.333333
| 0.845161
| 0.308989
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
65b1947e385b2d6f84121bbed17b72927221bc3d
| 130
|
py
|
Python
|
FaceSwap-master/pytorch_stylegan_encoder/InterFaceGAN/models/pggan_tf_official/config.py
|
CSID-DGU/-2020-1-OSSP1-ninetynine-2
|
b1824254882eeea0ee44e4e60896b72c51ef1d2c
|
[
"MIT"
] | 1
|
2020-06-21T13:45:26.000Z
|
2020-06-21T13:45:26.000Z
|
FaceSwap-master/pytorch_stylegan_encoder/InterFaceGAN/models/pggan_tf_official/config.py
|
CSID-DGU/-2020-1-OSSP1-ninetynine-2
|
b1824254882eeea0ee44e4e60896b72c51ef1d2c
|
[
"MIT"
] | null | null | null |
FaceSwap-master/pytorch_stylegan_encoder/InterFaceGAN/models/pggan_tf_official/config.py
|
CSID-DGU/-2020-1-OSSP1-ninetynine-2
|
b1824254882eeea0ee44e4e60896b72c51ef1d2c
|
[
"MIT"
] | 3
|
2020-09-02T03:18:45.000Z
|
2021-01-27T08:24:05.000Z
|
version https://git-lfs.github.com/spec/v1
oid sha256:27f886a21c94d3f4888c7ce341c3c2f7d0002ea43a7cdc8a004ecd3de711e97d
size 12894
| 32.5
| 75
| 0.884615
| 13
| 130
| 8.846154
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.379032
| 0.046154
| 130
| 3
| 76
| 43.333333
| 0.548387
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | null | 0
| 0
| null | null | 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
028fd5bc71543d0fafa8959d541974c76a0a1054
| 80
|
py
|
Python
|
learning/test/myprofile.py
|
SpAiNiOr/mystudy
|
9bbb76eead6a5a43633ce75a1eaff3636a7f6ba0
|
[
"Apache-2.0"
] | null | null | null |
learning/test/myprofile.py
|
SpAiNiOr/mystudy
|
9bbb76eead6a5a43633ce75a1eaff3636a7f6ba0
|
[
"Apache-2.0"
] | null | null | null |
learning/test/myprofile.py
|
SpAiNiOr/mystudy
|
9bbb76eead6a5a43633ce75a1eaff3636a7f6ba0
|
[
"Apache-2.0"
] | 1
|
2015-08-24T09:13:50.000Z
|
2015-08-24T09:13:50.000Z
|
# Profile a single call to my_math.product and dump the stats to '123.txt'
# (readable later via the pstats module).
import profile
from my_math import product
profile.run('product(1,2)','123.txt')
| 26.666667
| 37
| 0.775
| 14
| 80
| 4.357143
| 0.785714
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.067568
| 0.075
| 80
| 3
| 37
| 26.666667
| 0.756757
| 0
| 0
| 0
| 0
| 0
| 0.234568
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.666667
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
02b88b1245071f758cad9af565097053d94b38f3
| 11,559
|
py
|
Python
|
ietf/ipr/resources.py
|
wpjesus/codematch
|
eee7405259cce9239ea0545a2a1300ee1accfe94
|
[
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 1
|
2015-09-02T19:53:12.000Z
|
2015-09-02T19:53:12.000Z
|
ietf/ipr/resources.py
|
wpjesus/codematch
|
eee7405259cce9239ea0545a2a1300ee1accfe94
|
[
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | null | null | null |
ietf/ipr/resources.py
|
wpjesus/codematch
|
eee7405259cce9239ea0545a2a1300ee1accfe94
|
[
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | null | null | null |
# Autogenerated by the mkresources management command 2015-03-21 14:05 PDT
from tastypie.resources import ModelResource
from tastypie.fields import ToOneField, ToManyField # pyflakes:ignore
from tastypie.constants import ALL, ALL_WITH_RELATIONS # pyflakes:ignore
from ietf import api
from ietf.ipr.models import * # pyflakes:ignore
from ietf.person.resources import PersonResource
from ietf.name.resources import IprDisclosureStateNameResource
from ietf.doc.resources import DocAliasResource
class IprDisclosureBaseResource(ModelResource):
    """Tastypie REST resource for the IprDisclosureBase model (autogenerated)."""
    by = ToOneField(PersonResource, 'by')
    state = ToOneField(IprDisclosureStateNameResource, 'state')
    docs = ToManyField(DocAliasResource, 'docs', null=True)
    # Self-referential relation; given as a dotted path because the class is
    # still being defined at this point.
    rel = ToManyField('ietf.ipr.resources.IprDisclosureBaseResource', 'rel', null=True)
    class Meta:
        queryset = IprDisclosureBase.objects.all()
        serializer = api.Serializer()
        #resource_name = 'iprdisclosurebase'
        # Fields clients may filter on; related fields also allow lookups
        # that traverse the relation (ALL_WITH_RELATIONS).
        filtering = { 
            "id": ALL,
            "compliant": ALL,
            "holder_legal_name": ALL,
            "notes": ALL,
            "other_designations": ALL,
            "submitter_name": ALL,
            "submitter_email": ALL,
            "time": ALL,
            "title": ALL,
            "by": ALL_WITH_RELATIONS,
            "state": ALL_WITH_RELATIONS,
            "docs": ALL_WITH_RELATIONS,
            "rel": ALL_WITH_RELATIONS,
        }
api.ipr.register(IprDisclosureBaseResource())
from ietf.doc.resources import DocAliasResource
class IprDocRelResource(ModelResource):
    """Tastypie REST resource for the IprDocRel model (autogenerated)."""
    disclosure = ToOneField(IprDisclosureBaseResource, 'disclosure')
    document = ToOneField(DocAliasResource, 'document')
    class Meta:
        queryset = IprDocRel.objects.all()
        serializer = api.Serializer()
        #resource_name = 'iprdocrel'
        filtering = { 
            "id": ALL,
            "sections": ALL,
            "revisions": ALL,
            "disclosure": ALL_WITH_RELATIONS,
            "document": ALL_WITH_RELATIONS,
        }
api.ipr.register(IprDocRelResource())
from ietf.person.resources import PersonResource
from ietf.name.resources import IprDisclosureStateNameResource, IprLicenseTypeNameResource
from ietf.doc.resources import DocAliasResource
class HolderIprDisclosureResource(ModelResource):
    """Tastypie REST resource for the HolderIprDisclosure model (autogenerated)."""
    by = ToOneField(PersonResource, 'by')
    state = ToOneField(IprDisclosureStateNameResource, 'state')
    # Link back to the multi-table-inheritance parent row.
    iprdisclosurebase_ptr = ToOneField(IprDisclosureBaseResource, 'iprdisclosurebase_ptr')
    licensing = ToOneField(IprLicenseTypeNameResource, 'licensing')
    docs = ToManyField(DocAliasResource, 'docs', null=True)
    rel = ToManyField(IprDisclosureBaseResource, 'rel', null=True)
    class Meta:
        queryset = HolderIprDisclosure.objects.all()
        serializer = api.Serializer()
        #resource_name = 'holderiprdisclosure'
        filtering = { 
            "id": ALL,
            "compliant": ALL,
            "holder_legal_name": ALL,
            "notes": ALL,
            "other_designations": ALL,
            "submitter_name": ALL,
            "submitter_email": ALL,
            "time": ALL,
            "title": ALL,
            "ietfer_name": ALL,
            "ietfer_contact_email": ALL,
            "ietfer_contact_info": ALL,
            "patent_info": ALL,
            "has_patent_pending": ALL,
            "holder_contact_email": ALL,
            "holder_contact_name": ALL,
            "holder_contact_info": ALL,
            "licensing_comments": ALL,
            "submitter_claims_all_terms_disclosed": ALL,
            "by": ALL_WITH_RELATIONS,
            "state": ALL_WITH_RELATIONS,
            "iprdisclosurebase_ptr": ALL_WITH_RELATIONS,
            "licensing": ALL_WITH_RELATIONS,
            "docs": ALL_WITH_RELATIONS,
            "rel": ALL_WITH_RELATIONS,
        }
api.ipr.register(HolderIprDisclosureResource())
from ietf.person.resources import PersonResource
from ietf.name.resources import IprDisclosureStateNameResource
from ietf.doc.resources import DocAliasResource
class ThirdPartyIprDisclosureResource(ModelResource):
    """Tastypie REST resource for the ThirdPartyIprDisclosure model (autogenerated)."""
    by = ToOneField(PersonResource, 'by')
    state = ToOneField(IprDisclosureStateNameResource, 'state')
    # Link back to the multi-table-inheritance parent row.
    iprdisclosurebase_ptr = ToOneField(IprDisclosureBaseResource, 'iprdisclosurebase_ptr')
    docs = ToManyField(DocAliasResource, 'docs', null=True)
    rel = ToManyField(IprDisclosureBaseResource, 'rel', null=True)
    class Meta:
        queryset = ThirdPartyIprDisclosure.objects.all()
        serializer = api.Serializer()
        #resource_name = 'thirdpartyiprdisclosure'
        filtering = { 
            "id": ALL,
            "compliant": ALL,
            "holder_legal_name": ALL,
            "notes": ALL,
            "other_designations": ALL,
            "submitter_name": ALL,
            "submitter_email": ALL,
            "time": ALL,
            "title": ALL,
            "ietfer_name": ALL,
            "ietfer_contact_email": ALL,
            "ietfer_contact_info": ALL,
            "patent_info": ALL,
            "has_patent_pending": ALL,
            "by": ALL_WITH_RELATIONS,
            "state": ALL_WITH_RELATIONS,
            "iprdisclosurebase_ptr": ALL_WITH_RELATIONS,
            "docs": ALL_WITH_RELATIONS,
            "rel": ALL_WITH_RELATIONS,
        }
api.ipr.register(ThirdPartyIprDisclosureResource())
from ietf.name.resources import DocRelationshipNameResource
class RelatedIprResource(ModelResource):
    """Tastypie REST resource for the RelatedIpr model (autogenerated)."""
    source = ToOneField(IprDisclosureBaseResource, 'source')
    target = ToOneField(IprDisclosureBaseResource, 'target')
    relationship = ToOneField(DocRelationshipNameResource, 'relationship')
    class Meta:
        queryset = RelatedIpr.objects.all()
        serializer = api.Serializer()
        #resource_name = 'relatedipr'
        filtering = { 
            "id": ALL,
            "source": ALL_WITH_RELATIONS,
            "target": ALL_WITH_RELATIONS,
            "relationship": ALL_WITH_RELATIONS,
        }
api.ipr.register(RelatedIprResource())
from ietf.person.resources import PersonResource
from ietf.name.resources import IprDisclosureStateNameResource
from ietf.doc.resources import DocAliasResource
class NonDocSpecificIprDisclosureResource(ModelResource):
    """Tastypie REST resource for the NonDocSpecificIprDisclosure model (autogenerated)."""
    by = ToOneField(PersonResource, 'by')
    state = ToOneField(IprDisclosureStateNameResource, 'state')
    # Link back to the multi-table-inheritance parent row.
    iprdisclosurebase_ptr = ToOneField(IprDisclosureBaseResource, 'iprdisclosurebase_ptr')
    docs = ToManyField(DocAliasResource, 'docs', null=True)
    rel = ToManyField(IprDisclosureBaseResource, 'rel', null=True)
    class Meta:
        queryset = NonDocSpecificIprDisclosure.objects.all()
        serializer = api.Serializer()
        #resource_name = 'nondocspecificiprdisclosure'
        filtering = { 
            "id": ALL,
            "compliant": ALL,
            "holder_legal_name": ALL,
            "notes": ALL,
            "other_designations": ALL,
            "submitter_name": ALL,
            "submitter_email": ALL,
            "time": ALL,
            "title": ALL,
            "holder_contact_name": ALL,
            "holder_contact_email": ALL,
            "holder_contact_info": ALL,
            "patent_info": ALL,
            "has_patent_pending": ALL,
            "statement": ALL,
            "by": ALL_WITH_RELATIONS,
            "state": ALL_WITH_RELATIONS,
            "iprdisclosurebase_ptr": ALL_WITH_RELATIONS,
            "docs": ALL_WITH_RELATIONS,
            "rel": ALL_WITH_RELATIONS,
        }
api.ipr.register(NonDocSpecificIprDisclosureResource())
from ietf.person.resources import PersonResource
from ietf.name.resources import IprDisclosureStateNameResource
from ietf.doc.resources import DocAliasResource
class GenericIprDisclosureResource(ModelResource):
    """Tastypie REST resource for the GenericIprDisclosure model (autogenerated)."""
    by = ToOneField(PersonResource, 'by')
    state = ToOneField(IprDisclosureStateNameResource, 'state')
    # Link back to the multi-table-inheritance parent row.
    iprdisclosurebase_ptr = ToOneField(IprDisclosureBaseResource, 'iprdisclosurebase_ptr')
    docs = ToManyField(DocAliasResource, 'docs', null=True)
    rel = ToManyField(IprDisclosureBaseResource, 'rel', null=True)
    class Meta:
        queryset = GenericIprDisclosure.objects.all()
        serializer = api.Serializer()
        #resource_name = 'genericiprdisclosure'
        filtering = { 
            "id": ALL,
            "compliant": ALL,
            "holder_legal_name": ALL,
            "notes": ALL,
            "other_designations": ALL,
            "submitter_name": ALL,
            "submitter_email": ALL,
            "time": ALL,
            "title": ALL,
            "holder_contact_name": ALL,
            "holder_contact_email": ALL,
            "holder_contact_info": ALL,
            "statement": ALL,
            "by": ALL_WITH_RELATIONS,
            "state": ALL_WITH_RELATIONS,
            "iprdisclosurebase_ptr": ALL_WITH_RELATIONS,
            "docs": ALL_WITH_RELATIONS,
            "rel": ALL_WITH_RELATIONS,
        }
api.ipr.register(GenericIprDisclosureResource())
from ietf.person.resources import PersonResource
from ietf.message.resources import MessageResource
from ietf.name.resources import IprEventTypeNameResource
class IprEventResource(ModelResource):
    """Tastypie REST resource for the IprEvent model (autogenerated)."""
    type = ToOneField(IprEventTypeNameResource, 'type')
    by = ToOneField(PersonResource, 'by')
    disclosure = ToOneField(IprDisclosureBaseResource, 'disclosure')
    message = ToOneField(MessageResource, 'message', null=True)
    in_reply_to = ToOneField(MessageResource, 'in_reply_to', null=True)
    class Meta:
        queryset = IprEvent.objects.all()
        serializer = api.Serializer()
        #resource_name = 'iprevent'
        filtering = { 
            "id": ALL,
            "time": ALL,
            "desc": ALL,
            "response_due": ALL,
            "type": ALL_WITH_RELATIONS,
            "by": ALL_WITH_RELATIONS,
            "disclosure": ALL_WITH_RELATIONS,
            "message": ALL_WITH_RELATIONS,
            "in_reply_to": ALL_WITH_RELATIONS,
        }
api.ipr.register(IprEventResource())
from ietf.person.resources import PersonResource
from ietf.message.resources import MessageResource
from ietf.name.resources import IprEventTypeNameResource
class LegacyMigrationIprEventResource(ModelResource):
    """Tastypie REST resource for the LegacyMigrationIprEvent model (autogenerated)."""
    type = ToOneField(IprEventTypeNameResource, 'type')
    by = ToOneField(PersonResource, 'by')
    disclosure = ToOneField(IprDisclosureBaseResource, 'disclosure')
    message = ToOneField(MessageResource, 'message', null=True)
    in_reply_to = ToOneField(MessageResource, 'in_reply_to', null=True)
    # Link back to the multi-table-inheritance parent row.
    iprevent_ptr = ToOneField(IprEventResource, 'iprevent_ptr')
    class Meta:
        queryset = LegacyMigrationIprEvent.objects.all()
        serializer = api.Serializer()
        #resource_name = 'legacymigrationiprevent'
        filtering = { 
            "id": ALL,
            "time": ALL,
            "desc": ALL,
            "response_due": ALL,
            "type": ALL_WITH_RELATIONS,
            "by": ALL_WITH_RELATIONS,
            "disclosure": ALL_WITH_RELATIONS,
            "message": ALL_WITH_RELATIONS,
            "in_reply_to": ALL_WITH_RELATIONS,
            "iprevent_ptr": ALL_WITH_RELATIONS,
        }
api.ipr.register(LegacyMigrationIprEventResource())
| 41.430108
| 100
| 0.630764
| 984
| 11,559
| 7.21748
| 0.111789
| 0.041397
| 0.094621
| 0.029147
| 0.752182
| 0.745001
| 0.730358
| 0.64503
| 0.637004
| 0.620952
| 0
| 0.001435
| 0.276668
| 11,559
| 278
| 101
| 41.579137
| 0.847985
| 0.037893
| 0
| 0.764706
| 1
| 0
| 0.140194
| 0.02233
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.109804
| 0
| 0.341176
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
02c4971803bebc2586e32f0d6ab6c50f4780c46e
| 8,194
|
py
|
Python
|
place2planet/place2eeintersect.py
|
AndreaChlebikova/hacktober2018
|
edfcab5c6d6f11e269643c9180ffc0108b15e1e0
|
[
"Apache-2.0"
] | null | null | null |
place2planet/place2eeintersect.py
|
AndreaChlebikova/hacktober2018
|
edfcab5c6d6f11e269643c9180ffc0108b15e1e0
|
[
"Apache-2.0"
] | null | null | null |
place2planet/place2eeintersect.py
|
AndreaChlebikova/hacktober2018
|
edfcab5c6d6f11e269643c9180ffc0108b15e1e0
|
[
"Apache-2.0"
] | null | null | null |
import os
import sys
import csv
import time
import ee
import json
import requests
import shapely
import shapely.geometry as geom
from requests.auth import HTTPBasicAuth
from planet.api.auth import find_api_key
from shapely.geometry import Point, box, Polygon, MultiPoint
# --- Module-level setup -----------------------------------------------------
# Run relative to this script's directory and make sibling modules importable.
path = os.path.dirname(os.path.realpath(__file__))
os.chdir(path)
sys.path.insert(0, path)
try:
    ee.Initialize()
except Exception:
    print('Authenticate Earth Engine first and rerun program')
    time.sleep(2)
    os.system('earthengine authenticate')
src = path  # input CSVs are expected next to the script
try:
    api_key = find_api_key()
    # Reuse the key already fetched instead of calling find_api_key() twice.
    os.environ['PLANET_API_KEY'] = api_key
except Exception:  # narrowed from bare `except:` so SystemExit/KeyboardInterrupt propagate
    print('Failed to get Planet Key')
    sys.exit()
# Gather every asset id from every CSV next to the script; `l` is consumed
# by intersect() below.
l = []
for items in os.listdir(src):
    if items.endswith('.csv'):
        # `with` closes the handle (the original leaked the open file object).
        with open(os.path.join(src, items)) as csv_fh:
            for rows in csv.DictReader(csv_fh):
                l.append(rows['id'])
def intersect(start, end, geometry, operator, output):
    """Check each Earth Engine asset id in the module-level list ``l`` for
    intersection with a geometry/date range and write matches to a CSV.

    Parameters
    ----------
    start, end : str
        Date range passed to ``filterDate`` (e.g. ``'2018-10-01'``).
    geometry : list
        Polygon coordinates accepted by ``ee.Geometry.Polygon``.
    operator : str or None
        ``'bb'`` intersects against the geometry's bounding box; ``None``
        against the polygon itself.  Any other value skips the asset
        (matching the original branch structure).
    output : str
        Path of the CSV report with columns ``type``, ``id``, ``#items``.
    """
    # 'w' + newline='' is the documented open mode for the csv module on
    # Python 3; the original 'wb' raises TypeError when writing str rows.
    with open(output, 'w', newline='') as csvfile:
        header_writer = csv.DictWriter(csvfile, fieldnames=['type', 'id', '#items'],
                                       delimiter=',')
        header_writer.writeheader()
    total = len(l)
    for i, items in enumerate(l, start=1):
        print('Processing ' + str(i) + ' of ' + str(total))
        try:
            typ = ee.data.getInfo(items)['type']
            aoi_geom = ee.Geometry.Polygon(geometry)
            boundbox = aoi_geom.bounds()
        except Exception as e:
            print(e)
            # Skip this asset: the original fell through here with `typ` and
            # `boundbox` unbound (or stale from the previous iteration).
            continue
        if str(typ) == 'Image':
            collection_arg = [items]  # single image wrapped in a 1-element collection
        elif typ == 'ImageCollection':
            collection_arg = items
        else:
            continue  # unknown asset type: original did nothing for it
        if operator == 'bb':
            region = boundbox
        elif operator is None:
            region = aoi_geom
        else:
            continue  # unknown operator: original silently skipped
        # The four original branches were identical apart from collection_arg
        # and region, so they collapse into one code path.
        try:
            user_collection = ee.ImageCollection(collection_arg) \
                .filterBounds(region).filterDate(start, end)
            length = user_collection.size().getInfo()
            if int(length) != 0:
                with open(output, 'a', newline='') as csvfile:
                    row_writer = csv.writer(csvfile, delimiter=',',
                                            lineterminator='\n')
                    row_writer.writerow([str(typ), str(items), str(length)])
        except Exception as e:
            print('Check on Collection failed becaused ' + str(e))
    print('')
    print('Report with Intersects Exported to ' + str(output))
def _run_for_location(lat, lon, op, local, start, end):
    """Buffer a lat/lon point into a simplified polygon and run intersect()."""
    center = Point(float(lon), float(lat)).buffer(0.11)
    poly = center.simplify(4)
    features = shapely.geometry.mapping(poly)
    # Round-trip through JSON to get plain lists instead of shapely tuples.
    geom = json.loads(json.dumps(features))['coordinates']
    intersect(geometry=geom, operator=op, output=local, start=start, end=end)


def eefp(place, local, op, start, end):
    """Geocode *place* via OSM Nominatim and run intersect() on each hit.

    Parameters
    ----------
    place : str
        Free-text place name (may contain commas).
    local : str
        Output CSV path forwarded to ``intersect``.
    op : str or None
        Intersection operator forwarded to ``intersect`` (``'bb'`` or None).
    start, end : str
        Date range forwarded to ``intersect``.
    """
    # ''.join(place) is a no-op for a str, so the original comma branch hit
    # the same URL as the plain branch; one request covers both cases.
    r = requests.get('https://nominatim.openstreetmap.org/search?q='
                     + place + '&format=jsonv2')
    response = r.json()
    for things in response:
        try:
            if (',') in place:
                # Original comma branch processed every candidate
                # (its if/else bodies were identical).
                _run_for_location(things['lat'], things['lon'],
                                  op, local, start, end)
            elif things['importance'] >= 0.7:
                # Original no-comma branch only used sufficiently important
                # hits; it also double-json.dumps'ed the geometry, which made
                # json.loads return a str and `['coordinates']` raise
                # TypeError — fixed by the single encode in the helper.
                _run_for_location(things['lat'], things['lon'],
                                  op, local, start, end)
        except Exception as e:
            print('Issue Getting Geometry' + str(e))
# fp(place="Raleigh,NC",
# item='PSScene4Band',
# local=r"C:\planet_demo\bangalore_fp.geojson",
# start='2018-10-01',
# end='2018-10-15')
| 43.585106
| 103
| 0.528313
| 862
| 8,194
| 4.979118
| 0.200696
| 0.022367
| 0.031687
| 0.033551
| 0.763048
| 0.739748
| 0.739748
| 0.71808
| 0.71808
| 0.695247
| 0
| 0.009605
| 0.351965
| 8,194
| 187
| 104
| 43.818182
| 0.798682
| 0.08494
| 0
| 0.639053
| 0
| 0
| 0.087108
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.011834
| false
| 0.023669
| 0.088757
| 0
| 0.100592
| 0.071006
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
f30d100f54ebb582491e9985e1f9cb26919ceb88
| 185
|
py
|
Python
|
profiles/filters/quota_filter.py
|
LaudateCorpus1/squest
|
98304f20c1d966fb3678d348ffd7c5be438bb6be
|
[
"Apache-2.0"
] | null | null | null |
profiles/filters/quota_filter.py
|
LaudateCorpus1/squest
|
98304f20c1d966fb3678d348ffd7c5be438bb6be
|
[
"Apache-2.0"
] | null | null | null |
profiles/filters/quota_filter.py
|
LaudateCorpus1/squest
|
98304f20c1d966fb3678d348ffd7c5be438bb6be
|
[
"Apache-2.0"
] | 1
|
2022-03-24T03:37:12.000Z
|
2022-03-24T03:37:12.000Z
|
from profiles.models import Quota
from Squest.utils.squest_filter import SquestFilter
class QuotaFilter(SquestFilter):
    """Squest filter for Quota objects; only the 'name' field is filterable."""
    class Meta:
        model = Quota
        fields = ['name']
| 20.555556
| 51
| 0.713514
| 21
| 185
| 6.238095
| 0.714286
| 0.259542
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.216216
| 185
| 8
| 52
| 23.125
| 0.903448
| 0
| 0
| 0
| 0
| 0
| 0.021622
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.333333
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
b82b3b005e581fdbafd06fe5a54419cc97ce285b
| 63
|
py
|
Python
|
early_projects/exercise3.py
|
JSBCCA/pythoncode
|
b7f2af8b0efc2d01d3e4568265eb3a5038a8679f
|
[
"MIT"
] | null | null | null |
early_projects/exercise3.py
|
JSBCCA/pythoncode
|
b7f2af8b0efc2d01d3e4568265eb3a5038a8679f
|
[
"MIT"
] | null | null | null |
early_projects/exercise3.py
|
JSBCCA/pythoncode
|
b7f2af8b0efc2d01d3e4568265eb3a5038a8679f
|
[
"MIT"
] | null | null | null |
# Exercise: three different expressions that all print the same number.
jobb = 12340
for value in (10000 + 2345, 12345, jobb - -5):
    print(value)
| 12.6
| 19
| 0.68254
| 10
| 63
| 4.3
| 0.7
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.377358
| 0.15873
| 63
| 4
| 20
| 15.75
| 0.433962
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0.75
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
|
0
| 5
|
b84fd2df8535b81e9c6413ca43d50950ea00e8d6
| 85
|
py
|
Python
|
azmessaging/telegram/__init__.py
|
ali-zahedi/az-messaging
|
ecc626e6be3f58a9ec166923623c144c86d2734e
|
[
"MIT"
] | null | null | null |
azmessaging/telegram/__init__.py
|
ali-zahedi/az-messaging
|
ecc626e6be3f58a9ec166923623c144c86d2734e
|
[
"MIT"
] | null | null | null |
azmessaging/telegram/__init__.py
|
ali-zahedi/az-messaging
|
ecc626e6be3f58a9ec166923623c144c86d2734e
|
[
"MIT"
] | null | null | null |
from .telegramabstract import TelegramApi
from .default import TELEGRAMAPIDefaultAPI
| 28.333333
| 42
| 0.882353
| 8
| 85
| 9.375
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.094118
| 85
| 2
| 43
| 42.5
| 0.974026
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
b85da02a3eb904619d68e25de483a376bba889e0
| 231
|
py
|
Python
|
justredis/errors.py
|
illuminatedwax/justredis
|
8e692d0983de5809964d4e4e361447c5fa400e88
|
[
"MIT"
] | 45
|
2020-02-28T17:43:10.000Z
|
2022-03-08T09:20:34.000Z
|
justredis/errors.py
|
illuminatedwax/justredis
|
8e692d0983de5809964d4e4e361447c5fa400e88
|
[
"MIT"
] | 4
|
2020-02-29T00:05:04.000Z
|
2022-01-24T20:39:51.000Z
|
justredis/errors.py
|
illuminatedwax/justredis
|
8e692d0983de5809964d4e4e361447c5fa400e88
|
[
"MIT"
] | 4
|
2020-06-08T19:50:48.000Z
|
2022-02-23T16:33:09.000Z
|
class RedisError(Exception):
    """Base class for all exceptions raised by this package."""
    pass
class CommunicationError(RedisError):
    """Raised for failures communicating with the server."""
    pass
class ConnectionPoolError(RedisError):
    """Raised for connection-pool related failures."""
    pass
class ProtocolError(RedisError):
    """Raised when the wire protocol cannot be parsed as expected."""
    pass
class PipelinedExceptions(RedisError):
    """Raised to surface failures from pipelined commands."""
    pass
| 12.157895
| 38
| 0.748918
| 20
| 231
| 8.65
| 0.4
| 0.208092
| 0.32948
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.186147
| 231
| 18
| 39
| 12.833333
| 0.920213
| 0
| 0
| 0.5
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.5
| 0
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
|
0
| 5
|
b8a0b9fba6babe9a876f4d0071eb379a21bca9c6
| 284
|
py
|
Python
|
ramda/greater_test.py
|
Rafi993/pyramda
|
4fa7fe28d5eaa798b702d28bdd3948515cb88f48
|
[
"MIT"
] | 56
|
2018-08-06T08:44:58.000Z
|
2022-03-17T09:49:03.000Z
|
ramda/greater_test.py
|
Rafi993/pyramda
|
4fa7fe28d5eaa798b702d28bdd3948515cb88f48
|
[
"MIT"
] | 28
|
2019-06-17T11:09:52.000Z
|
2022-02-18T16:59:21.000Z
|
ramda/greater_test.py
|
slavaGanzin/pyramda
|
4fa7fe28d5eaa798b702d28bdd3948515cb88f48
|
[
"MIT"
] | 5
|
2019-09-18T09:24:38.000Z
|
2021-07-21T08:40:23.000Z
|
from .greater import greater
from ramda.private.asserts import assert_equal
def greater_nocurry_test():
    """greater called with both arguments at once returns the larger one."""
    for pair, expected in (((5, 3), 5), ((5, 7), 7)):
        assert_equal(greater(*pair), expected)
def greater_curry_test():
    """greater partially applied with one argument behaves the same."""
    min5 = greater(5)
    for arg, expected in ((3, 5), (7, 7)):
        assert_equal(min5(arg), expected)
| 20.285714
| 46
| 0.707746
| 44
| 284
| 4.363636
| 0.363636
| 0.286458
| 0.1875
| 0.197917
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.059574
| 0.172535
| 284
| 13
| 47
| 21.846154
| 0.757447
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.555556
| 1
| 0.222222
| false
| 0
| 0.222222
| 0
| 0.444444
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
b8a23fcff10fee686785b688b8d2e8754e097815
| 94
|
py
|
Python
|
settings.py
|
yeshas1994/carla-dataset-runner
|
c781b9d2b5cd748d062f775b65a86b5d569c8e64
|
[
"MIT"
] | null | null | null |
settings.py
|
yeshas1994/carla-dataset-runner
|
c781b9d2b5cd748d062f775b65a86b5d569c8e64
|
[
"MIT"
] | null | null | null |
settings.py
|
yeshas1994/carla-dataset-runner
|
c781b9d2b5cd748d062f775b65a86b5d569c8e64
|
[
"MIT"
] | null | null | null |
# Absolute path to the CARLA 0.9.10 Python egg (py3.7, linux x86_64) that the
# runner adds to its import path.
CARLA_EGG_PATH = "/home/colab/carla/PythonAPI/carla/dist/carla-0.9.10-py3.7-linux-x86_64.egg"
| 47
| 93
| 0.776596
| 19
| 94
| 3.684211
| 0.789474
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.10989
| 0.031915
| 94
| 1
| 94
| 94
| 0.659341
| 0
| 0
| 0
| 0
| 1
| 0.787234
| 0.787234
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
b233fa62f628fd281e46f078649b1af7d45fba50
| 59
|
py
|
Python
|
example/other_module.py
|
devoxel/repro-pdoc-behaviour
|
8eee038a47ad4a078e8382952445a9b83dd97d06
|
[
"MIT"
] | null | null | null |
example/other_module.py
|
devoxel/repro-pdoc-behaviour
|
8eee038a47ad4a078e8382952445a9b83dd97d06
|
[
"MIT"
] | null | null | null |
example/other_module.py
|
devoxel/repro-pdoc-behaviour
|
8eee038a47ad4a078e8382952445a9b83dd97d06
|
[
"MIT"
] | null | null | null |
def foo():
    """Return the string 'foo' repeated ten times.

    # Foo!
    """
    return 10 * 'foo'
| 8.428571
| 19
| 0.338983
| 6
| 59
| 3.333333
| 0.666667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.058824
| 0.423729
| 59
| 6
| 20
| 9.833333
| 0.529412
| 0.101695
| 0
| 0
| 0
| 0
| 0.083333
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| true
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 1
| 0
|
0
| 5
|
b28a8d3f9ee606fd4c80db12aeb66a4c21ebd8ef
| 67
|
py
|
Python
|
packages/vaex-astro/vaex/astro/_version.py
|
rishi1111/vaex
|
b3516201d04e9277b8918dadab9df33a7c83c01a
|
[
"MIT"
] | null | null | null |
packages/vaex-astro/vaex/astro/_version.py
|
rishi1111/vaex
|
b3516201d04e9277b8918dadab9df33a7c83c01a
|
[
"MIT"
] | null | null | null |
packages/vaex-astro/vaex/astro/_version.py
|
rishi1111/vaex
|
b3516201d04e9277b8918dadab9df33a7c83c01a
|
[
"MIT"
] | null | null | null |
# Machine-comparable version: (major, minor, micro, pre-release tag).
__version_tuple__ = (0, 8, 0, 'dev.0')
# Human-readable version string; keep in sync with __version_tuple__.
__version__ = '0.8.0-dev.0'
| 22.333333
| 38
| 0.626866
| 13
| 67
| 2.538462
| 0.384615
| 0.121212
| 0.181818
| 0.363636
| 0.424242
| 0
| 0
| 0
| 0
| 0
| 0
| 0.137931
| 0.134328
| 67
| 2
| 39
| 33.5
| 0.431034
| 0
| 0
| 0
| 0
| 0
| 0.238806
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
b2906ffa2a6bcf0686663493c08406b70af9ee90
| 19
|
py
|
Python
|
src/wstool/__version__.py
|
romayiii/wstool
|
e163c10384de0f71d19fce9d67ce7a079c2fcfcd
|
[
"BSD-3-Clause"
] | 35
|
2015-02-18T17:59:03.000Z
|
2020-05-28T09:34:18.000Z
|
src/wstool/__version__.py
|
romayiii/wstool
|
e163c10384de0f71d19fce9d67ce7a079c2fcfcd
|
[
"BSD-3-Clause"
] | 119
|
2015-02-04T20:54:53.000Z
|
2020-06-25T15:30:36.000Z
|
src/wstool/__version__.py
|
romayiii/wstool
|
e163c10384de0f71d19fce9d67ce7a079c2fcfcd
|
[
"BSD-3-Clause"
] | 33
|
2015-06-20T14:52:16.000Z
|
2020-07-29T08:16:12.000Z
|
# Package version string.
version = '0.1.18'
| 9.5
| 18
| 0.578947
| 4
| 19
| 2.75
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.25
| 0.157895
| 19
| 1
| 19
| 19
| 0.4375
| 0
| 0
| 0
| 0
| 0
| 0.315789
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
b29e1f411da30463fea8fe485ff929b31715671d
| 13,549
|
py
|
Python
|
tests/test_cap.py
|
kysolvik/WeatherAlerts
|
5f782894d0b9268aa6ccf7de5c545507ce347aa8
|
[
"MIT"
] | 25
|
2015-03-15T09:28:16.000Z
|
2021-05-13T22:23:51.000Z
|
tests/test_cap.py
|
kysolvik/WeatherAlerts
|
5f782894d0b9268aa6ccf7de5c545507ce347aa8
|
[
"MIT"
] | 7
|
2016-10-14T15:41:56.000Z
|
2021-07-04T23:29:42.000Z
|
tests/test_cap.py
|
kysolvik/WeatherAlerts
|
5f782894d0b9268aa6ccf7de5c545507ce347aa8
|
[
"MIT"
] | 18
|
2015-02-05T01:53:12.000Z
|
2020-10-12T11:28:26.000Z
|
import os
import sys
sys.path.insert(0, os.path.abspath('..'))
import unittest
from weatheralerts.cap import CapParser
# some sample cap xml
rc = """<?xml version = '1.0' encoding = 'UTF-8' standalone = 'yes'?>
<feed xmlns = 'http://www.w3.org/2005/Atom'
xmlns:cap = 'urn:oasis:names:tc:emergency:cap:1.1'
xmlns:ha = 'http://www.alerting.net/namespace/index_1.0'>
<id>http://alerts.weather.gov/cap/id.atom</id>
<logo>http://alerts.weather.gov/images/xml_logo.gif</logo>
<generator>NWS CAP Server</generator>
<updated>2013-01-26T13:13:00-07:00</updated>
<author>
<name>w-nws.webmaster@noaa.gov</name>
</author>
<title>Current Watches, Warnings and Advisories for Idaho Issued by the National Weather Service</title>
<link href='http://alerts.weather.gov/cap/id.atom'/>
<entry>
<id>http://alerts.weather.gov/cap/wwacapget.php?x=ID124EE90581D4.DenseFogAdvisory.124EE911B1C0ID.BOINPWBOI.94f563b5486fa14b27263b9f27b03205</id>
<updated>2013-01-26T13:13:00-07:00</updated>
<published>2013-01-26T13:13:00-07:00</published>
<author>
<name>w-nws.webmaster@noaa.gov</name>
</author>
<title>Dense Fog Advisory issued January 26 at 1:13PM MST until January 26 at 1:00PM MST by NWS</title>
<link href="http://alerts.weather.gov/cap/wwacapget.php?x=ID124EE90581D4.DenseFogAdvisory.124EE911B1C0ID.BOINPWBOI.94f563b5486fa14b27263b9f27b03205"/>
<summary>...DENSE FOG CONTINUES IN THE IDAHO TREASURE VALLEY... .DENSE FOG CONTINUES IN THE IDAHO PORTION OF THE LOWER TREASURE VALLEY WHERE VISIBILITY HAS BEEN LESS THAN 200 YARDS...ESPECIALLY BETWEEN CALDWELL AND BOISE. VISIBILITY SHOULD IMPROVE AS A COLD FRONT COMES THROUGH THE AREA LATE TODAY. ...DENSE FOG ADVISORY IN EFFECT UNTIL 5 PM MST THIS AFTERNOON...</summary>
<cap:event>Dense Fog Advisory</cap:event>
<cap:effective>2013-01-26T13:13:00-07:00</cap:effective>
<cap:expires>2013-01-26T17:00:00-07:00</cap:expires>
<cap:status>Actual</cap:status>
<cap:msgType>Alert</cap:msgType>
<cap:category>Met</cap:category>
<cap:urgency>Expected</cap:urgency>
<cap:severity>Minor</cap:severity>
<cap:certainty>Likely</cap:certainty>
<cap:areaDesc>Lower Treasure Valley</cap:areaDesc>
<cap:polygon></cap:polygon>
<cap:geocode>
<valueName>FIPS6</valueName>
<value>016027 016045 016073 016075 016087</value>
<valueName>UGC</valueName>
<value>IDZ012</value>
</cap:geocode>
<cap:parameter>
<valueName>VTEC</valueName>
<value>/O.EXP.KBOI.FG.Y.0008.000000T0000Z-130126T2000Z/
/O.NEW.KBOI.FG.Y.0009.130126T2013Z-130127T0000Z/</value>
</cap:parameter>
</entry>
<entry>
<id>http://alerts.weather.gov/cap/wwacapget.php?x=ID124EE90581D4.DenseFogAdvisory.124EE911B1C0ID.BOINPWBOI.12dfa8dd949036098cfca451659153b7</id>
<updated>2013-01-26T13:13:00-07:00</updated>
<published>2013-01-26T13:13:00-07:00</published>
<author>
<name>w-nws.webmaster@noaa.gov</name>
</author>
<title>Dense Fog Advisory issued January 26 at 1:13PM MST until January 26 at 5:00PM MST by NWS</title>
<link href="http://alerts.weather.gov/cap/wwacapget.php?x=ID124EE90581D4.DenseFogAdvisory.124EE911B1C0ID.BOINPWBOI.12dfa8dd949036098cfca451659153b7"/>
<summary>...DENSE FOG CONTINUES IN THE IDAHO TREASURE VALLEY... .DENSE FOG CONTINUES IN THE IDAHO PORTION OF THE LOWER TREASURE VALLEY WHERE VISIBILITY HAS BEEN LESS THAN 200 YARDS...ESPECIALLY BETWEEN CALDWELL AND BOISE. VISIBILITY SHOULD IMPROVE AS A COLD FRONT COMES THROUGH THE AREA LATE TODAY. ...DENSE FOG ADVISORY IN EFFECT UNTIL 5 PM MST THIS AFTERNOON...</summary>
<cap:event>Dense Fog Advisory</cap:event>
<cap:effective>2013-01-26T13:13:00-07:00</cap:effective>
<cap:expires>2013-01-26T17:00:00-07:00</cap:expires>
<cap:status>Actual</cap:status>
<cap:msgType>Alert</cap:msgType>
<cap:category>Met</cap:category>
<cap:urgency>Expected</cap:urgency>
<cap:severity>Minor</cap:severity>
<cap:certainty>Likely</cap:certainty>
<cap:areaDesc>Upper Treasure Valley</cap:areaDesc>
<cap:polygon></cap:polygon>
<cap:geocode>
<valueName>FIPS6</valueName>
<value>016001 016039 016073</value>
<valueName>UGC</valueName>
<value>IDZ014</value>
</cap:geocode>
<cap:parameter>
<valueName>VTEC</valueName>
<value>/O.NEW.KBOI.FG.Y.0009.130126T2013Z-130127T0000Z/</value>
</cap:parameter>
</entry>
<entry>
<id>http://alerts.weather.gov/cap/wwacapget.php?x=ID124EE9056A64.FloodWatch.124EE920F400ID.MSOFFAMSO.738619c9bd434bd0bfbf7001349f0197</id>
<updated>2013-01-26T12:53:00-07:00</updated>
<published>2013-01-26T12:53:00-07:00</published>
<author>
<name>w-nws.webmaster@noaa.gov</name>
</author>
<title>Flood Watch issued January 26 at 12:53PM MST until January 27 at 5:00PM MST by NWS</title>
<link href="http://alerts.weather.gov/cap/wwacapget.php?x=ID124EE9056A64.FloodWatch.124EE920F400ID.MSOFFAMSO.738619c9bd434bd0bfbf7001349f0197"/>
<summary>...FLOOD WATCH NOW IN EFFECT THROUGH SUNDAY AFTERNOON... THE FLOOD WATCH IS NOW IN EFFECT FOR * A PORTION OF NORTH CENTRAL IDAHO...INCLUDING THE FOLLOWING COUNTY...LEMHI. * THROUGH SUNDAY AFTERNOON * TEMPERATURES IN THE SALMON AND LEMHI VALLEYS HAVE WARMED</summary>
<cap:event>Flood Watch</cap:event>
<cap:effective>2013-01-26T12:53:00-07:00</cap:effective>
<cap:expires>2013-01-27T17:00:00-07:00</cap:expires>
<cap:status>Actual</cap:status>
<cap:msgType>Alert</cap:msgType>
<cap:category>Met</cap:category>
<cap:urgency>Future</cap:urgency>
<cap:severity>Moderate</cap:severity>
<cap:certainty>Possible</cap:certainty>
<cap:areaDesc>Lemhi</cap:areaDesc>
<cap:polygon></cap:polygon>
<cap:geocode>
<valueName>FIPS6</valueName>
<value>016059</value>
<valueName>UGC</valueName>
<value>IDC059</value>
</cap:geocode>
<cap:parameter>
<valueName>VTEC</valueName>
<value>/O.EXT.KMSO.FA.A.0002.000000T0000Z-130128T0000Z/
/00000.0.IJ.000000T0000Z.000000T0000Z.000000T0000Z.OO/</value>
</cap:parameter>
</entry>
<entry>
<id>http://alerts.weather.gov/cap/wwacapget.php?x=ID124EE9053A58.WinterWeatherAdvisory.124EE905CAE0ID.MSOWSWMSO.67d47e7b54dd47ad8d5910a7bbb339d1</id>
<updated>2013-01-26T11:30:00-07:00</updated>
<published>2013-01-26T11:30:00-07:00</published>
<author>
<name>w-nws.webmaster@noaa.gov</name>
</author>
<title>Winter Weather Advisory issued January 26 at 11:30AM MST until January 26 at 3:00PM MST by NWS</title>
<link href="http://alerts.weather.gov/cap/wwacapget.php?x=ID124EE9053A58.WinterWeatherAdvisory.124EE905CAE0ID.MSOWSWMSO.67d47e7b54dd47ad8d5910a7bbb339d1"/>
<summary>...WINTER WEATHER ADVISORY REMAINS IN EFFECT UNTIL 2 PM PST THIS AFTERNOON ABOVE 4500 FEET... A WINTER WEATHER ADVISORY ABOVE 4500 FEET REMAINS IN EFFECT UNTIL 2 PM PST THIS AFTERNOON. * IMPACTS/TIMING: SNOW WILL ACCUMULATE ABOVE 4500 FEET THROUGH THIS AFTERNOON. EXPECT SLICK ROADWAYS ON HIGHWAY 12 OVER LOLO</summary>
<cap:event>Winter Weather Advisory</cap:event>
<cap:effective>2013-01-26T11:30:00-07:00</cap:effective>
<cap:expires>2013-01-26T15:00:00-07:00</cap:expires>
<cap:status>Actual</cap:status>
<cap:msgType>Alert</cap:msgType>
<cap:category>Met</cap:category>
<cap:urgency>Expected</cap:urgency>
<cap:severity>Minor</cap:severity>
<cap:certainty>Likely</cap:certainty>
<cap:areaDesc>Northern Clearwater Mountains; Southern Clearwater Mountains</cap:areaDesc>
<cap:polygon></cap:polygon>
<cap:geocode>
<valueName>FIPS6</valueName>
<value>016035 016049</value>
<valueName>UGC</valueName>
<value>IDZ005 IDZ006</value>
</cap:geocode>
<cap:parameter>
<valueName>VTEC</valueName>
<value>/O.CON.KMSO.WW.Y.0008.000000T0000Z-130126T2200Z/</value>
</cap:parameter>
</entry>
<entry>
<id>http://alerts.weather.gov/cap/wwacapget.php?x=ID124EE9053A58.WinterWeatherAdvisory.124EE905CAE0ID.MSOWSWMSO.8267d50e98191686ab5e8d4821939726</id>
<updated>2013-01-26T11:30:00-07:00</updated>
<published>2013-01-26T11:30:00-07:00</published>
<author>
<name>w-nws.webmaster@noaa.gov</name>
</author>
<title>Winter Weather Advisory issued January 26 at 11:30AM MST until January 27 at 11:00AM MST by NWS</title>
<link href="http://alerts.weather.gov/cap/wwacapget.php?x=ID124EE9053A58.WinterWeatherAdvisory.124EE905CAE0ID.MSOWSWMSO.8267d50e98191686ab5e8d4821939726"/>
<summary>...WINTER WEATHER ADVISORY IN EFFECT FROM 2 PM THIS AFTERNOON TO 11 AM MST SUNDAY... THE NATIONAL WEATHER SERVICE IN MISSOULA HAS ISSUED A WINTER WEATHER ADVISORY FOR SNOW...WHICH IS IN EFFECT FROM 2 PM THIS AFTERNOON TO 11 AM MST SUNDAY. * IMPACTS/TIMING: SNOW...MODERATE AT TIMES...WILL DEVELOP</summary>
<cap:event>Winter Weather Advisory</cap:event>
<cap:effective>2013-01-26T11:30:00-07:00</cap:effective>
<cap:expires>2013-01-26T15:00:00-07:00</cap:expires>
<cap:status>Actual</cap:status>
<cap:msgType>Alert</cap:msgType>
<cap:category>Met</cap:category>
<cap:urgency>Expected</cap:urgency>
<cap:severity>Minor</cap:severity>
<cap:certainty>Likely</cap:certainty>
<cap:areaDesc>Eastern Lemhi County; Western Lemhi County</cap:areaDesc>
<cap:polygon></cap:polygon>
<cap:geocode>
<valueName>UGC</valueName>
<value>IDZ009 IDZ010</value>
</cap:geocode>
<cap:parameter>
<valueName>VTEC</valueName>
<value>/O.EXB.KMSO.WW.Y.0008.130126T2100Z-130127T1800Z/</value>
</cap:parameter>
</entry>
<entry>
<id>http://alerts.weather.gov/cap/wwacapget.php?x=ID124EE90427BC.SpecialWeatherStatement.124EE905CAE0ID.PIHSPSPIH.db949cd29dd427fabcbb7cdbafe8d16a</id>
<updated>2013-01-26T04:27:00-07:00</updated>
<published>2013-01-26T04:27:00-07:00</published>
<author>
<name>w-nws.webmaster@noaa.gov</name>
</author>
<title>Special Weather Statement issued January 26 at 4:27AM MST by NWS</title>
<link href="http://alerts.weather.gov/cap/wwacapget.php?x=ID124EE90427BC.SpecialWeatherStatement.124EE905CAE0ID.PIHSPSPIH.db949cd29dd427fabcbb7cdbafe8d16a"/>
<summary>...WIDESPREAD SNOW AND COLDER WEATHER RETURNS STARTING TONIGHT... A STORM SYSTEM WILL BEGIN AFFECTING PORTIONS OF THE CENTRAL MOUNTAINS ALONG WITH THE MAGIC VALLEY AND SOUTHERN HIGHLANDS THIS EVENING AND TONIGHT. DURING THE DAY TOMORROW...THE FOCUS QUICKLY SHIFTS TO THE SOUTHERN AND EASTERN MOUNTAINS...AS WELL AS ALONG THE INTERSTATE CORRIDORS. A COLD FRONT SWEEPS THROUGH...HELPING TO</summary>
<cap:event>Special Weather Statement</cap:event>
<cap:effective>2013-01-26T04:27:00-07:00</cap:effective>
<cap:expires>2013-01-26T15:00:00-07:00</cap:expires>
<cap:status>Actual</cap:status>
<cap:msgType>Alert</cap:msgType>
<cap:category>Met</cap:category>
<cap:urgency>Expected</cap:urgency>
<cap:severity>Minor</cap:severity>
<cap:certainty>Observed</cap:certainty>
<cap:areaDesc>Big and Little Wood River Region; Cache Valley, Idaho Portion; Caribou Highlands; Lost River, Pashimeroi; Sawtooth Mountains; South Central Highlands; Wasatch Mountains, Idaho Portion</cap:areaDesc>
<cap:polygon></cap:polygon>
<cap:geocode>
<valueName>FIPS6</valueName>
<value>016005 016007 016011 016013 016019 016023 016029 016031 016037 016041 016071 016077</value>
<valueName>UGC</valueName>
<value>IDZ018 IDZ022 IDZ023 IDZ024 IDZ025 IDZ031 IDZ032</value>
</cap:geocode>
<cap:parameter>
<valueName>VTEC</valueName>
<value></value>
</cap:parameter>
</entry>
<entry>
<id>http://alerts.weather.gov/cap/wwacapget.php?x=ID124EE8F5A9F8.AirQualityAlert.124EE9142E78ID.MSOAQAMSO.738619c9bd434bd0bfbf7001349f0197</id>
<updated>2013-01-25T09:30:00-07:00</updated>
<published>2013-01-25T09:30:00-07:00</published>
<author>
<name>w-nws.webmaster@noaa.gov</name>
</author>
<title>Air Quality Alert issued January 25 at 9:30AM MST by NWS</title>
<link href="http://alerts.weather.gov/cap/wwacapget.php?x=ID124EE8F5A9F8.AirQualityAlert.124EE9142E78ID.MSOAQAMSO.738619c9bd434bd0bfbf7001349f0197"/>
<summary>...AN AIR QUALITY ADVISORY HAS BEEN ISSUED BY THE IDAHO DEPARTMENT OF ENVIRONMENTAL QUALITY... DUE TO SMOKE FROM WOOD BURNING FOR HOME HEATING...THE AIR QUALITY HAS BECOME UNHEALTHY FOR SENSITIVE GROUPS IN LEMHI COUNTY OF IDAHO INCLUDING THE CITIES OF SALMON. THIS ADVISORY WILL REMAIN IN EFFECT UNTIL AIR QUALITY HAS SIGNIFICANTLY IMPROVED.</summary>
<cap:event>Air Quality Alert</cap:event>
<cap:effective>2013-01-25T09:30:00-07:00</cap:effective>
<cap:expires>2013-01-27T09:30:00-07:00</cap:expires>
<cap:status>Actual</cap:status>
<cap:msgType>Alert</cap:msgType>
<cap:category>Met</cap:category>
<cap:urgency>Unknown</cap:urgency>
<cap:severity>Unknown</cap:severity>
<cap:certainty>Unknown</cap:certainty>
<cap:areaDesc>Lemhi</cap:areaDesc>
<cap:polygon></cap:polygon>
<cap:geocode>
<valueName>FIPS6</valueName>
<value>016059</value>
<valueName>UGC</valueName>
<value>IDC059</value>
</cap:geocode>
<cap:parameter>
<valueName>VTEC</valueName>
<value></value>
</cap:parameter>
</entry>
</feed>"""
# a sample empty cap feed
empty_cap = """<?xml version = '1.0' encoding = 'UTF-8' standalone = 'yes'?>
<id>http://alerts.weather.gov/cap/or.atom</id>
<logo>http://alerts.weather.gov/images/xml_logo.gif</logo>
<generator>NWS CAP Server</generator>
<updated>2013-01-26T20:58:01+00:00</updated>
<author>
<name>w-nws.webmaster@noaa.gov</name>
</author>
<title>Current Watches, Warnings and Advisories for Oregon Issued by the National Weather Service</title>
<link href='http://alerts.weather.gov/cap/or.atom'/>
<entry>
<id>http://alerts.weather.gov/cap/or.atom</id>
<updated>2013-01-26T20:58:01+00:00</updated>
<author>
<name>w-nws.webmaster@noaa.gov</name>
</author>
<title>There are no active watches, warnings or advisories</title>
<link href='http://alerts.weather.gov/cap/or.atom'/>
<summary>There are no active watches, warnings or advisories</summary>
</entry>
</feed>"""
class Test_Cap(unittest.TestCase):
def setUp(self):
pass
def test_parser(self):
c = CapParser(raw_cap=rc, geo=None)
c.get_alerts()
def test_empty_feed(self):
c = CapParser(empty_cap)
c.get_alerts()
if __name__ == '__main__':
unittest.main()
| 44.864238
| 406
| 0.771422
| 1,996
| 13,549
| 5.226453
| 0.198397
| 0.01783
| 0.016679
| 0.042178
| 0.74444
| 0.72824
| 0.722297
| 0.705426
| 0.693443
| 0.664015
| 0
| 0.124801
| 0.07329
| 13,549
| 301
| 407
| 45.013289
| 0.706037
| 0.003174
| 0
| 0.661818
| 0
| 0.170909
| 0.966822
| 0.365252
| 0
| 0
| 0
| 0
| 0
| 1
| 0.010909
| false
| 0.003636
| 0.014545
| 0
| 0.029091
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
a250664ff998f707d427ff64c94f2f8320c31c08
| 54
|
py
|
Python
|
develop.py
|
tanyongyao111/demo
|
814d1c0d730231ff589bd4beec9dc48f78f6d659
|
[
"MIT"
] | null | null | null |
develop.py
|
tanyongyao111/demo
|
814d1c0d730231ff589bd4beec9dc48f78f6d659
|
[
"MIT"
] | 7
|
2019-12-04T23:45:24.000Z
|
2022-02-10T08:23:10.000Z
|
develop.py
|
tanyongyao111/demo
|
814d1c0d730231ff589bd4beec9dc48f78f6d659
|
[
"MIT"
] | null | null | null |
dldldldldlldldldlllllllllllllllllllllllalx,,x,x,x,x,x
| 27
| 53
| 0.87037
| 6
| 54
| 7.833333
| 0.333333
| 0.170213
| 0.191489
| 0.170213
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.018519
| 54
| 1
| 54
| 54
| 0.886792
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0
| null | null | 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
a2d02e6faa26aff43a9d36b39387526b85db9156
| 88
|
py
|
Python
|
CybORG/CybORG/Shared/Actions/ShellActionsFolder/OpenConnectionFolder/CredentialAccessFolder/__init__.py
|
rafvasq/cage-challenge-1
|
95affdfa38afc1124f1a1a09c92fbc0ed5b96318
|
[
"MIT"
] | 18
|
2021-08-20T15:07:55.000Z
|
2022-03-11T12:05:15.000Z
|
CybORG/CybORG/Shared/Actions/ShellActionsFolder/OpenConnectionFolder/CredentialAccessFolder/__init__.py
|
rafvasq/cage-challenge-1
|
95affdfa38afc1124f1a1a09c92fbc0ed5b96318
|
[
"MIT"
] | 7
|
2021-11-09T06:46:58.000Z
|
2022-03-31T12:35:06.000Z
|
CybORG/CybORG/Shared/Actions/ShellActionsFolder/OpenConnectionFolder/CredentialAccessFolder/__init__.py
|
rafvasq/cage-challenge-1
|
95affdfa38afc1124f1a1a09c92fbc0ed5b96318
|
[
"MIT"
] | 13
|
2021-08-17T00:26:31.000Z
|
2022-03-29T20:06:45.000Z
|
from .BruteForceAccessFolder import SSHHydraBruteForce
from .SSHAccess import SSHAccess
| 29.333333
| 54
| 0.886364
| 8
| 88
| 9.75
| 0.625
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.090909
| 88
| 2
| 55
| 44
| 0.975
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
0c430eeb775bf9396c1e09ef213b4470ca3ac6fc
| 529
|
py
|
Python
|
lib/Document.py
|
josndan/urabn-fortnight
|
5f0413997bea4298f83e1029e26e8ea78a57ac65
|
[
"MIT"
] | null | null | null |
lib/Document.py
|
josndan/urabn-fortnight
|
5f0413997bea4298f83e1029e26e8ea78a57ac65
|
[
"MIT"
] | null | null | null |
lib/Document.py
|
josndan/urabn-fortnight
|
5f0413997bea4298f83e1029e26e8ea78a57ac65
|
[
"MIT"
] | null | null | null |
class Document:
def __init__(self, docId, playId, sceneId, sceneNum, text, length):
self.docId = docId
self.playId = playId
self.sceneId = sceneId
self.sceneNum = sceneNum
self.text = text
self.length = length
self.score = 0
def getId(self):
return self.docId
def getSceneId(self):
return self.sceneId
def getPlayId(self):
return self.playId
def __repr__(self):
return f"{self.score}:{self.docId} : {self.sceneId}\n"
| 529
| 529
| 0.595463
| 62
| 529
| 4.951613
| 0.322581
| 0.117264
| 0.136808
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.002717
| 0.304348
| 529
| 1
| 529
| 529
| 0.831522
| 0
| 0
| 0
| 0
| 0
| 0.083176
| 0.047259
| 0
| 0
| 0
| 0
| 0
| 1
| 0.294118
| false
| 0
| 0
| 0.235294
| 0.588235
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 5
|
a7506fdc94f5819349f6e4dc8e76afc42cb3c09c
| 17
|
py
|
Python
|
parlai/tasks/__init__.py
|
tosingithub/parlai
|
250765a5dad6fc6d283a659fdbf6b03ec621c5db
|
[
"MIT"
] | null | null | null |
parlai/tasks/__init__.py
|
tosingithub/parlai
|
250765a5dad6fc6d283a659fdbf6b03ec621c5db
|
[
"MIT"
] | null | null | null |
parlai/tasks/__init__.py
|
tosingithub/parlai
|
250765a5dad6fc6d283a659fdbf6b03ec621c5db
|
[
"MIT"
] | null | null | null |
# Empty init file
| 17
| 17
| 0.764706
| 3
| 17
| 4.333333
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.176471
| 17
| 1
| 17
| 17
| 0.928571
| 0.882353
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
a761e17f43e809422444acb87dfb280d603a3d67
| 17,748
|
py
|
Python
|
tests/views/administration/roles_test.py
|
BMeu/Aerarium
|
119946cead727ef68b5ecea339990d982c006391
|
[
"MIT"
] | null | null | null |
tests/views/administration/roles_test.py
|
BMeu/Aerarium
|
119946cead727ef68b5ecea339990d982c006391
|
[
"MIT"
] | 139
|
2018-12-26T07:54:31.000Z
|
2021-06-01T23:14:45.000Z
|
tests/views/administration/roles_test.py
|
BMeu/Aerarium
|
119946cead727ef68b5ecea339990d982c006391
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from app.userprofile import Permission
from app.userprofile import Role
from tests.views import ViewTestCase
class RolesTest(ViewTestCase):
def test_roles_list(self):
"""
Test the list of all roles.
Expected result: All roles that fit on the requested page are displayed, sorted by name.
"""
self.app.config['ITEMS_PER_PAGE'] = 2
# Add roles, but not sorted by name.
role_guest = self.create_role(name='Guest')
role_user = self.create_role(name='User')
role_admin = self.create_role(Permission.EditRole, name='Administrator')
roles_assorted = [
role_guest,
role_user,
role_admin,
]
# Ensure that they are not sorted by name on the DB.
# noinspection PyUnresolvedReferences
roles = Role.query.all()
self.assertListEqual(roles_assorted, roles)
# Add a user with permissions to view this page.
self.create_and_login_user(role=role_admin)
data = self.get('administration/roles')
title_role_admin = f'Edit role “{role_admin.name}”'
title_role_guest = f'Edit role “{role_guest.name}”'
title_role_user = f'Edit role “{role_user.name}”'
self.assertIn('Roles', data)
self.assertIn(title_role_admin, data)
self.assertIn(title_role_guest, data)
self.assertNotIn(title_role_user, data)
self.assertIn('Displaying roles 1 to 2 of 3', data)
# Test that the order of the admin and guest role match.
pos_of_admin = data.find(title_role_admin)
pos_of_guest = data.find(title_role_guest)
self.assertLess(pos_of_admin, pos_of_guest)
def test_role_header_get_no_role(self):
"""
Test editing a role that does not exist.
Expected result: An error 404 is returned.
"""
role = self.create_role(Permission.EditRole)
self.create_and_login_user(role=role)
non_existing_role = 'Guest'
self.assertIsNone(Role.load_from_name(non_existing_role))
self.get(f'/administration/role/{non_existing_role}', expected_status=404)
def test_role_new_get(self):
"""
Test showing the form to create a new role.
Expected result: The new-role page is shown.
"""
role = self.create_role(Permission.EditRole)
self.create_and_login_user(role=role)
data = self.get('/administration/role/new')
self.assertIn('Add a New Role', data)
self.assertNotIn('The new role has been created.', data)
def test_role_new_post_invalid_name(self):
"""
Test creating a new role with an invalid name.
Expected result: The new-role page is shown and no role has been created.
"""
role = self.create_role(Permission.EditRole)
self.create_and_login_user(role=role)
name = Role.invalid_names[0]
data = self.post('/administration/role/new', data=dict(
name=name
))
self.assertIn('Add a New Role', data)
self.assertNotIn('The new role has been created.', data)
self.assertIsNone(Role.load_from_name(name))
def test_role_new_post_success(self):
"""
Test creating a new role.
Expected result: The list of roles is shown and the new role has been created.
"""
role = self.create_role(Permission.EditRole, name='Administrator')
self.create_and_login_user(role=role)
name = 'Guest'
permissions = Permission.EditRole | Permission.EditGlobalSettings
data = self.post('/administration/role/new', data=dict(
name=name,
editrole=True,
editglobalsettings=True
))
self.assertIn('Roles', data)
self.assertNotIn('Add a New Role', data)
self.assertIn('The new role has been created.', data)
role = Role.load_from_name(name)
self.assertIsNotNone(role)
self.assertEqual(permissions, role.permissions)
def test_role_header_get_existing_role(self):
"""
Test showing the edit form for an existing role.
Expected result: The edit page is shown.
"""
role = self.create_role(Permission.EditRole, name='Administrator')
self.create_and_login_user(role=role)
data = self.get(f'/administration/role/{role.name}')
self.assertIn(f'Edit Role “{role.name}”', data)
self.assertIn('Edit the role\'s header data', data)
self.assertNotIn('View the users who have this role assigned to them', data)
self.assertNotIn('Permanently delete this role', data)
self.assertNotIn('Define the permissions which the users to whom this role is assigned will have.', data)
def test_role_header_post_header_data_existing_name(self):
"""
Test editing a role by setting an existing name.
Expected result: The edit page is shown, the role is not updated.
"""
existing_role = self.create_role(name='Guest')
name = 'Administrator'
role = self.create_role(Permission.EditRole, name=name)
self.create_and_login_user(role=role)
role_id = role.id
data = self.post(f'/administration/role/{role.name}', data=dict(
name=existing_role.name
))
role = Role.load_from_id(role_id)
self.assertIn(f'Edit Role “{name}”', data)
self.assertNotIn('The role has been updated.', data)
self.assertEqual(name, role.name)
self.assertIn('Edit the role\'s header data', data)
self.assertNotIn('View the users who have this role assigned to them', data)
self.assertNotIn('Permanently delete this role', data)
self.assertNotIn('Define the permissions which the users to whom this role is assigned will have.', data)
def test_role_header_post_header_data_new_name(self):
"""
Test editing a role by setting a new name.
Expected result: The edit page is shown, the role is updated.
"""
name = 'Administrator'
role = self.create_role(Permission.EditRole, name=name)
self.create_and_login_user(role=role)
new_name = 'Guest'
data = self.post(f'/administration/role/{name}', data=dict(
name=new_name
))
role = Role.load_from_id(role.id)
self.assertIn(f'Edit Role “{new_name}”', data)
self.assertIn('The role has been updated.', data)
self.assertEqual(new_name, role.name)
self.assertIn('Edit the role\'s header data', data)
self.assertNotIn('View the users who have this role assigned to them', data)
self.assertNotIn('Permanently delete this role', data)
self.assertNotIn('Define the permissions which the users to whom this role is assigned will have.', data)
def test_role_permissions_get_no_role(self):
"""
Test editing the permissions of a role that does not exist.
Expected result: An error 404 is returned.
"""
role = self.create_role(Permission.EditRole, name='Administrator')
self.create_and_login_user(role=role)
non_existing_role = 'Guest'
self.assertIsNone(Role.load_from_name(non_existing_role))
self.get(f'/administration/role/{non_existing_role}/permissions', expected_status=404)
def test_role_permissions_get(self):
"""
Test accessing the permissions page of a role.
Expected result: The permissions are listed.
"""
role = self.create_role(Permission.EditRole)
self.create_and_login_user(role=role)
data = self.get(f'/administration/role/{role.name}/permissions')
self.assertIn('<h1>Edit Role “', data)
self.assertIn('Define the permissions which the users to whom this role is assigned will have.', data)
self.assertNotIn('View the users who have this role assigned to them', data)
self.assertNotIn('Permanently delete this role', data)
self.assertNotIn('Edit the role\'s header data', data)
def test_role_permissions_post(self):
"""
Test updating the permissions of a role.
Expected result: The new permissions are set on the role.
"""
other_role = self.create_role(Permission.EditRole, name='Administrator')
role = self.create_role(Permission.EditRole, name='Moderator')
self.create_and_login_user(role=other_role)
new_permissions = Permission.EditRole | Permission.EditGlobalSettings
data = self.post(f'/administration/role/{role.name}/permissions', data=dict(
editglobalsettings=True,
editrole=True,
edituser=None,
))
role = Role.load_from_name(role.name)
self.assertEqual(new_permissions, role.permissions)
self.assertIn('<h1>Edit Role “', data)
self.assertIn('Define the permissions which the users to whom this role is assigned will have.', data)
# The apostrophe is escaped...
self.assertIn('The role's permissions have been updated.', data)
self.assertNotIn('View the users who have this role assigned to them', data)
self.assertNotIn('Permanently delete this role', data)
self.assertNotIn('Edit the role\'s header data', data)
def test_role_permissions_post_only_role_to_edit_roles(self):
"""
Test updating the permissions of a role that is the only role allowed to edit roles. Unset the permission
to edit roles.
Expected result: The new permissions are set on the role, but the role keeps the permission to edit roles.
"""
role = self.create_role(Permission.EditRole)
self.create_and_login_user(role=role)
self.assertTrue(role.is_only_role_allowed_to_edit_roles())
new_permissions = Permission.EditRole | Permission.EditGlobalSettings
data = self.post(f'/administration/role/{role.name}/permissions', data=dict(
editglobalsettings=True,
editrole=False,
edituser=None,
))
role = Role.load_from_name(role.name)
self.assertEqual(new_permissions, role.permissions)
self.assertIn('<h1>Edit Role “', data)
self.assertIn('Define the permissions which the users to whom this role is assigned will have.', data)
# The apostrophe is escaped...
self.assertIn('The role's permissions have been updated.', data)
self.assertNotIn('View the users who have this role assigned to them', data)
self.assertNotIn('Permanently delete this role', data)
self.assertNotIn('Edit the role\'s header data', data)
def test_role_users_get_no_role(self):
"""
Test listing the users of a role that does not exist.
Expected result: An error 404 is returned.
"""
role = self.create_role(Permission.EditRole, name='Administrator')
self.create_and_login_user(role=role)
non_existing_role = 'Guest'
self.assertIsNone(Role.load_from_name(non_existing_role))
self.get(f'/administration/role/{non_existing_role}/users', expected_status=404)
def test_role_users_get(self):
"""
Test accessing the user page of a role.
Expected result: The users are listed.
"""
name = 'Jane Doe'
role = self.create_role(Permission.EditRole)
self.create_and_login_user(name=name, role=role)
data = self.get(f'/administration/role/{role.name}/users')
self.assertIn('<h1>Edit Role “', data)
self.assertIn('View the users who have this role assigned to them', data)
self.assertNotIn('Permanently delete this role', data)
self.assertNotIn('Edit the role\'s header data', data)
self.assertNotIn('Define the permissions which the users to whom this role is assigned will have.', data)
self.assertIn(name, data)
def test_role_delete_get_no_role(self):
"""
Test deleting a role that does not exist.
Expected result: An error 404 is returned.
"""
role = self.create_role(Permission.EditRole, name='Administrator')
self.create_and_login_user(role=role)
non_existing_role = 'Guest'
self.assertIsNone(Role.load_from_name(non_existing_role))
self.get(f'/administration/role/{non_existing_role}/delete', expected_status=404)
def test_role_delete_get_only_allowed_to_edit_roles(self):
"""
Test accessing the delete page if the role is the only one allowed to edit roles.
Expected result: The role delete form is not shown.
"""
role = self.create_role(Permission.EditRole)
self.create_and_login_user(role=role)
role_id = role.id
data = self.get(f'/administration/role/{role.name}/delete')
role = Role.load_from_id(role_id)
self.assertIsNotNone(role)
self.assertIsNotNone(role.id)
self.assertIn('<h1>Edit Role “', data)
self.assertIn('This role cannot be deleted because it is the only one that can edit roles.', data)
self.assertNotIn('View the users who have this role assigned to them', data)
self.assertNotIn('Edit the role\'s header data', data)
self.assertNotIn('Define the permissions which the users to whom this role is assigned will have.', data)
self.assertNotIn('Permanently delete this role', data)
self.assertNotIn('The role has been deleted.', data)
def test_role_delete_get(self):
"""
Test accessing the delete page.
Expected result: The role delete form.
"""
other_role = self.create_role(Permission.EditRole, name='Administrator')
role = self.create_role(name='Guest')
self.create_and_login_user(role=other_role)
role_id = role.id
self.assertListEqual([], role.users.all())
data = self.get(f'/administration/role/{role.name}/delete')
role = Role.load_from_id(role_id)
self.assertIsNotNone(role)
self.assertIsNotNone(other_role.id)
self.assertIn('<h1>Edit Role “', data)
self.assertNotIn('View the users who have this role assigned to them', data)
self.assertNotIn('Edit the role\'s header data', data)
self.assertNotIn('Define the permissions which the users to whom this role is assigned will have.', data)
self.assertIn('Permanently delete this role', data)
self.assertNotIn('The role has been deleted.', data)
def test_role_delete_post_no_users(self):
"""
Test deleting a role that has no users.
Expected result: The role is deleted.
"""
other_role = self.create_role(Permission.EditRole, name='Administrator')
role = self.create_role(name='Guest')
self.create_and_login_user(role=other_role)
role_id = role.id
self.assertListEqual([], role.users.all())
data = self.post(f'/administration/role/{role.name}/delete', data=dict(
new_role=0
))
role = Role.load_from_id(role_id)
self.assertIsNone(role)
self.assertIsNotNone(other_role.id)
self.assertNotIn('<h1>Edit Role “', data)
self.assertIn('The role has been deleted.', data)
def test_role_delete_post_only_allowed_to_edit_roles(self):
"""
Test accessing the delete page if the role is the only one allowed to edit roles.
Expected result: The role delete form is not shown.
"""
role = self.create_role(Permission.EditRole)
self.create_and_login_user(role=role)
role_id = role.id
data = self.post(f'/administration/role/{role.name}/delete', data=dict(
new_role=0,
))
role = Role.load_from_id(role_id)
self.assertIsNotNone(role)
self.assertIsNotNone(role.id)
self.assertIn('<h1>Edit Role “', data)
self.assertIn('This role cannot be deleted because it is the only one that can edit roles.', data)
self.assertNotIn('View the users who have this role assigned to them', data)
self.assertNotIn('Edit the role\'s header data', data)
self.assertNotIn('Define the permissions which the users to whom this role is assigned will have.', data)
self.assertNotIn('Permanently delete this role', data)
self.assertNotIn('The role has been deleted.', data)
def test_role_delete_post_has_users(self):
"""
Test deleting a role that has users.
Expected result: The role is deleted.
"""
other_role = self.create_role(Permission.EditRole, name='Administrator')
role = self.create_role(name='Guest')
self.create_and_login_user(role=other_role)
# Add a user for the role to delete.
other_user = self.create_user(email='john@doe.com', name='John Doe', password='ABC123!', role=role)
role_id = role.id
self.assertListEqual([other_user], role.users.all())
data = self.post(f'/administration/role/{role.name}/delete', data=dict(
new_role=other_role.id
))
role = Role.load_from_id(role_id)
self.assertIsNone(role)
self.assertIsNotNone(other_role.id)
self.assertNotIn('<h1>Edit Role “', data)
self.assertIn('The role has been deleted.', data)
# noinspection PyUnresolvedReferences
self.assertEqual(other_role, other_user.role)
| 37.052192
| 118
| 0.648017
| 2,308
| 17,748
| 4.839688
| 0.070624
| 0.053715
| 0.068039
| 0.038675
| 0.848702
| 0.810116
| 0.762041
| 0.738854
| 0.695613
| 0.689615
| 0
| 0.003634
| 0.255803
| 17,748
| 478
| 119
| 37.129707
| 0.842065
| 0.140636
| 0
| 0.639847
| 0
| 0
| 0.248581
| 0.049343
| 0
| 0
| 0
| 0
| 0.398467
| 1
| 0.076628
| false
| 0.003831
| 0.011494
| 0
| 0.091954
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
a7823430065a26890c30cd4c9813a897dae00063
| 460
|
py
|
Python
|
build/lib.linux-x86_64-2.7/ryu/app/experiments/iperf_peers_fsize8.py
|
Helloworld1995/Ryu_SDN_Controller
|
2680f967debca361adc6ff14ddadcbbcde0c7082
|
[
"Apache-2.0"
] | 1
|
2021-03-11T01:47:35.000Z
|
2021-03-11T01:47:35.000Z
|
ryu/app/experiments/iperf_peers_fsize8.py
|
Helloworld1995/Ryu_SDN_Controller
|
2680f967debca361adc6ff14ddadcbbcde0c7082
|
[
"Apache-2.0"
] | null | null | null |
ryu/app/experiments/iperf_peers_fsize8.py
|
Helloworld1995/Ryu_SDN_Controller
|
2680f967debca361adc6ff14ddadcbbcde0c7082
|
[
"Apache-2.0"
] | null | null | null |
iperf_peers=[('h002', 'h004', '112.5M'), ('h001', 'h006', '112.5M'), ('h005', 'h006', '112.5M'), ('h011', 'h012', '112.5M'), ('h004', 'h003', '112.5M'), ('h007', 'h005', '112.5M'), ('h010', 'h004', '112.5M'), ('h016', 'h013', '112.5M'), ('h009', 'h003', '112.5M'), ('h008', 'h014', '112.5M'), ('h015', 'h014', '112.5M'), ('h013', 'h016', '112.5M'), ('h012', 'h005', '112.5M'), ('h006', 'h009', '112.5M'), ('h014', 'h013', '112.5M'), ('h003', 'h002', '112.5M')]
| 460
| 460
| 0.476087
| 66
| 460
| 3.30303
| 0.30303
| 0.366972
| 0.082569
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.387409
| 0.102174
| 460
| 1
| 460
| 460
| 0.140436
| 0
| 0
| 0
| 0
| 0
| 0.4859
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
a7c451fe5fd91253dda7fe9ea007952cec9c3a11
| 1,634
|
gyp
|
Python
|
build/android/pylib/remote/device/dummy/dummy.gyp
|
TwistedCore/external_v8
|
c6725dab9be251fbfc6fd7d53c3513a23e78c36c
|
[
"BSD-3-Clause"
] | 2
|
2019-01-28T08:09:58.000Z
|
2021-11-15T15:32:10.000Z
|
build/android/pylib/remote/device/dummy/dummy.gyp
|
TwistedCore/external_v8
|
c6725dab9be251fbfc6fd7d53c3513a23e78c36c
|
[
"BSD-3-Clause"
] | null | null | null |
build/android/pylib/remote/device/dummy/dummy.gyp
|
TwistedCore/external_v8
|
c6725dab9be251fbfc6fd7d53c3513a23e78c36c
|
[
"BSD-3-Clause"
] | 6
|
2020-09-23T08:56:12.000Z
|
2021-11-18T03:40:49.000Z
|
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# Running gtests on a remote device via am instrument requires both an "app"
# APK and a "test" APK with different package names. Our gtests only use one
# APK, so we build a dummy APK to upload as the app.
{
'variables': {
'remote_device_dummy_apk_name': 'remote_device_dummy',
'remote_device_dummy_apk_path': '<(PRODUCT_DIR)/apks/<(remote_device_dummy_apk_name).apk',
},
'targets': [
{
# GN: //build/android/pylib/remote/device/dummy:remote_device_dummy_apk
'target_name': 'remote_device_dummy_apk',
'type': 'none',
'variables': {
'apk_name': '<(remote_device_dummy_apk_name)',
'final_apk_path': '<(remote_device_dummy_apk_path)',
'java_in_dir': '.',
'never_lint': 1,
'android_manifest_path': '../../../../../../build/android/AndroidManifest.xml',
},
'includes': [
'../../../../../../build/java_apk.gypi',
]
},
{
'target_name': 'require_remote_device_dummy_apk',
'message': 'Making sure <(remote_device_dummy_apk_path) has been built.',
'type': 'none',
'variables': {
'required_file': '<(PRODUCT_DIR)/remote_device_dummy_apk/<(remote_device_dummy_apk_name).apk.required',
},
'inputs': [
'<(remote_device_dummy_apk_path)',
],
'outputs': [
'<(required_file)',
],
'action': [
'python', '../../build/android/gyp/touch.py', '<(required_file)',
],
}
]
}
| 33.346939
| 111
| 0.615667
| 198
| 1,634
| 4.762626
| 0.469697
| 0.19088
| 0.252386
| 0.254507
| 0.300106
| 0.135737
| 0.078473
| 0
| 0
| 0
| 0
| 0.003937
| 0.222766
| 1,634
| 48
| 112
| 34.041667
| 0.738583
| 0.26071
| 0
| 0.205128
| 0
| 0
| 0.634167
| 0.4275
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
38f8775fdad60900e957bb5058a81eb2a7ac9b56
| 7,578
|
py
|
Python
|
tapiriik/testing/statistics.py
|
julian-r/tapiriik
|
219d84dcda3bc2f8f609cbeb34f28a0acc546cc6
|
[
"Apache-2.0"
] | 1
|
2021-06-17T03:55:01.000Z
|
2021-06-17T03:55:01.000Z
|
tapiriik/testing/statistics.py
|
alteredego/tapiriik
|
3ccb62bb82d2bc88ea8d28868a69ffab60d808ac
|
[
"Apache-2.0"
] | null | null | null |
tapiriik/testing/statistics.py
|
alteredego/tapiriik
|
3ccb62bb82d2bc88ea8d28868a69ffab60d808ac
|
[
"Apache-2.0"
] | null | null | null |
from unittest import TestCase
from tapiriik.testing.testtools import TapiriikTestCase
from tapiriik.services.interchange import ActivityStatistic, ActivityStatisticUnit
class StatisticTests(TapiriikTestCase):
def test_unitconv_temp(self):
stat = ActivityStatistic(ActivityStatisticUnit.DegreesCelcius, value=0)
self.assertEqual(stat.asUnits(ActivityStatisticUnit.DegreesFahrenheit).Value, 32)
stat = ActivityStatistic(ActivityStatisticUnit.DegreesCelcius, value=-40)
self.assertEqual(stat.asUnits(ActivityStatisticUnit.DegreesFahrenheit).Value, -40)
stat = ActivityStatistic(ActivityStatisticUnit.DegreesFahrenheit, value=-40)
self.assertEqual(stat.asUnits(ActivityStatisticUnit.DegreesCelcius).Value, -40)
stat = ActivityStatistic(ActivityStatisticUnit.DegreesFahrenheit, value=32)
self.assertEqual(stat.asUnits(ActivityStatisticUnit.DegreesCelcius).Value, 0)
def test_unitconv_distance_nonmetric(self):
stat = ActivityStatistic(ActivityStatisticUnit.Miles, value=1)
self.assertEqual(stat.asUnits(ActivityStatisticUnit.Feet).Value, 5280)
stat = ActivityStatistic(ActivityStatisticUnit.Feet, value=5280/2)
self.assertEqual(stat.asUnits(ActivityStatisticUnit.Miles).Value, 0.5)
def test_unitconv_distance_metric(self):
stat = ActivityStatistic(ActivityStatisticUnit.Kilometers, value=1)
self.assertEqual(stat.asUnits(ActivityStatisticUnit.Meters).Value, 1000)
stat = ActivityStatistic(ActivityStatisticUnit.Meters, value=250)
self.assertEqual(stat.asUnits(ActivityStatisticUnit.Kilometers).Value, 0.25)
def test_unitconv_distance_cross(self):
stat = ActivityStatistic(ActivityStatisticUnit.Kilometers, value=1)
self.assertAlmostEqual(stat.asUnits(ActivityStatisticUnit.Miles).Value, 0.6214, places=4)
stat = ActivityStatistic(ActivityStatisticUnit.Miles, value=1)
self.assertAlmostEqual(stat.asUnits(ActivityStatisticUnit.Kilometers).Value, 1.609, places=3)
stat = ActivityStatistic(ActivityStatisticUnit.Miles, value=1)
self.assertAlmostEqual(stat.asUnits(ActivityStatisticUnit.Meters).Value, 1609, places=0)
def test_unitconv_velocity_metric(self):
stat = ActivityStatistic(ActivityStatisticUnit.MetersPerSecond, value=100)
self.assertEqual(stat.asUnits(ActivityStatisticUnit.KilometersPerHour).Value, 360)
stat = ActivityStatistic(ActivityStatisticUnit.KilometersPerHour, value=50)
self.assertAlmostEqual(stat.asUnits(ActivityStatisticUnit.MetersPerSecond).Value, 13.89, places=2)
def test_unitconv_velocity_cross(self):
stat = ActivityStatistic(ActivityStatisticUnit.KilometersPerHour, value=100)
self.assertAlmostEqual(stat.asUnits(ActivityStatisticUnit.MilesPerHour).Value, 62, places=0)
stat = ActivityStatistic(ActivityStatisticUnit.MilesPerHour, value=60)
self.assertAlmostEqual(stat.asUnits(ActivityStatisticUnit.KilometersPerHour).Value, 96.5, places=0)
def test_unitconv_impossible(self):
stat = ActivityStatistic(ActivityStatisticUnit.KilometersPerHour, value=100)
self.assertRaises(ValueError, stat.asUnits, ActivityStatisticUnit.Meters)
stat = ActivityStatistic(ActivityStatisticUnit.DegreesCelcius, value=100)
self.assertRaises(ValueError, stat.asUnits, ActivityStatisticUnit.Miles)
def test_unitconv_noop(self):
stat = ActivityStatistic(ActivityStatisticUnit.KilometersPerHour, value=100)
self.assertEqual(stat.asUnits(ActivityStatisticUnit.KilometersPerHour).Value, 100)
def test_stat_coalesce(self):
stat1 = ActivityStatistic(ActivityStatisticUnit.Meters, value=1)
stat2 = ActivityStatistic(ActivityStatisticUnit.Meters, value=2)
stat1.coalesceWith(stat2)
self.assertEqual(stat1.Value, 1.5)
def test_stat_coalesce_missing(self):
stat1 = ActivityStatistic(ActivityStatisticUnit.Meters, value=None)
stat2 = ActivityStatistic(ActivityStatisticUnit.Meters, value=2)
stat1.coalesceWith(stat2)
self.assertEqual(stat1.Value, 2)
stat1 = ActivityStatistic(ActivityStatisticUnit.Meters, value=1)
stat2 = ActivityStatistic(ActivityStatisticUnit.Meters, value=None)
stat1.coalesceWith(stat2)
self.assertEqual(stat1.Value, 1)
def test_stat_coalesce_multi(self):
stat1 = ActivityStatistic(ActivityStatisticUnit.Meters, value=1)
stat2 = ActivityStatistic(ActivityStatisticUnit.Meters, value=2)
stat3 = ActivityStatistic(ActivityStatisticUnit.Meters, value=3)
stat4 = ActivityStatistic(ActivityStatisticUnit.Meters, value=4)
stat5 = ActivityStatistic(ActivityStatisticUnit.Meters, value=5)
stat1.coalesceWith(stat2)
stat1.coalesceWith(stat3)
stat1.coalesceWith(stat4)
stat1.coalesceWith(stat5)
self.assertEqual(stat1.Value, 3)
def test_stat_coalesce_multi_mixed(self):
stat1 = ActivityStatistic(ActivityStatisticUnit.Meters, value=1)
stat2 = ActivityStatistic(ActivityStatisticUnit.Meters, value=2)
stat3 = ActivityStatistic(ActivityStatisticUnit.Meters, value=3)
stat4 = ActivityStatistic(ActivityStatisticUnit.Meters, value=4)
stat5 = ActivityStatistic(ActivityStatisticUnit.Meters, value=5)
stat5.coalesceWith(stat2)
stat5.coalesceWith(stat3)
stat1.coalesceWith(stat5)
stat1.coalesceWith(stat4)
self.assertEqual(stat1.Value, 3)
def test_stat_coalesce_multi_mixed2(self):
stat1 = ActivityStatistic(ActivityStatisticUnit.Meters, value=1)
stat2 = ActivityStatistic(ActivityStatisticUnit.Meters, value=2)
stat3 = ActivityStatistic(ActivityStatisticUnit.Meters, value=3)
stat4 = ActivityStatistic(ActivityStatisticUnit.Meters, value=4)
stat5 = ActivityStatistic(ActivityStatisticUnit.Meters, value=5)
stat5.coalesceWith(stat2)
stat3.coalesceWith(stat5)
stat4.coalesceWith(stat3)
stat1.coalesceWith(stat4)
self.assertEqual(stat1.Value, 3)
def test_stat_coalesce_multi_missingmixed(self):
stat1 = ActivityStatistic(ActivityStatisticUnit.Meters, value=1)
stat2 = ActivityStatistic(ActivityStatisticUnit.Meters, value=2)
stat3 = ActivityStatistic(ActivityStatisticUnit.Meters, value=None)
stat4 = ActivityStatistic(ActivityStatisticUnit.Meters, value=None)
stat5 = ActivityStatistic(ActivityStatisticUnit.Meters, value=5)
stat5.coalesceWith(stat2)
stat3.coalesceWith(stat5)
stat4.coalesceWith(stat3)
stat1.coalesceWith(stat4)
self.assertAlmostEqual(stat1.Value, 8/3)
def test_stat_coalesce_multi_missingmixed_multivalued(self):
stat1 = ActivityStatistic(ActivityStatisticUnit.Meters, value=None, min=None)
stat2 = ActivityStatistic(ActivityStatisticUnit.Meters, value=2, max=2)
stat3 = ActivityStatistic(ActivityStatisticUnit.Meters, value=None, gain=3)
stat4 = ActivityStatistic(ActivityStatisticUnit.Meters, value=None, loss=4)
stat5 = ActivityStatistic(ActivityStatisticUnit.Meters, value=5, min=3)
stat5.coalesceWith(stat2)
stat3.coalesceWith(stat5)
stat4.coalesceWith(stat3)
stat1.coalesceWith(stat4)
self.assertAlmostEqual(stat1.Value, 7/2)
self.assertEqual(stat1.Min, 3)
self.assertEqual(stat1.Max, 2)
self.assertEqual(stat1.Gain, 3)
self.assertEqual(stat1.Loss, 4)
| 48.890323
| 107
| 0.747163
| 708
| 7,578
| 7.932203
| 0.121469
| 0.338319
| 0.193732
| 0.279202
| 0.830484
| 0.704594
| 0.674145
| 0.504274
| 0.412749
| 0.385328
| 0
| 0.035692
| 0.164423
| 7,578
| 155
| 108
| 48.890323
| 0.851232
| 0
| 0
| 0.446281
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.247934
| 1
| 0.123967
| false
| 0
| 0.024793
| 0
| 0.157025
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
ac07a7e6b9294d8de8c5deaadebff8fe2e3b4d3e
| 42
|
py
|
Python
|
necrobot/util/parse/__init__.py
|
pancelor/necrobot
|
03003f0b4208c4e952cfa6b41a33d30480a351d7
|
[
"MIT"
] | 8
|
2016-01-15T00:28:55.000Z
|
2020-02-10T21:23:11.000Z
|
necrobot/util/parse/__init__.py
|
pancelor/necrobot
|
03003f0b4208c4e952cfa6b41a33d30480a351d7
|
[
"MIT"
] | 12
|
2017-01-01T22:14:54.000Z
|
2021-02-10T00:09:51.000Z
|
necrobot/util/parse/__init__.py
|
pancelor/necrobot
|
03003f0b4208c4e952cfa6b41a33d30480a351d7
|
[
"MIT"
] | 18
|
2016-02-05T22:19:46.000Z
|
2020-02-12T05:11:57.000Z
|
"""
Utility classes for input parsing.
"""
| 14
| 34
| 0.690476
| 5
| 42
| 5.8
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.142857
| 42
| 3
| 35
| 14
| 0.805556
| 0.809524
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
ac5fd6bd647e5ab4ebed692844f6704538e88d9e
| 36
|
py
|
Python
|
tests/__init__.py
|
vlrusu/ad5761
|
120ae44c16b0894913218acd6e0d71df3f44235b
|
[
"MIT"
] | null | null | null |
tests/__init__.py
|
vlrusu/ad5761
|
120ae44c16b0894913218acd6e0d71df3f44235b
|
[
"MIT"
] | null | null | null |
tests/__init__.py
|
vlrusu/ad5761
|
120ae44c16b0894913218acd6e0d71df3f44235b
|
[
"MIT"
] | 1
|
2021-03-19T13:17:18.000Z
|
2021-03-19T13:17:18.000Z
|
"""Unit test package for ad5761."""
| 18
| 35
| 0.666667
| 5
| 36
| 4.8
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.129032
| 0.138889
| 36
| 1
| 36
| 36
| 0.645161
| 0.805556
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
ac8902b7cfc2a0102156e52433ea3c496dc6749e
| 62
|
py
|
Python
|
Graph/Graph/users/tests.py
|
MGijon/TheGraph.es
|
34fc54e8d14625eb033f7506f12a615e3078c98b
|
[
"MIT"
] | null | null | null |
Graph/Graph/users/tests.py
|
MGijon/TheGraph.es
|
34fc54e8d14625eb033f7506f12a615e3078c98b
|
[
"MIT"
] | 30
|
2020-01-10T21:20:52.000Z
|
2022-03-12T00:25:41.000Z
|
Graph/Graph/users/tests.py
|
MGijon/TheGraph.es
|
34fc54e8d14625eb033f7506f12a615e3078c98b
|
[
"MIT"
] | null | null | null |
"""Users Tests."""
# Django
from django.test import TestCase
| 12.4
| 32
| 0.709677
| 8
| 62
| 5.5
| 0.875
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.145161
| 62
| 4
| 33
| 15.5
| 0.830189
| 0.322581
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
3bfa9d0982e6f1260f22e940e16ef0adaefa637e
| 71
|
py
|
Python
|
td3a_cpp/matmul/__init__.py
|
Axeldnahcram/td3a_cpp
|
679431cc292164d951129f0d62358a74af568551
|
[
"MIT"
] | null | null | null |
td3a_cpp/matmul/__init__.py
|
Axeldnahcram/td3a_cpp
|
679431cc292164d951129f0d62358a74af568551
|
[
"MIT"
] | null | null | null |
td3a_cpp/matmul/__init__.py
|
Axeldnahcram/td3a_cpp
|
679431cc292164d951129f0d62358a74af568551
|
[
"MIT"
] | null | null | null |
"""
Shortcuts to *matmul*.
"""
from .matmulpy import pymatmul # noqa
| 11.833333
| 38
| 0.661972
| 8
| 71
| 5.875
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.183099
| 71
| 5
| 39
| 14.2
| 0.810345
| 0.394366
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
3bff710f5a53484a2271e3ec8ebcd902457babf0
| 166
|
py
|
Python
|
cookit_api/models/__init__.py
|
Heath-Lester/cookit_server
|
e0520c71b57230908e83f8139d0347299746cb8f
|
[
"MIT"
] | null | null | null |
cookit_api/models/__init__.py
|
Heath-Lester/cookit_server
|
e0520c71b57230908e83f8139d0347299746cb8f
|
[
"MIT"
] | null | null | null |
cookit_api/models/__init__.py
|
Heath-Lester/cookit_server
|
e0520c71b57230908e83f8139d0347299746cb8f
|
[
"MIT"
] | 1
|
2021-06-15T20:06:15.000Z
|
2021-06-15T20:06:15.000Z
|
from .saved_recipe import Saved_Recipe
from .meal import Meal
from .instruction import Instruction
from .ingredient import Ingredient
from .equipment import Equipment
| 33.2
| 38
| 0.855422
| 22
| 166
| 6.363636
| 0.363636
| 0.157143
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.114458
| 166
| 5
| 39
| 33.2
| 0.952381
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
cbf426f729b210b4d303345e7b49838d78564aed
| 130
|
py
|
Python
|
anndata/readwrite.py
|
rilango/anndata
|
0621bb947ea2dde45827ad493e7947ad5f97eeba
|
[
"BSD-3-Clause"
] | 1
|
2021-02-19T06:14:42.000Z
|
2021-02-19T06:14:42.000Z
|
anndata/readwrite.py
|
rilango/anndata
|
0621bb947ea2dde45827ad493e7947ad5f97eeba
|
[
"BSD-3-Clause"
] | 4
|
2020-06-26T21:35:03.000Z
|
2021-02-12T15:28:32.000Z
|
anndata/readwrite.py
|
rilango/anndata
|
0621bb947ea2dde45827ad493e7947ad5f97eeba
|
[
"BSD-3-Clause"
] | null | null | null |
from warnings import warn
warn("Please only import from anndata, not anndata.readwrite", DeprecationWarning)
from ._io import *
| 21.666667
| 82
| 0.792308
| 17
| 130
| 6
| 0.647059
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.138462
| 130
| 5
| 83
| 26
| 0.910714
| 0
| 0
| 0
| 0
| 0
| 0.415385
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
0212e9e8276cac722b36a21a515449724b6d7ea5
| 226
|
py
|
Python
|
vspreview/toolbars/__init__.py
|
shssoichiro/vapoursynth-preview
|
5fad872e8fdb06a81693a97571e4d47b010dde28
|
[
"Apache-2.0"
] | 75
|
2019-03-13T22:45:28.000Z
|
2022-02-23T11:13:31.000Z
|
vspreview/toolbars/__init__.py
|
shssoichiro/vapoursynth-preview
|
5fad872e8fdb06a81693a97571e4d47b010dde28
|
[
"Apache-2.0"
] | 36
|
2019-03-23T11:34:08.000Z
|
2021-11-03T13:18:55.000Z
|
vspreview/toolbars/__init__.py
|
shssoichiro/vapoursynth-preview
|
5fad872e8fdb06a81693a97571e4d47b010dde28
|
[
"Apache-2.0"
] | 27
|
2019-03-30T13:45:42.000Z
|
2022-03-19T06:55:44.000Z
|
from .debug import DebugToolbar
from .misc import MiscToolbar
from .pipette import PipetteToolbar
from .playback import PlaybackToolbar
from .scening import SceningToolbar
from .benchmark import BenchmarkToolbar
| 32.285714
| 39
| 0.80531
| 24
| 226
| 7.583333
| 0.583333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.168142
| 226
| 6
| 40
| 37.666667
| 0.968085
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
026fbfcd888945600ee2a77ef367f08f82f030d2
| 33
|
py
|
Python
|
app/decorators.py
|
rouzazari/fuzzyflask
|
5e5a4cc2e5a733be65d54c781c87b12a9157d81a
|
[
"MIT"
] | 1
|
2016-08-06T05:35:53.000Z
|
2016-08-06T05:35:53.000Z
|
app/decorators.py
|
rouzazari/fuzzyflask
|
5e5a4cc2e5a733be65d54c781c87b12a9157d81a
|
[
"MIT"
] | null | null | null |
app/decorators.py
|
rouzazari/fuzzyflask
|
5e5a4cc2e5a733be65d54c781c87b12a9157d81a
|
[
"MIT"
] | null | null | null |
# use this for custom decorators
| 16.5
| 32
| 0.787879
| 5
| 33
| 5.2
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.181818
| 33
| 1
| 33
| 33
| 0.962963
| 0.909091
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
027ec3dae150263e55a1bbcedcc4e231a0979061
| 175
|
py
|
Python
|
mailing/mail/admin.py
|
todd-sudo/mailing
|
722827e373a4190698669f5787c2b8619773371d
|
[
"MIT"
] | null | null | null |
mailing/mail/admin.py
|
todd-sudo/mailing
|
722827e373a4190698669f5787c2b8619773371d
|
[
"MIT"
] | null | null | null |
mailing/mail/admin.py
|
todd-sudo/mailing
|
722827e373a4190698669f5787c2b8619773371d
|
[
"MIT"
] | null | null | null |
from django.contrib import admin
from .models import MailingList, Message, Client
admin.site.register(MailingList)
admin.site.register(Message)
admin.site.register(Client)
| 19.444444
| 48
| 0.817143
| 23
| 175
| 6.217391
| 0.478261
| 0.188811
| 0.356643
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.091429
| 175
| 8
| 49
| 21.875
| 0.899371
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.4
| 0
| 0.4
| 0
| 1
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
65f4c36996e16a41686732d07f3ddeadc004740c
| 225
|
py
|
Python
|
pyplume/tests/testFigures.py
|
awa1k3r/plume-generation-and-analysis
|
926f2b09fa1011515310167f0d2b34a051539db1
|
[
"BSD-3-Clause"
] | null | null | null |
pyplume/tests/testFigures.py
|
awa1k3r/plume-generation-and-analysis
|
926f2b09fa1011515310167f0d2b34a051539db1
|
[
"BSD-3-Clause"
] | 1
|
2020-06-02T09:51:36.000Z
|
2020-06-02T09:51:36.000Z
|
pyplume/tests/testFigures.py
|
SoftwareDevEngResearch/pyplume
|
f7d92b71896edc702d9ef769c510f53f118fcecf
|
[
"BSD-3-Clause"
] | 1
|
2020-04-16T19:15:52.000Z
|
2020-04-16T19:15:52.000Z
|
import sys,os,pytest,filecmp,shutil,pytest
def testFiguresMain():
"""This is a temporary placeholder"""
assert True
def runTests():
"""Use this function to run pytests."""
pytest.main([__file__,"--verbose"])
| 25
| 43
| 0.684444
| 28
| 225
| 5.357143
| 0.857143
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.164444
| 225
| 8
| 44
| 28.125
| 0.797872
| 0.288889
| 0
| 0
| 0
| 0
| 0.060403
| 0
| 0
| 0
| 0
| 0
| 0.2
| 1
| 0.4
| true
| 0
| 0.2
| 0
| 0.6
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 1
| 0
|
0
| 5
|
5a0c5251358441741a24db945724e244d3a9e3b5
| 105
|
py
|
Python
|
ado/apps/drivers/admin.py
|
edderleonardo/ado
|
fa3478e3aa55b71e7627a9b5017fa2fbff196c31
|
[
"BSD-3-Clause"
] | null | null | null |
ado/apps/drivers/admin.py
|
edderleonardo/ado
|
fa3478e3aa55b71e7627a9b5017fa2fbff196c31
|
[
"BSD-3-Clause"
] | null | null | null |
ado/apps/drivers/admin.py
|
edderleonardo/ado
|
fa3478e3aa55b71e7627a9b5017fa2fbff196c31
|
[
"BSD-3-Clause"
] | null | null | null |
from django.contrib import admin
from ado.apps.drivers.models import Driver
admin.site.register(Driver)
| 21
| 42
| 0.828571
| 16
| 105
| 5.4375
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.095238
| 105
| 4
| 43
| 26.25
| 0.915789
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.666667
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
5a13e4d79d1b45b30066e433dd0eba301006f6c7
| 64
|
py
|
Python
|
y/prices/dex/balancer/__init__.py
|
cartercarlson/ypricemagic
|
f17fec155db7fb44ee624cd6e75193f17c6238cf
|
[
"MIT"
] | 1
|
2022-03-28T16:07:07.000Z
|
2022-03-28T16:07:07.000Z
|
y/prices/dex/balancer/__init__.py
|
cartercarlson/ypricemagic
|
f17fec155db7fb44ee624cd6e75193f17c6238cf
|
[
"MIT"
] | null | null | null |
y/prices/dex/balancer/__init__.py
|
cartercarlson/ypricemagic
|
f17fec155db7fb44ee624cd6e75193f17c6238cf
|
[
"MIT"
] | null | null | null |
from y.prices.dex.balancer.balancer import balancer_multiplexer
| 32
| 63
| 0.875
| 9
| 64
| 6.111111
| 0.777778
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.0625
| 64
| 1
| 64
| 64
| 0.916667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
5a6575288e979ef157a8149e9deafb91d75940cf
| 298
|
py
|
Python
|
rpython/jit/backend/asmjs/test/test_loop_unroll.py
|
pypyjs/pypy
|
2c2c5c73d780ff71f03adc2f1bf2c1c4bb7cc031
|
[
"Apache-2.0",
"OpenSSL"
] | 34
|
2015-07-09T04:53:27.000Z
|
2021-07-19T05:22:27.000Z
|
idea2/pypyjs-3/deps/pypy/rpython/jit/backend/asmjs/test/test_loop_unroll.py
|
igormcoelho/neo-boa
|
c141b503183cab287744cd19be5dfd86d9bc8daf
|
[
"MIT"
] | 6
|
2015-05-30T17:20:45.000Z
|
2017-06-12T14:29:23.000Z
|
idea2/pypyjs-3/deps/pypy/rpython/jit/backend/asmjs/test/test_loop_unroll.py
|
igormcoelho/neo-boa
|
c141b503183cab287744cd19be5dfd86d9bc8daf
|
[
"MIT"
] | 11
|
2015-09-07T14:26:08.000Z
|
2020-04-10T07:20:41.000Z
|
import py
from rpython.jit.backend.asmjs.test.test_basic import JitASMJSMixin
from rpython.jit.metainterp.test import test_loop_unroll
class TestLoopSpec(JitASMJSMixin, test_loop_unroll.LoopUnrollTest):
# for the individual tests see
# ====> ../../../metainterp/test/test_loop.py
pass
| 33.111111
| 67
| 0.771812
| 39
| 298
| 5.74359
| 0.564103
| 0.107143
| 0.125
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.120805
| 298
| 8
| 68
| 37.25
| 0.854962
| 0.241611
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.2
| 0.6
| 0
| 0.8
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
|
0
| 5
|
5a748f9a4486bbd546c9d1df50c460b2d2ab26e8
| 36
|
py
|
Python
|
agagd/agagd_core/views/__init__.py
|
leeschumacher/agagd
|
3f23c7b8702c4dcf1b9f4f8a248ee5c5c0b81b9b
|
[
"MIT"
] | null | null | null |
agagd/agagd_core/views/__init__.py
|
leeschumacher/agagd
|
3f23c7b8702c4dcf1b9f4f8a248ee5c5c0b81b9b
|
[
"MIT"
] | null | null | null |
agagd/agagd_core/views/__init__.py
|
leeschumacher/agagd
|
3f23c7b8702c4dcf1b9f4f8a248ee5c5c0b81b9b
|
[
"MIT"
] | null | null | null |
from agagd_core.views.core import *
| 18
| 35
| 0.805556
| 6
| 36
| 4.666667
| 0.833333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.111111
| 36
| 1
| 36
| 36
| 0.875
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
ce4d9fd5607dbfd7bd014f6333e453c46c894d29
| 90
|
py
|
Python
|
Codefights/arcade/code-arcade/level-4/30.Apple-Boxes/Python/solution1.py
|
RevansChen/online-judge
|
ad1b07fee7bd3c49418becccda904e17505f3018
|
[
"MIT"
] | 7
|
2017-09-20T16:40:39.000Z
|
2021-08-31T18:15:08.000Z
|
Codefights/arcade/code-arcade/level-4/30.Apple-Boxes/Python/solution1.py
|
RevansChen/online-judge
|
ad1b07fee7bd3c49418becccda904e17505f3018
|
[
"MIT"
] | null | null | null |
Codefights/arcade/code-arcade/level-4/30.Apple-Boxes/Python/solution1.py
|
RevansChen/online-judge
|
ad1b07fee7bd3c49418becccda904e17505f3018
|
[
"MIT"
] | null | null | null |
# Python3
def appleBoxes(k):
return sum((-1) ** i * i ** 2 for i in range(1, k + 1))
| 18
| 59
| 0.533333
| 17
| 90
| 2.823529
| 0.705882
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.075758
| 0.266667
| 90
| 4
| 60
| 22.5
| 0.651515
| 0.077778
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| false
| 0
| 0
| 0.5
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 5
|
ce5e6891af341f7b44f55310759b93c4854f28ef
| 107
|
py
|
Python
|
src/ContactsForm.py
|
paramoun1h/testeee1
|
102fd28a045b78b01cb7b3975091fc0b1760df66
|
[
"MIT"
] | 2
|
2020-09-26T20:03:31.000Z
|
2021-08-07T09:05:35.000Z
|
src/ContactsForm.py
|
paramoun1h/testeee1
|
102fd28a045b78b01cb7b3975091fc0b1760df66
|
[
"MIT"
] | null | null | null |
src/ContactsForm.py
|
paramoun1h/testeee1
|
102fd28a045b78b01cb7b3975091fc0b1760df66
|
[
"MIT"
] | 1
|
2020-09-26T20:03:24.000Z
|
2020-09-26T20:03:24.000Z
|
from src import npyscreen
class ContactsForm(npyscreen.FormBaseNew):
def create(self):
pass
| 13.375
| 42
| 0.71028
| 12
| 107
| 6.333333
| 0.916667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.224299
| 107
| 7
| 43
| 15.285714
| 0.915663
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| false
| 0.25
| 0.25
| 0
| 0.75
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 1
| 0
|
0
| 5
|
ce7e73f7ee9d013b2644fe2a6b3bddfd5f1fe7f8
| 104
|
py
|
Python
|
pydow/router/__init__.py
|
gijswobben/pydow-2
|
e04c1fc37f344988b3a07c4f39b3c43edf3d5bba
|
[
"MIT"
] | null | null | null |
pydow/router/__init__.py
|
gijswobben/pydow-2
|
e04c1fc37f344988b3a07c4f39b3c43edf3d5bba
|
[
"MIT"
] | null | null | null |
pydow/router/__init__.py
|
gijswobben/pydow-2
|
e04c1fc37f344988b3a07c4f39b3c43edf3d5bba
|
[
"MIT"
] | null | null | null |
from pydow.router.router import Router
from pydow.router.link import Link
__all__ = ["Router", "Link"]
| 20.8
| 38
| 0.759615
| 15
| 104
| 5
| 0.4
| 0.24
| 0.4
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.125
| 104
| 4
| 39
| 26
| 0.824176
| 0
| 0
| 0
| 0
| 0
| 0.096154
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.666667
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
ce9bdfba8e07cc900dcae875e81081fe4f3e9b65
| 348
|
py
|
Python
|
tests/snippets/builtin_divmod.py
|
ypyf/RustPython
|
86103bfd0187a6073cab91142f698cb6b0a0de51
|
[
"MIT"
] | 1
|
2021-09-03T15:59:36.000Z
|
2021-09-03T15:59:36.000Z
|
tests/snippets/builtin_divmod.py
|
ypyf/RustPython
|
86103bfd0187a6073cab91142f698cb6b0a0de51
|
[
"MIT"
] | null | null | null |
tests/snippets/builtin_divmod.py
|
ypyf/RustPython
|
86103bfd0187a6073cab91142f698cb6b0a0de51
|
[
"MIT"
] | null | null | null |
# Regression tests for the built-in divmod().
from testutils import assert_raises

# Integer operands: (floor quotient, remainder).
assert divmod(11, 3) == (3, 2)
assert divmod(8,11) == (0, 8)
# Float operands yield float results (note binary-float rounding in the remainder).
assert divmod(0.873, 0.252) == (3.0, 0.11699999999999999)
# Negative dividend: quotient floors toward -inf, remainder takes the divisor's sign.
assert divmod(-86340, 86400) == (-1, 60)
# A zero divisor raises ZeroDivisionError for both int and float operands.
assert_raises(ZeroDivisionError, lambda: divmod(5, 0), 'divmod by zero')
assert_raises(ZeroDivisionError, lambda: divmod(5.0, 0.0), 'divmod by zero')
| 34.8
| 76
| 0.704023
| 55
| 348
| 4.4
| 0.418182
| 0.198347
| 0.239669
| 0.289256
| 0.355372
| 0.355372
| 0.355372
| 0
| 0
| 0
| 0
| 0.186885
| 0.123563
| 348
| 9
| 77
| 38.666667
| 0.606557
| 0
| 0
| 0
| 0
| 0
| 0.08046
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| true
| 0
| 0.142857
| 0
| 0.142857
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
ceabab4c87260022d3e0711aa1091a67fa0a4e3b
| 64
|
py
|
Python
|
apps/locations/managers/__init__.py
|
jorgesaw/kmarket
|
bffdced85c55585a664622b346e272af60b67c33
|
[
"MIT"
] | null | null | null |
apps/locations/managers/__init__.py
|
jorgesaw/kmarket
|
bffdced85c55585a664622b346e272af60b67c33
|
[
"MIT"
] | 1
|
2019-09-20T01:33:45.000Z
|
2019-09-20T01:33:45.000Z
|
apps/locations/managers/__init__.py
|
jorgesaw/kmarket
|
bffdced85c55585a664622b346e272af60b67c33
|
[
"MIT"
] | null | null | null |
from .states import StateManager
from .cities import CityManager
| 32
| 32
| 0.859375
| 8
| 64
| 6.875
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.109375
| 64
| 2
| 33
| 32
| 0.964912
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
cebf1db46ab247c0e66c1d5d3a474564e45660c5
| 61,845
|
py
|
Python
|
tests/app/views/test_drafts.py
|
pebblecode/cirrus-marketplace-api
|
64d9e3be8705a2fe64c964b16947e9877885de7b
|
[
"MIT"
] | null | null | null |
tests/app/views/test_drafts.py
|
pebblecode/cirrus-marketplace-api
|
64d9e3be8705a2fe64c964b16947e9877885de7b
|
[
"MIT"
] | null | null | null |
tests/app/views/test_drafts.py
|
pebblecode/cirrus-marketplace-api
|
64d9e3be8705a2fe64c964b16947e9877885de7b
|
[
"MIT"
] | null | null | null |
from tests.app.helpers import BaseApplicationTest, JSONUpdateTestMixin
from datetime import datetime
from flask import json
import mock
from app.models import Supplier, ContactInformation, Service, Framework, \
DraftService
from app import db
from nose.tools import assert_equal, assert_in, assert_false
class TestDraftServices(BaseApplicationTest):
service_id = None
updater_json = None
create_draft_json = None
def setup(self):
    """Per-test fixture: seed a supplier, its contact information,
    framework statuses, and one published G6 service to copy drafts from.

    NOTE(review): statement order matters — the service PUT at the end
    relies on the supplier and frameworks committed just before it.
    """
    super(TestDraftServices, self).setup()
    payload = self.load_example_listing("G6-IaaS")
    self.service_id = str(payload['id'])
    # Minimal audit payload required by every write endpoint.
    self.updater_json = {
        'updated_by': 'joeblogs'
    }
    # Base payload for creating a brand-new G-Cloud 7 draft.
    self.create_draft_json = self.updater_json.copy()
    self.create_draft_json['services'] = {
        'frameworkSlug': 'g-cloud-7',
        'lot': 'scs',
        'supplierId': 1
    }
    with self.app.app_context():
        db.session.add(
            Supplier(supplier_id=1, name=u"Supplier 1")
        )
        db.session.add(
            ContactInformation(
                supplier_id=1,
                contact_name=u"Liz",
                email=u"liz@royal.gov.uk",
                postcode=u"SW1A 1AA"
            )
        )
        # g-cloud-5 is live (closed to submissions); g-cloud-7 is open.
        Framework.query.filter_by(slug='g-cloud-5') \
            .update(dict(status='live'))
        Framework.query.filter_by(slug='g-cloud-7') \
            .update(dict(status='open'))
        db.session.commit()
    # Import one published service the copy-from tests can draft from.
    self.client.put(
        '/services/%s' % self.service_id,
        data=json.dumps(
            {'updated_by': 'joeblogs',
             'services': payload}),
        content_type='application/json')
def service_count(self):
    """Return the total number of Service rows in the database."""
    app_ctx = self.app.app_context()
    with app_ctx:
        return Service.query.count()
def draft_service_count(self):
    """Return the total number of DraftService rows in the database."""
    app_ctx = self.app.app_context()
    with app_ctx:
        return DraftService.query.count()
def test_reject_list_drafts_no_supplier_id(self):
    """Listing drafts without a supplier_id query param is a 400."""
    response = self.client.get('/draft-services')
    assert_equal(response.status_code, 400)
def test_reject_list_drafts_invalid_supplier_id(self):
    """A non-numeric supplier_id is rejected with a 400."""
    response = self.client.get('/draft-services?supplier_id=invalid')
    assert_equal(response.status_code, 400)
def test_reject_list_drafts_if_no_supplier_for_id(self):
    """A well-formed but unknown supplier_id yields a 404."""
    response = self.client.get('/draft-services?supplier_id=12345667')
    assert_equal(response.status_code, 404)
def test_returns_empty_list_if_no_drafts(self):
    """With no drafts created, listing succeeds and is empty."""
    response = self.client.get('/draft-services?supplier_id=1')
    assert_equal(response.status_code, 200)
    body = json.loads(response.get_data())
    assert_equal(len(body['services']), 0)
def test_returns_drafts_for_supplier(self):
self.client.put(
'/draft-services/copy-from/{}'.format(self.service_id),
data=json.dumps(self.updater_json),
content_type='application/json')
res = self.client.get('/draft-services?supplier_id=1')
assert_equal(res.status_code, 200)
drafts = json.loads(res.get_data())
assert_equal(len(drafts['services']), 1)
def test_returns_drafts_for_framework_with_drafts(self):
self.client.put(
'/draft-services/copy-from/{}'.format(self.service_id),
data=json.dumps(self.updater_json),
content_type='application/json')
res = self.client.get(
'/draft-services?supplier_id=1&framework=g-cloud-6'
)
assert_equal(res.status_code, 200)
drafts = json.loads(res.get_data())
assert_equal(len(drafts['services']), 1)
def test_does_not_return_drafts_for_framework_with_no_drafts(self):
self.client.put(
'/draft-services/copy-from/{}'.format(self.service_id),
data=json.dumps(self.updater_json),
content_type='application/json')
res = self.client.get(
'/draft-services?supplier_id=1&framework=g-cloud-7'
)
assert_equal(res.status_code, 200)
drafts = json.loads(res.get_data())
assert_equal(len(drafts['services']), 0)
def test_does_not_return_drafts_from_non_existant_framework(self):
self.client.put(
'/draft-services/copy-from/{}'.format(self.service_id),
data=json.dumps(self.updater_json),
content_type='application/json')
res = self.client.get(
'/draft-services?supplier_id=1&framework=this-is-not-valid'
)
assert res.status_code == 404
assert json.loads(res.get_data(as_text=True))["error"] == "framework 'this-is-not-valid' not found"
def test_returns_all_drafts_for_supplier_on_single_page(self):
with self.app.app_context():
now = datetime.utcnow()
service_ids = [
1234567890123411,
1234567890123412,
1234567890123413,
1234567890123414,
1234567890123415,
1234567890123416,
1234567890123417,
1234567890123418,
1234567890123419,
1234567890123410
]
for service_id in service_ids:
db.session.add(
Service(
service_id=str(service_id),
supplier_id=1,
updated_at=now,
status='published',
created_at=now,
data={'foo': 'bar'},
lot_id=1,
framework_id=1)
)
for service_id in service_ids:
self.client.put(
'/draft-services/copy-from/{}'.format(service_id),
data=json.dumps(self.updater_json),
content_type='application/json')
res = self.client.get('/draft-services?supplier_id=1')
assert_equal(res.status_code, 200)
drafts = json.loads(res.get_data())
assert_equal(len(drafts['services']), 10)
def test_returns_drafts_for_supplier_has_no_links(self):
self.client.put(
'/draft-services/copy-from/{}'.format(self.service_id),
data=json.dumps(self.updater_json),
content_type='application/json')
res = self.client.get('/draft-services?supplier_id=1')
assert_equal(res.status_code, 200)
drafts = json.loads(res.get_data())
assert_equal(len(drafts['links']), 0)
def test_reject_update_with_no_updater_details(self):
    """Updating a draft without an updated_by payload is a 400."""
    response = self.client.post('/draft-services/0000000000')
    assert_equal(response.status_code, 400)
def test_reject_copy_with_no_updated_by(self):
    """Copying a service to a draft without updated_by is a 400."""
    response = self.client.put('/draft-services/copy-from/0000000000')
    assert_equal(response.status_code, 400)
def test_reject_create_with_no_updated_by(self):
    """Creating a draft without updated_by is a 400."""
    response = self.client.post('/draft-services')
    assert_equal(response.status_code, 400)
def test_reject_invalid_service_id_on_copy(self):
res = self.client.put(
'/draft-services/copy-from/invalid-id!',
data=json.dumps(self.updater_json),
content_type='application/json')
assert_equal(res.status_code, 400)
def test_should_404_if_service_does_not_exist_on_copy(self):
res = self.client.put(
'/draft-services/copy-from/0000000000',
data=json.dumps(self.updater_json),
content_type='application/json')
assert_equal(res.status_code, 404)
def test_reject_invalid_service_id_on_get(self):
res = self.client.get('/draft-services?service_id=invalid-id!')
assert_equal(res.status_code, 400)
def test_reject_delete_with_no_updated_by(self):
res = self.client.delete('/draft-services/0000000000',
data=json.dumps({}),
content_type='application/json')
assert_equal(res.status_code, 400)
def test_reject_publish_with_no_updated_by(self):
res = self.client.post('/draft-services/0000000000/publish',
data=json.dumps({}),
content_type='application/json')
assert_equal(res.status_code, 400)
def test_should_create_draft_with_minimal_data(self):
res = self.client.post(
'/draft-services',
data=json.dumps(self.create_draft_json),
content_type='application/json')
data = json.loads(res.get_data())
assert_equal(res.status_code, 201)
assert_equal(data['services']['frameworkSlug'], 'g-cloud-7')
assert_equal(data['services']['frameworkName'], 'G-Cloud 7')
assert_equal(data['services']['status'], 'not-submitted')
assert_equal(data['services']['supplierId'], 1)
assert_equal(data['services']['lot'], 'scs')
def test_create_draft_checks_page_questions(self):
self.create_draft_json['page_questions'] = ['serviceName']
res = self.client.post(
'/draft-services',
data=json.dumps(self.create_draft_json),
content_type='application/json')
data = json.loads(res.get_data())
assert_equal(res.status_code, 400)
assert_equal(data['error'], {'serviceName': 'answer_required'})
def test_create_draft_only_checks_valid_page_questions(self):
self.create_draft_json['page_questions'] = ['tea_and_cakes']
res = self.client.post(
'/draft-services',
data=json.dumps(self.create_draft_json),
content_type='application/json')
assert_equal(res.status_code, 201)
def test_create_draft_should_create_audit_event(self):
res = self.client.post(
'/draft-services',
data=json.dumps(self.create_draft_json),
content_type='application/json')
assert_equal(res.status_code, 201)
data = json.loads(res.get_data())
draft_id = data['services']['id']
audit_response = self.client.get('/audit-events')
assert_equal(audit_response.status_code, 200)
data = json.loads(audit_response.get_data())
assert_equal(len(data['auditEvents']), 2)
assert_equal(data['auditEvents'][0]['type'], 'import_service')
assert_equal(data['auditEvents'][1]['user'], 'joeblogs')
assert_equal(data['auditEvents'][1]['type'], 'create_draft_service')
assert_equal(data['auditEvents'][1]['data']['draftId'], draft_id)
assert_equal(data['auditEvents'][1]['data']['draftJson'], self.create_draft_json['services'])
def test_should_not_create_draft_with_invalid_data(self):
    """A draft whose supplierId is not an int is rejected with a 400."""
    # NOTE(review): .copy() is shallow, so mutating ['services'] below also
    # mutates self.create_draft_json['services']; harmless only because
    # setup() rebuilds the fixture before every test — confirm if reused.
    invalid_create_json = self.create_draft_json.copy()
    invalid_create_json['services']['supplierId'] = "ShouldBeInt"
    res = self.client.post(
        '/draft-services',
        data=json.dumps(invalid_create_json),
        content_type='application/json')
    data = json.loads(res.get_data())
    assert_equal(res.status_code, 400)
    assert_in("Invalid supplier ID 'ShouldBeInt'", data['error'])
def test_should_not_create_draft_on_not_open_framework(self):
draft_json = self.create_draft_json.copy()
draft_json['services']['frameworkSlug'] = 'g-cloud-5'
res = self.client.post(
'/draft-services',
data=json.dumps(draft_json),
content_type='application/json')
data = json.loads(res.get_data())
assert_equal(res.status_code, 400)
assert_in("'g-cloud-5' is not open for submissions", data['error'])
def test_should_not_create_draft_with_invalid_lot(self):
    """A draft whose lot does not exist on the framework is rejected.

    Bug fix: the original built ``draft_json`` with the bad lot but then
    POSTed ``self.create_draft_json``. It only passed because ``.copy()``
    is shallow, so ``draft_json['services']`` aliases
    ``self.create_draft_json['services']``. Post ``draft_json`` so the
    test states its intent and no longer depends on that aliasing.
    """
    draft_json = self.create_draft_json.copy()
    draft_json['services']['lot'] = 'newlot'
    res = self.client.post(
        '/draft-services',
        data=json.dumps(draft_json),
        content_type='application/json')
    data = json.loads(res.get_data())
    assert_equal(res.status_code, 400)
    assert_in("Incorrect lot 'newlot' for framework 'g-cloud-7'", data['error'])
def test_can_save_additional_fields_to_draft(self):
res = self.client.post(
'/draft-services',
data=json.dumps(self.create_draft_json),
content_type='application/json')
data = json.loads(res.get_data())
draft_id = data['services']['id']
draft_update_json = self.updater_json.copy()
draft_update_json['services'] = {
'serviceTypes': ['Implementation'],
'serviceBenefits': ['Tests pass']
}
res2 = self.client.post(
'/draft-services/{}'.format(draft_id),
data=json.dumps(draft_update_json),
content_type='application/json')
data2 = json.loads(res2.get_data())
assert_equal(res2.status_code, 200)
assert_equal(data2['services']['frameworkSlug'], 'g-cloud-7')
assert_equal(data2['services']['frameworkName'], 'G-Cloud 7')
assert_equal(data2['services']['status'], 'not-submitted')
assert_equal(data2['services']['supplierId'], 1)
assert_equal(data2['services']['serviceTypes'], ['Implementation'])
assert_equal(data2['services']['serviceBenefits'], ['Tests pass'])
@mock.patch('app.db')
def test_update_draft_uses_serializable_isolation_level(self, db):
    """Draft updates must request a SERIALIZABLE connection.

    With ``app.db`` mocked, POSTing any draft update should configure the
    session connection with ``isolation_level='SERIALIZABLE'`` — this
    presumably guards against lost-update races; confirm against the view.
    """
    self.client.post('/draft-services/1234')
    db.session.connection.assert_called_with(execution_options={'isolation_level': 'SERIALIZABLE'})
def test_update_draft_should_create_audit_event(self):
res = self.client.post(
'/draft-services',
data=json.dumps(self.create_draft_json),
content_type='application/json')
data = json.loads(res.get_data())
draft_id = data['services']['id']
draft_update_json = self.updater_json.copy()
draft_update_json['services'] = {
'serviceTypes': ['Implementation'],
'serviceBenefits': ['Tests pass']
}
res2 = self.client.post(
'/draft-services/{}'.format(draft_id),
data=json.dumps(draft_update_json),
content_type='application/json')
assert_equal(res2.status_code, 200)
audit_response = self.client.get('/audit-events')
assert_equal(audit_response.status_code, 200)
data = json.loads(audit_response.get_data())
assert_equal(len(data['auditEvents']), 3)
assert_equal(data['auditEvents'][0]['type'], 'import_service')
assert_equal(data['auditEvents'][1]['user'], 'joeblogs')
assert_equal(data['auditEvents'][1]['type'], 'create_draft_service')
assert_equal(
data['auditEvents'][1]['data']['draftId'], draft_id
)
assert_equal(data['auditEvents'][2]['user'], 'joeblogs')
assert_equal(data['auditEvents'][2]['type'], 'update_draft_service')
assert_equal(
data['auditEvents'][2]['data']['draftId'], draft_id
)
def test_update_draft_should_purge_keys_with_null_values(self):
res = self.client.post(
'/draft-services',
data=json.dumps(self.create_draft_json),
content_type='application/json')
data = json.loads(res.get_data())
draft_id = data['services']['id']
draft_update_json = self.updater_json.copy()
draft_update_json['services'] = {
'serviceName': "What a great service",
'serviceTypes': ['Implementation'],
'serviceBenefits': ['Tests pass']
}
res2 = self.client.post(
'/draft-services/{}'.format(draft_id),
data=json.dumps(draft_update_json),
content_type='application/json')
assert_equal(res2.status_code, 200)
data2 = json.loads(res2.get_data())['services']
assert('serviceName' in data2)
assert('serviceBenefits' in data2)
assert('serviceTypes' in data2)
draft_update_json['services'] = {
'serviceTypes': None,
'serviceBenefits': None
}
res3 = self.client.post(
'/draft-services/{}'.format(draft_id),
data=json.dumps(draft_update_json),
content_type='application/json')
assert_equal(res3.status_code, 200)
data3 = json.loads(res3.get_data())['services']
assert('serviceName' in data3)
assert('serviceBenefits' not in data3)
assert('serviceTypes' not in data3)
def test_update_draft_should_validate_full_draft_if_submitted(self):
draft_id = self.create_draft_service()['id']
self.complete_draft_service(draft_id)
res = self.client.get('/draft-services/{}'.format(draft_id))
submitted_draft = json.loads(res.get_data())['services']
submitted_draft['serviceName'] = None
submitted_draft['serviceBenefits'] = None
draft_update_json = self.updater_json.copy()
draft_update_json['services'] = submitted_draft
res2 = self.client.post(
'/draft-services/{}'.format(draft_id),
data=json.dumps(draft_update_json),
content_type='application/json')
errors = json.loads(res2.get_data())['error']
assert_equal(res2.status_code, 400)
assert_equal(errors, {u'serviceName': u'answer_required', u'serviceBenefits': u'answer_required'})
def test_update_draft_should_not_validate_full_draft_if_not_submitted(self):
draft_id = self.create_draft_service()['id']
res = self.client.get('/draft-services/{}'.format(draft_id))
submitted_draft = json.loads(res.get_data())['services']
submitted_draft['serviceName'] = None
submitted_draft['serviceBenefits'] = None
draft_update_json = self.updater_json.copy()
draft_update_json['services'] = submitted_draft
res2 = self.client.post(
'/draft-services/{}'.format(draft_id),
data=json.dumps(draft_update_json),
content_type='application/json')
updated_draft = json.loads(res2.get_data())['services']
assert_equal(res2.status_code, 200)
assert_equal(updated_draft['status'], 'not-submitted')
assert('serviceName' not in updated_draft)
assert('serviceBenefits' not in updated_draft)
def test_validation_errors_returned_for_invalid_update_of_new_draft(self):
res = self.client.post(
'/draft-services',
data=json.dumps(self.create_draft_json),
content_type='application/json')
data = json.loads(res.get_data())
draft_id = data['services']['id']
draft_update_json = self.updater_json.copy()
draft_update_json['services'] = {
'serviceTypes': ['Bad Type'],
'serviceBenefits': ['Too many words 4 5 6 7 8 9 10 11']
}
res2 = self.client.post(
'/draft-services/{}'.format(draft_id),
data=json.dumps(draft_update_json),
content_type='application/json')
data2 = json.loads(res2.get_data())
assert_equal(res2.status_code, 400)
assert_in("'Bad Type' is not one of", data2['error']['serviceTypes'])
def test_validation_errors_returned_for_invalid_update_of_copy(self):
res = self.client.put(
'/draft-services/copy-from/{}'.format(self.service_id),
data=json.dumps(self.updater_json),
content_type='application/json')
draft_id = json.loads(res.get_data())['services']['id']
res = self.client.post(
'/draft-services/{}'.format(draft_id),
data=json.dumps({
'updated_by': 'joeblogs',
'services': {
'badField': 'new service name',
'priceUnit': 'chickens'
}
}),
content_type='application/json')
data = json.loads(res.get_data())
assert_equal(res.status_code, 400)
assert_in("'badField' was unexpected", str(data['error']['_form']))
assert_in("no_unit_specified", data['error']['priceUnit'])
def test_should_create_draft_from_existing_service(self):
res = self.client.put(
'/draft-services/copy-from/{}'.format(self.service_id),
data=json.dumps(self.updater_json),
content_type='application/json')
data = json.loads(res.get_data())
assert_equal(res.status_code, 201)
assert_equal(data['services']['serviceId'], self.service_id)
def test_create_draft_from_existing_should_create_audit_event(self):
res = self.client.put(
'/draft-services/copy-from/{}'.format(self.service_id),
data=json.dumps(self.updater_json),
content_type='application/json')
assert_equal(res.status_code, 201)
audit_response = self.client.get('/audit-events')
assert_equal(audit_response.status_code, 200)
data = json.loads(audit_response.get_data())
assert_equal(len(data['auditEvents']), 2)
assert_equal(data['auditEvents'][0]['type'], 'import_service')
assert_equal(data['auditEvents'][1]['user'], 'joeblogs')
assert_equal(data['auditEvents'][1]['type'], 'create_draft_service')
assert_equal(
data['auditEvents'][1]['data']['serviceId'], self.service_id
)
def test_should_not_create_two_drafts_from_existing_service(self):
self.client.put(
'/draft-services/copy-from/{}'.format(self.service_id),
data=json.dumps(self.updater_json),
content_type='application/json')
res = self.client.put(
'/draft-services/copy-from/{}'.format(self.service_id),
data=json.dumps(self.updater_json),
content_type='application/json')
data = json.loads(res.get_data())
assert_equal(res.status_code, 400)
assert_in(
'Draft already exists for service {}'.format(self.service_id),
data['error'])
def test_submission_draft_should_not_prevent_draft_being_created_from_existing_service(self):
res = self.publish_new_draft_service()
service = json.loads(res.get_data())['services']
res = self.client.put(
'/draft-services/copy-from/{}'.format(service['id']),
data=json.dumps(self.updater_json),
content_type='application/json')
assert res.status_code == 201
def test_should_fetch_a_draft(self):
    """A draft copied from a service can be fetched back by its id and
    carries the source serviceId.

    Bug fix: the final assertion previously parsed ``res`` (the PUT
    response) instead of ``fetch`` (the GET response), so the fetched
    draft body was never actually checked.
    """
    res = self.client.put(
        '/draft-services/copy-from/{}'.format(self.service_id),
        data=json.dumps(self.updater_json),
        content_type='application/json')
    assert_equal(res.status_code, 201)
    draft_id = json.loads(res.get_data())['services']['id']
    fetch = self.client.get('/draft-services/{}'.format(draft_id))
    assert_equal(fetch.status_code, 200)
    # Assert against the fetched draft, not the PUT response.
    data = json.loads(fetch.get_data())
    assert_equal(data['services']['serviceId'], self.service_id)
def test_invalid_draft_should_have_validation_errors(self):
res = self.client.post(
'/draft-services',
data=json.dumps(self.create_draft_json),
content_type='application/json')
assert res.status_code == 201
data = json.loads(res.get_data())
res = self.client.get('/draft-services/{}'.format(data['services']['id']))
assert res.status_code == 200
data = json.loads(res.get_data())
assert data['validationErrors']
def test_valid_draft_should_have_no_validation_errors(self):
draft = self.create_draft_service()
res = self.client.get('/draft-services/{}'.format(draft['id']))
assert res.status_code == 200
data = json.loads(res.get_data())
assert not data['validationErrors']
def test_should_404_on_fetch_a_draft_that_doesnt_exist(self):
    """Fetching an unknown draft id yields a 404."""
    response = self.client.get('/draft-services/0000000000')
    assert_equal(response.status_code, 404)
def test_should_404_on_delete_a_draft_that_doesnt_exist(self):
res = self.client.delete(
'/draft-services/0000000000',
data=json.dumps(self.updater_json),
content_type='application/json')
assert_equal(res.status_code, 404)
def test_should_delete_a_draft(self):
res = self.client.put(
'/draft-services/copy-from/{}'.format(self.service_id),
data=json.dumps(self.updater_json),
content_type='application/json')
assert_equal(res.status_code, 201)
draft_id = json.loads(res.get_data())['services']['id']
fetch = self.client.get('/draft-services/{}'.format(draft_id))
assert_equal(fetch.status_code, 200)
delete = self.client.delete(
'/draft-services/{}'.format(draft_id),
data=json.dumps(self.updater_json),
content_type='application/json')
assert_equal(delete.status_code, 200)
audit_response = self.client.get('/audit-events')
assert_equal(audit_response.status_code, 200)
data = json.loads(audit_response.get_data())
assert_equal(len(data['auditEvents']), 3)
assert_equal(data['auditEvents'][0]['type'], 'import_service')
assert_equal(data['auditEvents'][1]['type'], 'create_draft_service')
assert_equal(data['auditEvents'][2]['user'], 'joeblogs')
assert_equal(data['auditEvents'][2]['type'], 'delete_draft_service')
assert_equal(
data['auditEvents'][2]['data']['serviceId'], self.service_id
)
fetch_again = self.client.get('/draft-services/{}'.format(draft_id))
assert_equal(fetch_again.status_code, 404)
def test_should_be_able_to_update_a_draft(self):
res = self.client.put(
'/draft-services/copy-from/{}'.format(self.service_id),
data=json.dumps(self.updater_json),
content_type='application/json')
draft_id = json.loads(res.get_data())['services']['id']
update = self.client.post(
'/draft-services/{}'.format(draft_id),
data=json.dumps({
'updated_by': 'joeblogs',
'services': {
'serviceName': 'new service name'
}
}),
content_type='application/json')
data = json.loads(update.get_data())
assert_equal(update.status_code, 200)
assert_equal(data['services']['serviceName'], 'new service name')
fetch = self.client.get('/draft-services/{}'.format(draft_id))
data = json.loads(fetch.get_data())
assert_equal(fetch.status_code, 200)
assert_equal(data['services']['serviceName'], 'new service name')
def test_whitespace_is_stripped_when_updating_a_draft(self):
res = self.client.put(
'/draft-services/copy-from/{}'.format(self.service_id),
data=json.dumps(self.updater_json),
content_type='application/json')
draft_id = json.loads(res.get_data())['services']['id']
update = self.client.post(
'/draft-services/{}'.format(draft_id),
data=json.dumps({
'updated_by': 'joeblogs',
'services': {
'serviceName': ' a new service name ',
'serviceFeatures': [
" Feature 1 ",
" ",
"",
" second feature "
],
}
}),
content_type='application/json')
data = json.loads(update.get_data())
assert_equal(update.status_code, 200)
assert_equal(data['services']['serviceName'], 'a new service name')
fetch = self.client.get('/draft-services/{}'.format(draft_id))
data = json.loads(fetch.get_data())
assert_equal(fetch.status_code, 200)
assert_equal(data['services']['serviceName'], 'a new service name')
assert_equal(len(data['services']['serviceFeatures']), 2)
assert_equal(data['services']['serviceFeatures'][0], 'Feature 1')
assert_equal(data['services']['serviceFeatures'][1], 'second feature')
def test_should_edit_draft_with_audit_event(self):
res = self.client.put(
'/draft-services/copy-from/{}'.format(self.service_id),
data=json.dumps(self.updater_json),
content_type='application/json')
draft_id = json.loads(res.get_data())['services']['id']
update = self.client.post(
'/draft-services/{}'.format(draft_id),
data=json.dumps({
'updated_by': 'joeblogs',
'services': {
'serviceName': 'new service name'
}
}),
content_type='application/json')
assert_equal(update.status_code, 200)
audit_response = self.client.get('/audit-events')
assert_equal(audit_response.status_code, 200)
data = json.loads(audit_response.get_data())
assert_equal(len(data['auditEvents']), 3)
assert_equal(data['auditEvents'][0]['type'], 'import_service')
assert_equal(data['auditEvents'][2]['user'], 'joeblogs')
assert_equal(data['auditEvents'][2]['type'], 'update_draft_service')
assert_equal(
data['auditEvents'][2]['data']['serviceId'], self.service_id
)
assert_equal(
data['auditEvents'][2]['data']['updateJson']['serviceName'],
'new service name'
)
def test_should_be_a_400_if_no_service_block_in_update(self):
self.client.put(
'/draft-services/copy-from/{}'.format(self.service_id),
data=json.dumps(self.updater_json),
content_type='application/json')
update = self.client.post(
'/draft-services/{}'.format(self.service_id),
data=json.dumps({
'updated_by': 'joeblogs'
}),
content_type='application/json')
assert_equal(update.status_code, 400)
def test_should_not_be_able_to_publish_if_no_draft_exists(self):
res = self.client.post(
'/draft-services/98765/publish',
data=json.dumps({'updated_by': 'joeblogs'}),
content_type='application/json')
assert_equal(res.status_code, 404)
@mock.patch('app.service_utils.search_api_client')
def test_should_be_able_to_publish_valid_copied_draft_service(self, search_api_client):
initial = self.client.get('/services/{}'.format(self.service_id))
assert_equal(initial.status_code, 200)
assert_equal(
json.loads(initial.get_data())['services']['serviceName'],
'My Iaas Service')
res = self.client.put(
'/draft-services/copy-from/{}'.format(self.service_id),
data=json.dumps(self.updater_json),
content_type='application/json')
draft_id = json.loads(res.get_data())['services']['id']
first_draft = self.client.get(
'/draft-services/{}'.format(draft_id))
assert_equal(first_draft.status_code, 200)
assert_equal(
json.loads(first_draft.get_data())['services']['serviceName'],
'My Iaas Service')
self.client.post(
'/draft-services/{}'.format(draft_id),
data=json.dumps({
'updated_by': 'joeblogs',
'services': {
'serviceName': 'chickens'
}
}),
content_type='application/json')
updated_draft = self.client.get(
'/draft-services/{}'.format(draft_id))
assert_equal(updated_draft.status_code, 200)
assert_equal(
json.loads(updated_draft.get_data())['services']['serviceName'],
'chickens')
res = self.client.post(
'/draft-services/{}/publish'.format(draft_id),
data=json.dumps({'updated_by': 'joeblogs'}),
content_type='application/json')
assert_equal(res.status_code, 200)
audit_response = self.client.get('/audit-events')
assert_equal(audit_response.status_code, 200)
data = json.loads(audit_response.get_data())
assert_equal(len(data['auditEvents']), 4)
assert_equal(data['auditEvents'][0]['type'], 'import_service')
assert_equal(data['auditEvents'][1]['type'], 'create_draft_service')
assert_equal(data['auditEvents'][2]['type'], 'update_draft_service')
assert_equal(data['auditEvents'][3]['type'], 'publish_draft_service')
# draft should no longer exist
fetch = self.client.get('/draft-services/{}'.format(self.service_id))
assert_equal(fetch.status_code, 404)
# published should be updated
updated_draft = self.client.get('/services/{}'.format(self.service_id))
assert_equal(updated_draft.status_code, 200)
assert_equal(
json.loads(updated_draft.get_data())['services']['serviceName'],
'chickens')
# archive should be updated
archives = self.client.get(
'/archived-services?service-id={}'.format(self.service_id))
assert_equal(archives.status_code, 200)
assert_equal(
json.loads(archives.get_data())['services'][0]['serviceName'],
'My Iaas Service')
assert search_api_client.index.called
def test_should_not_be_able_to_publish_submission_if_not_submitted(self):
draft = self.create_draft_service()
res = self.publish_draft_service(draft['id'])
assert_equal(res.status_code, 400)
def test_should_not_be_able_to_republish_submission(self):
draft = self.create_draft_service()
self.complete_draft_service(draft['id'])
res = self.publish_draft_service(draft['id'])
assert_equal(res.status_code, 200)
res = self.publish_draft_service(draft['id'])
assert_equal(res.status_code, 400)
@mock.patch('app.service_utils.search_api_client')
def test_search_api_should_be_called_on_publish_if_framework_is_live(self, search_api_client):
draft_id = self.create_draft_service()['id']
self.complete_draft_service(draft_id)
with self.app.app_context():
Framework.query.filter_by(slug='g-cloud-7').update(dict(status='live'))
db.session.commit()
res = self.publish_draft_service(draft_id)
assert res.status_code == 200
assert search_api_client.index.called
@mock.patch('app.service_utils.search_api_client')
def test_should_be_able_to_publish_valid_new_draft_service(self, search_api_client):
draft_id = self.create_draft_service()['id']
self.complete_draft_service(draft_id)
res = self.publish_draft_service(draft_id)
assert_equal(res.status_code, 200)
created_service_data = json.loads(res.get_data())
new_service_id = created_service_data['services']['id']
audit_response = self.client.get('/audit-events')
assert_equal(audit_response.status_code, 200)
data = json.loads(audit_response.get_data())
assert_equal(len(data['auditEvents']), 5)
assert_equal(data['auditEvents'][0]['type'], 'import_service')
assert_equal(data['auditEvents'][1]['type'], 'create_draft_service')
assert_equal(data['auditEvents'][2]['type'], 'update_draft_service')
assert_equal(data['auditEvents'][3]['type'], 'complete_draft_service')
assert_equal(data['auditEvents'][4]['type'], 'publish_draft_service')
# draft should still exist
fetch = self.client.get('/draft-services/{}'.format(draft_id))
assert_equal(fetch.status_code, 200)
# G-Cloud 7 service should be visible from API
# (frontends hide them based on statuses)
fetch2 = self.client.get('/services/{}'.format(new_service_id))
assert_equal(fetch2.status_code, 200)
assert_equal(json.loads(fetch2.get_data())['services']['status'],
"published")
# archive should be updated
archives = self.client.get(
'/archived-services?service-id={}'.format(new_service_id))
assert_equal(archives.status_code, 200)
assert_equal(
json.loads(archives.get_data())['services'][0]['serviceName'],
'An example G-7 SCS Service')
# service should not be indexed as G-Cloud 7 is not live
assert not search_api_client.index.called
def create_draft_service(self):
    """Create a new G-Cloud 7 draft, fill it with the complete G7-SCS
    example listing, and return the resulting draft dict.

    Used by the publish/complete tests as a fixture helper.
    """
    res = self.client.post(
        '/draft-services',
        data=json.dumps(self.create_draft_json),
        content_type='application/json')
    assert_equal(res.status_code, 201)
    draft = json.loads(res.get_data())['services']
    # Copy the full example listing; 'id' must not be sent on update.
    g7_complete = self.load_example_listing("G7-SCS").copy()
    g7_complete.pop('id')
    draft_update_json = {'services': g7_complete,
                         'updated_by': 'joeblogs'}
    res2 = self.client.post(
        '/draft-services/{}'.format(draft['id']),
        data=json.dumps(draft_update_json),
        content_type='application/json')
    assert_equal(res2.status_code, 200)
    draft = json.loads(res2.get_data())['services']
    return draft
def complete_draft_service(self, draft_id):
    """POST the 'complete' action for the given draft; return the raw response."""
    url = '/draft-services/{}/complete'.format(draft_id)
    payload = json.dumps({'updated_by': 'joeblogs'})
    return self.client.post(url, data=payload, content_type='application/json')
def publish_draft_service(self, draft_id):
    """POST the 'publish' action for the given draft; return the raw response."""
    url = '/draft-services/{}/publish'.format(draft_id)
    payload = json.dumps({'updated_by': 'joeblogs'})
    return self.client.post(url, data=payload, content_type='application/json')
def publish_new_draft_service(self):
    """Create, complete and publish a draft; return the publish response."""
    draft = self.create_draft_service()
    completed = self.complete_draft_service(draft['id'])
    assert completed.status_code == 200
    published = self.publish_draft_service(draft['id'])
    assert published.status_code == 200
    return published
def test_submitted_drafts_are_not_deleted_when_published(self):
    """Publishing a submitted draft must leave the draft record in place."""
    draft = self.create_draft_service()
    # Assert the completion step itself succeeded, so a completion failure
    # is reported here rather than as a confusing publish failure below.
    # (The original discarded this response entirely.)
    assert self.complete_draft_service(draft['id']).status_code == 200
    assert self.draft_service_count() == 1
    assert self.publish_draft_service(draft['id']).status_code == 200
    # The draft is retained after publishing.
    assert self.draft_service_count() == 1
def test_drafts_made_from_services_are_deleted_when_published(self):
    """Publishing a draft copied from an existing service removes the draft."""
    copy_res = self.client.put(
        '/draft-services/copy-from/{}'.format(self.service_id),
        data=json.dumps(self.updater_json),
        content_type='application/json')
    copied_draft = json.loads(copy_res.get_data())['services']

    # One service (from setup) and one draft (the copy) before publishing.
    assert self.service_count() == 1
    assert self.draft_service_count() == 1

    publish_res = self.publish_draft_service(copied_draft['id'])
    assert publish_res.status_code == 200

    # Still one service afterwards, and the source draft is gone.
    assert self.service_count() == 1
    assert self.draft_service_count() == 0
@mock.patch('app.models.generate_new_service_id')
def test_service_id_collisions_should_be_handled(self, generate_new_service_id):
    """ID generation is retried until an unused service ID comes back."""
    # The first three calls collide on the same ID; the fourth differs.
    colliding_id = '1234567890123457'
    fresh_id = '1234567890123458'
    generate_new_service_id.side_effect = [
        colliding_id,
        colliding_id,
        colliding_id,
        fresh_id,
    ]

    for _ in range(2):
        res = self.publish_new_draft_service()
        assert_equal(res.status_code, 200)

    # Count is 3 because we create one in the setup
    assert self.service_count() == 3

    listing_res = self.client.get('/services?framework=g-cloud-7')
    services = json.loads(listing_res.get_data())['services']
    assert services[0]['id'] == colliding_id
    assert services[1]['id'] == fresh_id
    assert self.draft_service_count() == 2
def test_get_draft_returns_last_audit_event(self):
    """Fetching a draft includes its most recent audit event."""
    draft = json.loads(self.client.post(
        '/draft-services',
        data=json.dumps(self.create_draft_json),
        content_type='application/json'
    ).get_data())['services']

    # NOTE: the original passed the creation JSON as a request body on this
    # GET, which is dead payload for a GET request — removed.
    res = self.client.get('/draft-services/%d' % draft['id'])
    assert_equal(res.status_code, 200)

    data = json.loads(res.get_data())
    draft, audit_event = data['services'], data['auditEvents']
    assert_equal(audit_event['type'], 'create_draft_service')
class TestCopyDraft(BaseApplicationTest, JSONUpdateTestMixin):
    """Tests for POST /draft-services/<id>/copy."""

    # Consumed by JSONUpdateTestMixin's shared payload-validation tests.
    endpoint = '/draft-services/{self.draft_id}/copy'
    method = 'post'

    def setup(self):
        """Create a supplier with contact info, set framework statuses, and
        create one submitted G-Cloud 7 draft (stored on self.draft/draft_id)."""
        super(TestCopyDraft, self).setup()

        with self.app.app_context():
            db.session.add(
                Supplier(supplier_id=1, name=u"Supplier 1")
            )
            db.session.add(
                ContactInformation(
                    supplier_id=1,
                    contact_name=u"Liz",
                    email=u"liz@royal.gov.uk",
                    postcode=u"SW1A 1AA"
                )
            )
            Framework.query.filter_by(slug='g-cloud-5') \
                .update(dict(status='live'))
            Framework.query.filter_by(slug='g-cloud-7') \
                .update(dict(status='open'))
            db.session.commit()

        create_draft_json = {
            'updated_by': 'joeblogs',
            'services': {
                'frameworkSlug': 'g-cloud-7',
                'lot': 'scs',
                'supplierId': 1,
                'serviceName': "Draft",
                'status': 'submitted',
                'serviceSummary': 'This is a summary',
                "termsAndConditionsDocumentURL": "http://localhost/example.pdf",
                "pricingDocumentURL": "http://localhost/example.pdf",
                "serviceDefinitionDocumentURL": "http://localhost/example.pdf",
                "sfiaRateDocumentURL": "http://localhost/example.pdf",
            }
        }
        draft = self.client.post(
            '/draft-services',
            data=json.dumps(create_draft_json),
            content_type='application/json')
        self.draft = json.loads(draft.get_data())['services']
        self.draft_id = self.draft['id']

    def test_copy_draft(self):
        """Copying yields a new 'not-submitted' draft named '<name> copy',
        keeping lot, supplier and framework from the original."""
        res = self.client.post(
            '/draft-services/%s/copy' % self.draft_id,
            data=json.dumps({'updated_by': 'joeblogs'}),
            content_type='application/json')
        data = json.loads(res.get_data())
        assert_equal(res.status_code, 201, res.get_data())
        assert_equal(data['services']['lot'], 'scs')
        # Original was 'submitted'; the copy starts over as 'not-submitted'.
        assert_equal(data['services']['status'], 'not-submitted')
        assert_equal(data['services']['serviceName'], 'Draft copy')
        assert_equal(data['services']['supplierId'], 1)
        assert_equal(data['services']['frameworkSlug'], self.draft['frameworkSlug'])
        assert_equal(data['services']['frameworkName'], self.draft['frameworkName'])

    def test_copy_draft_should_create_audit_event(self):
        """The copy records a create_draft_service audit event that links the
        new draft to the draft it was copied from."""
        res = self.client.post(
            '/draft-services/%s/copy' % self.draft_id,
            data=json.dumps({'updated_by': 'joeblogs'}),
            content_type='application/json')
        assert_equal(res.status_code, 201)
        data = json.loads(res.get_data())
        draft_id = data['services']['id']

        audit_response = self.client.get('/audit-events')
        assert_equal(audit_response.status_code, 200)
        data = json.loads(audit_response.get_data())
        # Event 0 is the draft created in setup; event 1 is the copy.
        assert_equal(len(data['auditEvents']), 2)
        assert_equal(data['auditEvents'][1]['user'], 'joeblogs')
        assert_equal(data['auditEvents'][1]['type'], 'create_draft_service')
        assert_equal(data['auditEvents'][1]['data'], {
            'draftId': draft_id,
            'originalDraftId': self.draft_id
        })

    def test_should_not_create_draft_with_invalid_data(self):
        """Copying a nonexistent draft ID returns 404."""
        res = self.client.post(
            '/draft-services/1000/copy',
            data=json.dumps({'updated_by': 'joeblogs'}),
            content_type='application/json')
        assert_equal(res.status_code, 404)

    def test_should_not_copy_draft_service_description(self):
        """The service summary is not carried over to the copy."""
        res = self.client.post(
            '/draft-services/{}/copy'.format(self.draft_id),
            data=json.dumps({"updated_by": "me"}),
            content_type="application/json")
        data = json.loads(res.get_data())
        assert_equal(res.status_code, 201)
        assert_false("serviceSummary" in data['services'])

    def test_should_not_copy_draft_documents(self):
        """None of the document URLs are carried over to the copy."""
        res = self.client.post(
            '/draft-services/{}/copy'.format(self.draft_id),
            data=json.dumps({"updated_by": "me"}),
            content_type="application/json")
        data = json.loads(res.get_data())
        assert_equal(res.status_code, 201)
        assert_false("termsAndConditionsDocumentURL" in data['services'])
        assert_false("pricingDocumentURL" in data['services'])
        assert_false("serviceDefinitionDocumentURL" in data['services'])
        assert_false("sfiaRateDocumentURL" in data['services'])
class TestCompleteDraft(BaseApplicationTest, JSONUpdateTestMixin):
    """Tests for POST /draft-services/<id>/complete."""

    # Consumed by JSONUpdateTestMixin's shared payload-validation tests.
    endpoint = '/draft-services/{self.draft_id}/complete'
    method = 'post'

    def setup(self):
        """Create a supplier, open G-Cloud 7, and create a full G7-SCS draft
        (stored on self.draft/draft_id)."""
        super(TestCompleteDraft, self).setup()
        with self.app.app_context():
            db.session.add(Supplier(supplier_id=1, name=u"Supplier 1"))
            db.session.add(
                ContactInformation(
                    supplier_id=1,
                    contact_name=u"Test",
                    email=u"supplier@user.dmdev",
                    postcode=u"SW1A 1AA"
                )
            )
            Framework.query.filter_by(slug='g-cloud-7').update(dict(status='open'))
            db.session.commit()

        draft_json = self.load_example_listing("G7-SCS")
        draft_json['frameworkSlug'] = 'g-cloud-7'
        create_draft_json = {
            'updated_by': 'joeblogs',
            'services': draft_json
        }
        draft = self.client.post(
            '/draft-services',
            data=json.dumps(create_draft_json),
            content_type='application/json')
        self.draft = json.loads(draft.get_data())['services']
        self.draft_id = self.draft['id']

    def test_complete_draft(self):
        """Completing a valid draft moves its status to 'submitted'."""
        res = self.client.post(
            '/draft-services/%s/complete' % self.draft_id,
            data=json.dumps({'updated_by': 'joeblogs'}),
            content_type='application/json')
        data = json.loads(res.get_data())
        assert_equal(res.status_code, 200, res.get_data())
        assert_equal(data['services']['status'], 'submitted')

    def test_complete_draft_should_create_audit_event(self):
        """Completion records a complete_draft_service audit event."""
        res = self.client.post(
            '/draft-services/%s/complete' % self.draft_id,
            data=json.dumps({'updated_by': 'joeblogs'}),
            content_type='application/json')
        assert_equal(res.status_code, 200)

        audit_response = self.client.get('/audit-events')
        assert_equal(audit_response.status_code, 200)
        data = json.loads(audit_response.get_data())
        # Event 0 is the draft created in setup; event 1 is the completion.
        assert_equal(len(data['auditEvents']), 2)
        assert_equal(data['auditEvents'][1]['user'], 'joeblogs')
        assert_equal(data['auditEvents'][1]['type'], 'complete_draft_service')
        assert_equal(data['auditEvents'][1]['data'], {
            'draftId': self.draft_id,
        })

    def test_should_not_complete_draft_without_updated_by(self):
        """'updated_by' is mandatory; an empty payload is rejected with 400."""
        res = self.client.post(
            '/draft-services/%s/complete' % self.draft_id,
            data=json.dumps({}),
            content_type='application/json')
        assert_equal(res.status_code, 400)

    def test_should_not_complete_invalid_draft(self):
        """A draft missing required answers cannot be completed: the response
        is 400 with the missing fields listed in 'error'."""
        create_draft_json = {
            'updated_by': 'joeblogs',
            'services': {
                'frameworkSlug': 'g-cloud-7',
                'lot': 'scs',
                'supplierId': 1,
                'serviceName': 'Name',
            }
        }
        draft = self.client.post(
            '/draft-services',
            data=json.dumps(create_draft_json),
            content_type='application/json'
        )
        draft = json.loads(draft.get_data())['services']

        res = self.client.post(
            '/draft-services/%s/complete' % draft['id'],
            data=json.dumps({'updated_by': 'joeblogs'}),
            content_type='application/json')
        assert_equal(res.status_code, 400)
        errors = json.loads(res.get_data())['error']
        assert_in('serviceSummary', errors)
class TestDOSServices(BaseApplicationTest):
    """Tests for Digital Outcomes and Specialists draft services.

    The 'digital-specialists' lot allows only one draft per supplier, which
    drives the duplicate-draft and no-copy tests below.
    """
    updater_json = None
    create_draft_json = None

    def setup(self):
        """Open the DOS framework, create a supplier with contact info, and
        prepare the example digital-specialist draft payload."""
        super(TestDOSServices, self).setup()
        payload = self.load_example_listing("DOS-digital-specialist")
        self.updater_json = {
            'updated_by': 'joeblogs'
        }
        self.create_draft_json = self.updater_json.copy()
        self.create_draft_json['services'] = payload
        self.create_draft_json['services']['frameworkSlug'] = 'digital-outcomes-and-specialists'

        with self.app.app_context():
            self.set_framework_status('digital-outcomes-and-specialists', 'open')
            db.session.add(
                Supplier(supplier_id=1, name=u"Supplier 1")
            )
            db.session.add(
                ContactInformation(
                    supplier_id=1,
                    contact_name=u"Liz",
                    email=u"liz@royal.gov.uk",
                    postcode=u"SW1A 1AA"
                )
            )
            db.session.commit()

    def _post_dos_draft(self, draft_json=None):
        """Create a DOS draft (default payload unless draft_json is given)
        and assert it was created; returns the raw response."""
        res = self.client.post(
            '/draft-services',
            data=json.dumps(draft_json or self.create_draft_json),
            content_type='application/json')
        assert_equal(res.status_code, 201, res.get_data())
        return res

    def _edit_dos_draft(self, draft_id, services, page_questions=None):
        """POST an update to a draft; page_questions defaults to an empty
        list. Returns the raw response (status not asserted)."""
        res = self.client.post(
            '/draft-services/{}'.format(draft_id),
            data=json.dumps({
                'updated_by': 'joeblogs',
                'services': services,
                'page_questions': page_questions if page_questions is not None else []
            }),
            content_type='application/json')
        return res

    def test_should_create_dos_draft_with_minimal_data(self):
        """A new DOS draft carries framework, supplier and lot details."""
        res = self._post_dos_draft()
        data = json.loads(res.get_data())
        assert_equal(data['services']['frameworkSlug'], 'digital-outcomes-and-specialists')
        assert_equal(data['services']['frameworkName'], 'Digital Outcomes and Specialists')
        assert_equal(data['services']['status'], 'not-submitted')
        assert_equal(data['services']['supplierId'], 1)
        assert_equal(data['services']['lot'], 'digital-specialists')

    def test_disallow_multiple_drafts_for_one_service_lots(self):
        """A second digital-specialists draft for the same supplier is a 400."""
        self._post_dos_draft()
        res = self.client.post(
            '/draft-services',
            data=json.dumps(self.create_draft_json),
            content_type='application/json')
        data = json.loads(res.get_data())
        assert_equal(res.status_code, 400)
        assert_equal(data['error'], "'digital-specialists' service already exists for supplier '1'")

    def test_create_dos_draft_should_create_audit_event(self):
        """Draft creation records a create_draft_service audit event."""
        res = self._post_dos_draft()
        data = json.loads(res.get_data())
        draft_id = data['services']['id']

        audit_response = self.client.get('/audit-events')
        assert_equal(audit_response.status_code, 200)
        data = json.loads(audit_response.get_data())
        assert_equal(len(data['auditEvents']), 1)
        assert_equal(data['auditEvents'][0]['user'], 'joeblogs')
        assert_equal(data['auditEvents'][0]['type'], 'create_draft_service')
        assert_equal(
            data['auditEvents'][0]['data']['draftId'], draft_id
        )

    def test_should_fetch_a_dos_draft(self):
        """GET /draft-services/<id> returns the stored draft."""
        res = self._post_dos_draft()
        draft_id = json.loads(res.get_data())['services']['id']
        fetch = self.client.get('/draft-services/{}'.format(draft_id))
        assert_equal(fetch.status_code, 200)
        # Bug fix: the original decoded the creation response ('res') here,
        # so these assertions never checked the fetched document.
        data = json.loads(fetch.get_data())
        assert_equal(data['services']['dataProtocols'], True)
        assert_equal(data['services']['id'], draft_id)

    def test_should_delete_a_dos_draft(self):
        """Deleting a draft returns 200, records an audit event, and makes
        subsequent fetches 404."""
        res = self._post_dos_draft()
        draft_id = json.loads(res.get_data())['services']['id']
        fetch = self.client.get('/draft-services/{}'.format(draft_id))
        assert_equal(fetch.status_code, 200)

        delete = self.client.delete(
            '/draft-services/{}'.format(draft_id),
            data=json.dumps(self.updater_json),
            content_type='application/json')
        assert_equal(delete.status_code, 200)

        audit_response = self.client.get('/audit-events')
        assert_equal(audit_response.status_code, 200)
        data = json.loads(audit_response.get_data())
        # Event 0 is the creation; event 1 is the deletion.
        assert_equal(len(data['auditEvents']), 2)
        assert_equal(data['auditEvents'][0]['type'], 'create_draft_service')
        assert_equal(data['auditEvents'][1]['user'], 'joeblogs')
        assert_equal(data['auditEvents'][1]['type'], 'delete_draft_service')
        assert_equal(
            data['auditEvents'][1]['data']['draftId'], draft_id
        )

        fetch_again = self.client.get(
            '/draft-services/{}'.format(draft_id),
            data=json.dumps(self.updater_json),
            content_type='application/json')
        assert_equal(fetch_again.status_code, 404)

    def test_should_edit_dos_draft(self):
        """An update to a draft field is persisted and visible on fetch."""
        res = self._post_dos_draft()
        draft_id = json.loads(res.get_data())['services']['id']
        update = self._edit_dos_draft(
            draft_id=draft_id,
            services={'dataProtocols': False}
        )
        assert_equal(update.status_code, 200)
        fetch = self.client.get('/draft-services/{}'.format(draft_id))
        assert_equal(fetch.status_code, 200)
        data = json.loads(fetch.get_data())
        assert_equal(data['services']['dataProtocols'], False)
        assert_equal(data['services']['id'], draft_id)

    def test_should_not_edit_draft_with_invalid_price_strings(self):
        """Non-monetary price strings are rejected with 'not_money_format'."""
        res = self._post_dos_draft()
        draft_id = json.loads(res.get_data())['services']['id']
        update = self._edit_dos_draft(
            draft_id=draft_id,
            services={
                "agileCoachPriceMin": 'not_a_valid_price',
                "agileCoachPriceMax": '!@#$%^&*('},
            page_questions=[]
        )
        data = json.loads(update.get_data())
        for key in ['agileCoachPriceMin', 'agileCoachPriceMax']:
            assert_equal(data['error'][key], 'not_money_format')
        assert_equal(update.status_code, 400)

    def test_should_not_edit_draft_with_max_price_less_than_min_price(self):
        """Max price below min price is rejected with 'max_less_than_min'."""
        res = self._post_dos_draft()
        draft_id = json.loads(res.get_data())['services']['id']
        update = self._edit_dos_draft(
            draft_id=draft_id,
            services={
                "agileCoachPriceMin": '200',
                "agileCoachPriceMax": '100'},
            page_questions=[]
        )
        data = json.loads(update.get_data())
        assert_equal(data['error']['agileCoachPriceMax'], 'max_less_than_min')
        assert_equal(update.status_code, 400)

    def test_should_not_edit_draft_if_dependencies_missing(self):
        """Setting some fields of a dependent group without the rest yields
        'answer_required' errors for the missing fields."""
        res = self._post_dos_draft()
        draft_id = json.loads(res.get_data())['services']['id']
        update = self._edit_dos_draft(
            draft_id=draft_id,
            services={
                # missing "developerLocations"
                "dataProtocols": True,
                "developerPriceMin": "1"},
            page_questions=[]
        )
        data = json.loads(update.get_data())
        for key in ['developerLocations', 'developerPriceMax']:
            assert_equal(data['error'][key], 'answer_required')
        assert_equal(update.status_code, 400)

    def test_should_filter_out_invalid_page_questions(self):
        """Unknown page questions and anyOf-requirement keys are ignored
        rather than failing the update."""
        res = self._post_dos_draft()
        draft_id = json.loads(res.get_data())['services']['id']
        update = self._edit_dos_draft(
            draft_id=draft_id,
            services={
                "dataProtocols": True},
            page_questions=[
                # neither of these keys exist in the schema
                "clemenule",
                "firecracker",
                # keys which exist in anyOf requirements are ignored
                "developerLocations",
                "developerPriceMax",
                "developerPriceMin"]
        )
        assert_equal(update.status_code, 200)

    def test_should_not_copy_one_service_limit_lot_draft(self):
        """Single-service lots cannot be copied: 400 with an explanation."""
        draft = json.loads(self._post_dos_draft().get_data())
        res = self.client.post(
            '/draft-services/{}/copy'.format(draft['services']['id']),
            data=json.dumps({"updated_by": "me"}),
            content_type="application/json")
        data = json.loads(res.get_data())
        assert_equal(res.status_code, 400)
        assert_in("Cannot copy a 'digital-specialists' draft", data['error'])

    def test_complete_valid_dos_draft(self):
        """The full example draft can be completed successfully."""
        res = self._post_dos_draft()
        draft_id = json.loads(res.get_data())['services']['id']
        complete = self.client.post(
            '/draft-services/{}/complete'.format(draft_id),
            data=json.dumps(self.updater_json),
            content_type='application/json'
        )
        assert_equal(complete.status_code, 200)

    def test_should_not_complete_invalid_dos_draft(self):
        """A draft with no complete specialist role fails completion with a
        'specialist_required' form-level error."""
        draft_json = self.create_draft_json
        draft_json['services'].pop('agileCoachLocations')
        draft_json['services'].pop('agileCoachPriceMin')
        draft_json['services'].pop('agileCoachPriceMax')
        res = self._post_dos_draft(draft_json)
        draft_id = json.loads(res.get_data())['services']['id']
        complete = self.client.post(
            '/draft-services/{}/complete'.format(draft_id),
            data=json.dumps(self.updater_json),
            content_type='application/json'
        )
        data = json.loads(complete.get_data())
        assert_in("specialist_required", "{}".format(data['error']['_form']))
        assert_equal(complete.status_code, 400)
class TestUpdateDraftStatus(BaseApplicationTest, JSONUpdateTestMixin):
    """Tests for POST /draft-services/<id>/update-status."""

    # Consumed by JSONUpdateTestMixin's shared payload-validation tests.
    endpoint = '/draft-services/{self.draft_id}/update-status'
    method = 'post'

    def setup(self):
        """Create a supplier, open G-Cloud 7, and create a G7-SCS draft
        (stored on self.draft/draft_id) whose status the tests update."""
        super(TestUpdateDraftStatus, self).setup()
        with self.app.app_context():
            db.session.add(Supplier(supplier_id=1, name=u"Supplier 1"))
            db.session.add(
                ContactInformation(
                    supplier_id=1,
                    contact_name=u"Test",
                    email=u"supplier@user.dmdev",
                    postcode=u"SW1A 1AA"
                )
            )
            Framework.query.filter_by(slug='g-cloud-7').update(dict(status='open'))
            db.session.commit()

        draft_json = self.load_example_listing("G7-SCS")
        draft_json['frameworkSlug'] = 'g-cloud-7'
        create_draft_json = {
            'updated_by': 'joeblogs',
            'services': draft_json
        }
        draft = self.client.post(
            '/draft-services',
            data=json.dumps(create_draft_json),
            content_type='application/json')
        self.draft = json.loads(draft.get_data())['services']
        self.draft_id = self.draft['id']

    def test_update_draft_status(self):
        """The draft's status can be set (here to 'failed')."""
        res = self.client.post(
            '/draft-services/%s/update-status' % self.draft_id,
            data=json.dumps({'services': {'status': 'failed'}, 'updated_by': 'joeblogs'}),
            content_type='application/json')
        data = json.loads(res.get_data())
        assert_equal(res.status_code, 200, res.get_data())
        assert_equal(data['services']['status'], 'failed')

    def test_update_draft_status_should_create_audit_event(self):
        """A status update records an update_draft_service_status event."""
        res = self.client.post(
            '/draft-services/%s/update-status' % self.draft_id,
            data=json.dumps({'services': {'status': 'failed'}, 'updated_by': 'joeblogs'}),
            content_type='application/json')
        assert_equal(res.status_code, 200)

        audit_response = self.client.get('/audit-events')
        assert_equal(audit_response.status_code, 200)
        data = json.loads(audit_response.get_data())
        # Event 0 is the draft created in setup; event 1 is the status update.
        assert_equal(len(data['auditEvents']), 2)
        assert_equal(data['auditEvents'][1]['user'], 'joeblogs')
        assert_equal(data['auditEvents'][1]['type'], 'update_draft_service_status')
        assert_equal(data['auditEvents'][1]['data'], {
            'draftId': self.draft_id, 'status': 'failed'
        })

    def test_should_not_update_draft_status_to_invalid_status(self):
        """Unknown statuses are rejected with 400 and an explanatory error."""
        res = self.client.post(
            '/draft-services/%s/update-status' % self.draft_id,
            data=json.dumps({'services': {'status': 'INVALID-STATUS'}, 'updated_by': 'joeblogs'}),
            content_type='application/json')
        assert_equal(res.status_code, 400)
        assert_equal(json.loads(res.get_data()), {"error": "'INVALID-STATUS' is not a valid status"})
| 39.977376
| 107
| 0.610332
| 7,079
| 61,845
| 5.068795
| 0.052974
| 0.070815
| 0.035951
| 0.060866
| 0.827267
| 0.79675
| 0.754389
| 0.724291
| 0.693886
| 0.673262
| 0
| 0.020328
| 0.255461
| 61,845
| 1,546
| 108
| 40.003234
| 0.758937
| 0.008473
| 0
| 0.670295
| 0
| 0
| 0.193325
| 0.039686
| 0
| 0
| 0
| 0
| 0.218507
| 1
| 0.073872
| false
| 0.00311
| 0.010886
| 0.001555
| 0.103421
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
cecfc7cd482f8cab03db18e5924ccb8750270c21
| 141
|
py
|
Python
|
tests/Unit/PointwiseFunctions/AnalyticSolutions/ScalarAdvection/Sinusoid.py
|
nilsvu/spectre
|
1455b9a8d7e92db8ad600c66f54795c29c3052ee
|
[
"MIT"
] | 117
|
2017-04-08T22:52:48.000Z
|
2022-03-25T07:23:36.000Z
|
tests/Unit/PointwiseFunctions/AnalyticSolutions/ScalarAdvection/Sinusoid.py
|
GitHimanshuc/spectre
|
4de4033ba36547113293fe4dbdd77591485a4aee
|
[
"MIT"
] | 3,177
|
2017-04-07T21:10:18.000Z
|
2022-03-31T23:55:59.000Z
|
tests/Unit/PointwiseFunctions/AnalyticSolutions/ScalarAdvection/Sinusoid.py
|
geoffrey4444/spectre
|
9350d61830b360e2d5b273fdd176dcc841dbefb0
|
[
"MIT"
] | 85
|
2017-04-07T19:36:13.000Z
|
2022-03-01T10:21:00.000Z
|
# Distributed under the MIT License.
# See LICENSE.txt for details.
import numpy as np
def u(x, t):
    """Sinusoidal advection profile: sin(pi * (x[0] - t))."""
    phase = np.pi * (x[0] - t)
    return np.sin(phase)
| 15.666667
| 37
| 0.64539
| 26
| 141
| 3.5
| 0.807692
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.009091
| 0.219858
| 141
| 8
| 38
| 17.625
| 0.818182
| 0.446809
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0.333333
| 0.333333
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 1
| 0
| 0
|
0
| 5
|
ced187b2048ceff38f34cdcfa91cdd2113e5ea9e
| 52
|
py
|
Python
|
SlackHandler/__init__.py
|
ShigekiYoshioka/SlackHandler
|
52c0e910a64cd9ed999fdb1b01065cb69b49edfb
|
[
"MIT"
] | 1
|
2019-05-08T10:08:00.000Z
|
2019-05-08T10:08:00.000Z
|
SlackHandler/__init__.py
|
ShigekiYoshioka/SlackHandler
|
52c0e910a64cd9ed999fdb1b01065cb69b49edfb
|
[
"MIT"
] | null | null | null |
SlackHandler/__init__.py
|
ShigekiYoshioka/SlackHandler
|
52c0e910a64cd9ed999fdb1b01065cb69b49edfb
|
[
"MIT"
] | null | null | null |
from SlackHandler.slack_handler import SlackHandler
| 26
| 51
| 0.903846
| 6
| 52
| 7.666667
| 0.833333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.076923
| 52
| 1
| 52
| 52
| 0.958333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
ced5f67e62e59661f8994cdcbd0d4ce022cd1fd4
| 629
|
py
|
Python
|
src/rnaseq_lib/__init__.py
|
jvivian/rnaseq-lib
|
688ab84b73b44c2a3b6256ed390f1a54d13bfa8a
|
[
"MIT"
] | null | null | null |
src/rnaseq_lib/__init__.py
|
jvivian/rnaseq-lib
|
688ab84b73b44c2a3b6256ed390f1a54d13bfa8a
|
[
"MIT"
] | null | null | null |
src/rnaseq_lib/__init__.py
|
jvivian/rnaseq-lib
|
688ab84b73b44c2a3b6256ed390f1a54d13bfa8a
|
[
"MIT"
] | null | null | null |
# Single module imports
import rnaseq_lib.bam
import rnaseq_lib.civic
import rnaseq_lib.data
import rnaseq_lib.diff_exp
import rnaseq_lib.dim_red
import rnaseq_lib.docker
import rnaseq_lib.drugs
import rnaseq_lib.graphs
import rnaseq_lib.gtf
import rnaseq_lib.images
import rnaseq_lib.jupyter
import rnaseq_lib.math
import rnaseq_lib.math.dists
import rnaseq_lib.ml
# Main plot module and opts
import rnaseq_lib.plot
import rnaseq_lib.plot.opts
import rnaseq_lib.tissues
import rnaseq_lib.utils
# Derivative web modules
import rnaseq_lib.web
import rnaseq_lib.web.kegg
import rnaseq_lib.web.openfda
import rnaseq_lib.web.synapse
| 24.192308
| 29
| 0.860095
| 106
| 629
| 4.877358
| 0.320755
| 0.510638
| 0.638298
| 0.139265
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.09221
| 629
| 25
| 30
| 25.16
| 0.905429
| 0.111288
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
cede7b1642828cc7a0f76cfc0ef9f24279ada32f
| 37
|
py
|
Python
|
mayan/apps/lock_manager/exceptions.py
|
eshbeata/open-paperless
|
6b9ed1f21908116ad2795b3785b2dbd66713d66e
|
[
"Apache-2.0"
] | 2,743
|
2017-12-18T07:12:30.000Z
|
2022-03-27T17:21:25.000Z
|
mayan/apps/lock_manager/exceptions.py
|
kyper999/mayan-edms
|
ca7b8301a1f68548e8e718d42a728a500d67286e
|
[
"Apache-2.0"
] | 15
|
2020-06-06T00:00:48.000Z
|
2022-03-12T00:03:54.000Z
|
mayan/apps/lock_manager/exceptions.py
|
kyper999/mayan-edms
|
ca7b8301a1f68548e8e718d42a728a500d67286e
|
[
"Apache-2.0"
] | 257
|
2017-12-18T03:12:58.000Z
|
2022-03-25T08:59:10.000Z
|
class LockError(Exception):
    """Base exception for lock-related errors."""
    pass
| 12.333333
| 27
| 0.72973
| 4
| 37
| 6.75
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.189189
| 37
| 2
| 28
| 18.5
| 0.9
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.5
| 0
| 0
| 0.5
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
|
0
| 5
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.