hexsha string | size int64 | ext string | lang string | max_stars_repo_path string | max_stars_repo_name string | max_stars_repo_head_hexsha string | max_stars_repo_licenses list | max_stars_count int64 | max_stars_repo_stars_event_min_datetime string | max_stars_repo_stars_event_max_datetime string | max_issues_repo_path string | max_issues_repo_name string | max_issues_repo_head_hexsha string | max_issues_repo_licenses list | max_issues_count int64 | max_issues_repo_issues_event_min_datetime string | max_issues_repo_issues_event_max_datetime string | max_forks_repo_path string | max_forks_repo_name string | max_forks_repo_head_hexsha string | max_forks_repo_licenses list | max_forks_count int64 | max_forks_repo_forks_event_min_datetime string | max_forks_repo_forks_event_max_datetime string | content string | avg_line_length float64 | max_line_length int64 | alphanum_fraction float64 | qsc_code_num_words_quality_signal int64 | qsc_code_num_chars_quality_signal float64 | qsc_code_mean_word_length_quality_signal float64 | qsc_code_frac_words_unique_quality_signal float64 | qsc_code_frac_chars_top_2grams_quality_signal float64 | qsc_code_frac_chars_top_3grams_quality_signal float64 | qsc_code_frac_chars_top_4grams_quality_signal float64 | qsc_code_frac_chars_dupe_5grams_quality_signal float64 | qsc_code_frac_chars_dupe_6grams_quality_signal float64 | qsc_code_frac_chars_dupe_7grams_quality_signal float64 | qsc_code_frac_chars_dupe_8grams_quality_signal float64 | qsc_code_frac_chars_dupe_9grams_quality_signal float64 | qsc_code_frac_chars_dupe_10grams_quality_signal float64 | qsc_code_frac_chars_replacement_symbols_quality_signal float64 | qsc_code_frac_chars_digital_quality_signal float64 | qsc_code_frac_chars_whitespace_quality_signal float64 | qsc_code_size_file_byte_quality_signal float64 | qsc_code_num_lines_quality_signal float64 | qsc_code_num_chars_line_max_quality_signal float64 | qsc_code_num_chars_line_mean_quality_signal float64 | 
qsc_code_frac_chars_alphabet_quality_signal float64 | qsc_code_frac_chars_comments_quality_signal float64 | qsc_code_cate_xml_start_quality_signal float64 | qsc_code_frac_lines_dupe_lines_quality_signal float64 | qsc_code_cate_autogen_quality_signal float64 | qsc_code_frac_lines_long_string_quality_signal float64 | qsc_code_frac_chars_string_length_quality_signal float64 | qsc_code_frac_chars_long_word_length_quality_signal float64 | qsc_code_frac_lines_string_concat_quality_signal float64 | qsc_code_cate_encoded_data_quality_signal float64 | qsc_code_frac_chars_hex_words_quality_signal float64 | qsc_code_frac_lines_prompt_comments_quality_signal float64 | qsc_code_frac_lines_assert_quality_signal float64 | qsc_codepython_cate_ast_quality_signal float64 | qsc_codepython_frac_lines_func_ratio_quality_signal float64 | qsc_codepython_cate_var_zero_quality_signal bool | qsc_codepython_frac_lines_pass_quality_signal float64 | qsc_codepython_frac_lines_import_quality_signal float64 | qsc_codepython_frac_lines_simplefunc_quality_signal float64 | qsc_codepython_score_lines_no_logic_quality_signal float64 | qsc_codepython_frac_lines_print_quality_signal float64 | qsc_code_num_words int64 | qsc_code_num_chars int64 | qsc_code_mean_word_length int64 | qsc_code_frac_words_unique null | qsc_code_frac_chars_top_2grams int64 | qsc_code_frac_chars_top_3grams int64 | qsc_code_frac_chars_top_4grams int64 | qsc_code_frac_chars_dupe_5grams int64 | qsc_code_frac_chars_dupe_6grams int64 | qsc_code_frac_chars_dupe_7grams int64 | qsc_code_frac_chars_dupe_8grams int64 | qsc_code_frac_chars_dupe_9grams int64 | qsc_code_frac_chars_dupe_10grams int64 | qsc_code_frac_chars_replacement_symbols int64 | qsc_code_frac_chars_digital int64 | qsc_code_frac_chars_whitespace int64 | qsc_code_size_file_byte int64 | qsc_code_num_lines int64 | qsc_code_num_chars_line_max int64 | qsc_code_num_chars_line_mean int64 | qsc_code_frac_chars_alphabet int64 | qsc_code_frac_chars_comments int64 | 
qsc_code_cate_xml_start int64 | qsc_code_frac_lines_dupe_lines int64 | qsc_code_cate_autogen int64 | qsc_code_frac_lines_long_string int64 | qsc_code_frac_chars_string_length int64 | qsc_code_frac_chars_long_word_length int64 | qsc_code_frac_lines_string_concat null | qsc_code_cate_encoded_data int64 | qsc_code_frac_chars_hex_words int64 | qsc_code_frac_lines_prompt_comments int64 | qsc_code_frac_lines_assert int64 | qsc_codepython_cate_ast int64 | qsc_codepython_frac_lines_func_ratio int64 | qsc_codepython_cate_var_zero int64 | qsc_codepython_frac_lines_pass int64 | qsc_codepython_frac_lines_import int64 | qsc_codepython_frac_lines_simplefunc int64 | qsc_codepython_score_lines_no_logic int64 | qsc_codepython_frac_lines_print int64 | effective string | hits int64 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
8963d51d86e3a35a5dd98f260b0e14b7e511322e | 222 | py | Python | drop/config/__init__.py | jemten/drop | 6e9b586304875c30862dacee320959d16cc98cfe | [
"MIT"
] | 58 | 2019-10-18T22:53:21.000Z | 2022-03-30T08:37:05.000Z | drop/config/__init__.py | jemten/drop | 6e9b586304875c30862dacee320959d16cc98cfe | [
"MIT"
] | 185 | 2020-01-10T13:39:12.000Z | 2022-03-31T15:25:01.000Z | drop/config/__init__.py | jemten/drop | 6e9b586304875c30862dacee320959d16cc98cfe | [
"MIT"
] | 32 | 2019-10-15T15:13:20.000Z | 2022-03-22T05:25:25.000Z | from .DropConfig import DropConfig
from .SampleAnnotation import SampleAnnotation
from .submodules.MonoallelicExpression import MAE
from .submodules.AberrantSplicing import AS
from .submodules.AberrantExpression import AE
| 37 | 49 | 0.873874 | 23 | 222 | 8.434783 | 0.478261 | 0.216495 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.09009 | 222 | 5 | 50 | 44.4 | 0.960396 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | null | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
7f98fa450ca03011ca15bb37c9f26a66c935b1fa | 111 | py | Python | WEEKS/CD_Sata-Structures/_RESOURCES/python-prac/src/modules/sound_package/effects/reverse.py | webdevhub42/Lambda | b04b84fb5b82fe7c8b12680149e25ae0d27a0960 | [
"MIT"
] | 11 | 2021-02-18T04:53:44.000Z | 2022-01-16T10:57:39.000Z | WEEKS/CD_Sata-Structures/_RESOURCES/python-prac/src/modules/sound_package/effects/reverse.py | webdevhub42/Lambda | b04b84fb5b82fe7c8b12680149e25ae0d27a0960 | [
"MIT"
] | 162 | 2021-03-09T01:52:11.000Z | 2022-03-12T01:09:07.000Z | WEEKS/CD_Sata-Structures/_RESOURCES/python-prac/src/modules/sound_package/effects/reverse.py | webdevhub42/Lambda | b04b84fb5b82fe7c8b12680149e25ae0d27a0960 | [
"MIT"
] | 8 | 2021-02-18T05:12:34.000Z | 2022-03-06T19:02:14.000Z | """Reverse effect."""
def reverse_function():
"""Reveres function mock"""
return "Do reverse effect"
| 15.857143 | 31 | 0.648649 | 12 | 111 | 5.916667 | 0.666667 | 0.366197 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.189189 | 111 | 6 | 32 | 18.5 | 0.788889 | 0.333333 | 0 | 0 | 0 | 0 | 0.269841 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.5 | true | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 0 | null | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 1 | 0 | 0 | 0 | 1 | 0 | 0 | 6 |
7fa4727c55c05d542226a8534997913844b65165 | 66 | py | Python | khopesh/__init__.py | Varkal/khopesh | f55830ae8392394733dd05939cbe6f943b3f08ae | [
"MIT"
] | null | null | null | khopesh/__init__.py | Varkal/khopesh | f55830ae8392394733dd05939cbe6f943b3f08ae | [
"MIT"
] | null | null | null | khopesh/__init__.py | Varkal/khopesh | f55830ae8392394733dd05939cbe6f943b3f08ae | [
"MIT"
] | null | null | null | def cli():
from .app import KhopeshApp
KhopeshApp().run()
| 16.5 | 31 | 0.636364 | 8 | 66 | 5.25 | 0.875 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.227273 | 66 | 3 | 32 | 22 | 0.823529 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.333333 | true | 0 | 0.333333 | 0 | 0.666667 | 0 | 1 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
7fb9470ae0ad2b2dd3093c910f04b520e4510376 | 11,466 | py | Python | subhalo_metals.py | lzkelley/arepo-mbh-sims_analysis | f14519552cedd39a040b53e6d7cc538b5b8f38a3 | [
"MIT"
] | null | null | null | subhalo_metals.py | lzkelley/arepo-mbh-sims_analysis | f14519552cedd39a040b53e6d7cc538b5b8f38a3 | [
"MIT"
] | null | null | null | subhalo_metals.py | lzkelley/arepo-mbh-sims_analysis | f14519552cedd39a040b53e6d7cc538b5b8f38a3 | [
"MIT"
] | null | null | null | """
"""
import os
import sys
from datetime import datetime
import numpy as np
import matplotlib.pyplot as plt
import h5py
import illustris_python as ill
import zcode.math as zmath
from . import utils
from . const import CONV_ILL_TO_SOL, PARTS
def subhalos_metallicity_redshift(arepo_sim_dir):
beg_all = datetime.now()
arepo_output_dir = os.path.join(arepo_sim_dir, "output", "")
sim_name = os.path.split(arepo_sim_dir)[-1]
print("subhalo_metals.main() : {} - {}".format(sim_name, arepo_output_dir))
HALOS = True
redshifts = utils.load_snapshot_redshifts(arepo_output_dir)
# num_snaps = utils.get_num_snaps(arepo_output_dir)
num_snaps = len(redshifts)
print("Snaps: ", num_snaps)
fields = ['SubhaloBHMass', 'SubhaloGasMetallicity', 'SubhaloGasMetallicityHalfRad',
'SubhaloGasMetallicityMaxRad', 'SubhaloGasMetallicitySfr',
'SubhaloGasMetallicitySfrWeighted', 'SubhaloLenType', 'SubhaloMass',
'SubhaloMassInRadType', 'SubhaloSFR', 'SubhaloSFRinRad', 'SubhaloStarMetallicity',
'SubhaloVelDisp']
NBINS_MASSES = 60
NBINS_METALS = 80
edges_metals = np.logspace(-8, 0, NBINS_METALS+1)
edges_masses = np.logspace(8, 14, NBINS_MASSES+1)
# SIGMA = [0.0, 0.5, 1.0, 1.5, 2.0, 2.5, 3.0]
SIGMA = [0.0, 1.0, 2.0, 3.0]
PERCS = np.array(sorted(list(set(np.concatenate(zmath.sigma(SIGMA, boundaries=True))))))
NUM_PERCS = len(PERCS)
METALS_KEYS = ['SubhaloGasMetallicity', 'SubhaloGasMetallicityHalfRad',
'SubhaloGasMetallicitySfr', 'SubhaloGasMetallicitySfrWeighted']
NUM_KEYS = len(METALS_KEYS)
metals = np.zeros((NUM_KEYS, num_snaps, NBINS_METALS, NBINS_MASSES))
PERCS_MASS_BIN_EDGES = np.logspace(8, 14, 7)
NUM_PMB = PERCS_MASS_BIN_EDGES.size - 1
metals_percs = np.zeros((NUM_KEYS, num_snaps, NUM_PMB, NUM_PERCS))
metals_percs_num = np.zeros((NUM_KEYS, num_snaps, NUM_PMB), dtype=int)
metals_percs_nonzero = np.zeros((NUM_KEYS, num_snaps, NUM_PMB, NUM_PERCS))
metals_percs_nonzero_num = np.zeros((NUM_KEYS, num_snaps, NUM_PMB), dtype=int)
# masses_percs = np.zeros((NUM_KEYS, num_snaps, NBINS_MASSES, NUM_PERCS))
edges = [edges_metals, edges_masses]
for snap in range(num_snaps):
beg = datetime.now()
print(snap, " ------")
subhalos = ill.groupcat.loadSubhalos(arepo_output_dir, snap, fields=fields)
# print(subhalos.keys())
masses_snap = subhalos['SubhaloMass'] * CONV_ILL_TO_SOL.MASS
num_subh = len(masses_snap)
print("\tLoaded {} subhalos after {}".format(num_subh, str(datetime.now()-beg)))
for kk, key in enumerate(METALS_KEYS):
# metals_snap = subhalos['SubhaloGasMetallicity']
metals_snap = subhalos[key]
print("\t", key)
print("\t\tminmax = {:.2e}, {:.2e}".format(np.min(metals_snap), np.max(metals_snap)))
metals[kk, snap, :, :], xe, ye = np.histogram2d(metals_snap, masses_snap, bins=edges)
for jj in range(NUM_PMB):
lo = PERCS_MASS_BIN_EDGES[jj]
hi = PERCS_MASS_BIN_EDGES[jj+1]
idx = (lo < masses_snap) & (masses_snap <= hi)
if any(idx):
metals_percs[kk, snap, jj, :] = np.percentile(metals_snap[idx], 100*PERCS)
metals_percs_num[kk, snap, jj] = np.sum(idx)
idx = (lo < masses_snap) & (masses_snap <= hi) & (metals_snap > 0.0)
if any(idx):
metals_percs_nonzero[kk, snap, jj, :] = np.percentile(metals_snap[idx], 100*PERCS)
metals_percs_nonzero_num[kk, snap, jj] = np.sum(idx)
fname = "{}_subhalos_metals".format(sim_name)
shape_str = np.array("(NUM_KEYS, num_snaps, NBINS_METALS, NBINS)")
keys_str = np.array("(" + ", ".join(METALS_KEYS) + ")")
np.savez(fname, metals=metals, metals_percs=metals_percs, metals_percs_num=metals_percs_num,
metals_percs_nonzero=metals_percs_nonzero, metals_percs_nonzero_num=metals_percs_nonzero_num,
redshifts=redshifts,
edges_metals=edges_metals, edges_masses=edges_masses, edges_percs_masses=PERCS_MASS_BIN_EDGES,
sigma=SIGMA, percs=PERCS,
shape=shape_str, metal_keys=keys_str)
print("Saved data to '{}'".format(os.path.abspath(fname)))
print("Done after {}".format(datetime.now()-beg_all))
return
def halos_metallicity_redshift(arepo_sim_dir):
beg_all = datetime.now()
arepo_output_dir = os.path.join(arepo_sim_dir, "output", "")
sim_name = os.path.split(arepo_sim_dir)[-1]
print("subhalo_metals.halos_metallicity_redshift() : {} - {}".format(sim_name, arepo_output_dir))
redshifts = utils.load_snapshot_redshifts(arepo_output_dir)
num_snaps = len(redshifts)
print("Snaps: ", num_snaps)
fields = ['GroupGasMetallicity', 'GroupMass']
NBINS_MASSES = 80
NBINS_METALS = 90
edges_metals = np.logspace(-8, 0, NBINS_METALS+1)
edges_masses = np.logspace(8, 16, NBINS_MASSES+1)
SIGMA = [0.0, 1.0, 2.0, 3.0]
PERCS = np.array(sorted(list(set(np.concatenate(zmath.sigma(SIGMA, boundaries=True))))))
NUM_PERCS = len(PERCS)
metals = np.zeros((num_snaps, NBINS_METALS, NBINS_MASSES))
PERCS_MASS_BIN_EDGES = np.logspace(8, 16, 9)
NUM_PMB = PERCS_MASS_BIN_EDGES.size - 1
metals_percs = np.zeros((num_snaps, NUM_PMB, NUM_PERCS))
metals_percs_num = np.zeros((num_snaps, NUM_PMB), dtype=int)
metals_percs_nonzero = np.zeros((num_snaps, NUM_PMB, NUM_PERCS))
metals_percs_nonzero_num = np.zeros((num_snaps, NUM_PMB), dtype=int)
edges = [edges_metals, edges_masses]
for snap in range(num_snaps):
beg = datetime.now()
print(snap, " ------")
halos = ill.groupcat.loadHalos(arepo_output_dir, snap, fields=fields)
# print(halos.keys())
masses_snap = halos['GroupMass'] * CONV_ILL_TO_SOL.MASS
num_halo = len(masses_snap)
print("\tLoaded {} halos after {}".format(num_halo, str(datetime.now()-beg)))
metals_snap = halos['GroupGasMetallicity']
print("\t\tminmax = {:.2e}, {:.2e}".format(np.min(metals_snap), np.max(metals_snap)))
metals[snap, :, :], xe, ye = np.histogram2d(metals_snap, masses_snap, bins=edges)
for jj in range(NUM_PMB):
lo = PERCS_MASS_BIN_EDGES[jj]
hi = PERCS_MASS_BIN_EDGES[jj+1]
idx = (lo < masses_snap) & (masses_snap <= hi)
if any(idx):
metals_percs[snap, jj, :] = np.percentile(metals_snap[idx], 100*PERCS)
metals_percs_num[snap, jj] = np.sum(idx)
idx = (lo < masses_snap) & (masses_snap <= hi) & (metals_snap > 0.0)
if any(idx):
metals_percs_nonzero[snap, jj, :] = np.percentile(metals_snap[idx], 100*PERCS)
metals_percs_nonzero_num[snap, jj] = np.sum(idx)
fname = "{}_halos_metals".format(sim_name)
shape_str = np.array("(num_snaps, NBINS_METALS, NBINS)")
np.savez(fname, metals=metals, metals_percs=metals_percs, metals_percs_num=metals_percs_num,
metals_percs_nonzero=metals_percs_nonzero, metals_percs_nonzero_num=metals_percs_nonzero_num,
redshifts=redshifts,
edges_metals=edges_metals, edges_masses=edges_masses, edges_percs_masses=PERCS_MASS_BIN_EDGES,
sigma=SIGMA, percs=PERCS, shape=shape_str)
print("Saved data to '{}'".format(os.path.abspath(fname)))
print("Done after {}".format(datetime.now()-beg_all))
return
def halos_stars_metallicity_redshift(arepo_sim_dir):
beg_all = datetime.now()
arepo_output_dir = os.path.join(arepo_sim_dir, "output", "")
sim_name = os.path.split(arepo_sim_dir)[-1]
print("subhalo_metals.halos_stars_metallicity_redshift() : {} - {}".format(sim_name, arepo_output_dir))
redshifts = utils.load_snapshot_redshifts(arepo_output_dir)
num_snaps = len(redshifts)
print("Snaps: ", num_snaps)
fields = ['GroupGasMetallicity', 'GroupMassType', 'GroupLenType']
NBINS_MASSES = 70
NBINS_METALS = 60
edges_metals = np.logspace(-8, 0, NBINS_METALS+1)
edges_masses = np.logspace(6, 12, NBINS_MASSES+1)
SIGMA = [0.0, 1.0, 2.0]
PERCS = np.array(sorted(list(set(np.concatenate(zmath.sigma(SIGMA, boundaries=True))))))
NUM_PERCS = len(PERCS)
metals = np.zeros((num_snaps, NBINS_METALS, NBINS_MASSES))
PERCS_MASS_BIN_EDGES = np.logspace(6, 12, 7)
NUM_PMB = PERCS_MASS_BIN_EDGES.size - 1
metals_percs = np.zeros((num_snaps, NUM_PMB, NUM_PERCS))
metals_percs_num = np.zeros((num_snaps, NUM_PMB), dtype=int)
metals_percs_nonzero = np.zeros((num_snaps, NUM_PMB, NUM_PERCS))
metals_percs_nonzero_num = np.zeros((num_snaps, NUM_PMB), dtype=int)
edges = [edges_metals, edges_masses]
for snap in range(num_snaps):
beg = datetime.now()
print(snap, " ------")
halos = ill.groupcat.loadHalos(arepo_output_dir, snap, fields=fields)
# print(halos.keys())
# masses_snap = halos['GroupMass'] * CONV_ILL_TO_SOL.MASS
masses_stars = halos['GroupMassType'][:, PARTS.STAR] * CONV_ILL_TO_SOL.MASS
num_halo = len(masses_stars)
idx = (masses_stars > 0)
num_halo_stars = np.sum(idx)
print("\tLoaded {} halos after {}".format(num_halo, str(datetime.now()-beg)))
print("\tWith stars: {:.1e}/{:.1e} = {:.4f}".format(
num_halo_stars, num_halo, num_halo_stars/num_halo))
masses_snap = masses_stars[idx]
metals_snap = halos['GroupGasMetallicity'][idx]
print("\t\tminmax = {:.2e}, {:.2e}".format(np.min(metals_snap), np.max(metals_snap)))
metals[snap, :, :], xe, ye = np.histogram2d(metals_snap, masses_snap, bins=edges)
for jj in range(NUM_PMB):
lo = PERCS_MASS_BIN_EDGES[jj]
hi = PERCS_MASS_BIN_EDGES[jj+1]
idx = (lo < masses_snap) & (masses_snap <= hi)
if any(idx):
metals_percs[snap, jj, :] = np.percentile(metals_snap[idx], 100*PERCS)
metals_percs_num[snap, jj] = np.sum(idx)
idx = (lo < masses_snap) & (masses_snap <= hi) & (metals_snap > 0.0)
if any(idx):
metals_percs_nonzero[snap, jj, :] = np.percentile(metals_snap[idx], 100*PERCS)
metals_percs_nonzero_num[snap, jj] = np.sum(idx)
fname = "{}_halos_stars_metals".format(sim_name)
shape_str = np.array("(num_snaps, NBINS_METALS, NBINS)")
np.savez(fname, metals=metals, metals_percs=metals_percs, metals_percs_num=metals_percs_num,
metals_percs_nonzero=metals_percs_nonzero, metals_percs_nonzero_num=metals_percs_nonzero_num,
redshifts=redshifts,
edges_metals=edges_metals, edges_masses=edges_masses, edges_percs_masses=PERCS_MASS_BIN_EDGES,
sigma=SIGMA, percs=PERCS, shape=shape_str)
print("Saved data to '{}'".format(os.path.abspath(fname)))
print("Done after {}".format(datetime.now()-beg_all))
return
if __name__ == "__main__":
print(sys.argv)
if len(sys.argv) > 1:
arepo_sim_dir = sys.argv[1]
else:
raise RuntimeError("No directory provided!")
# main(arepo_sim_dir)
# subhalos_metallicity_redshift(arepo_sim_dir)
# halos_metallicity_redshift(arepo_sim_dir)
halos_stars_metallicity_redshift(arepo_sim_dir)
| 40.95 | 107 | 0.655678 | 1,534 | 11,466 | 4.599087 | 0.105606 | 0.074841 | 0.061233 | 0.036145 | 0.808079 | 0.798583 | 0.777179 | 0.752799 | 0.752799 | 0.740751 | 0 | 0.014409 | 0.213152 | 11,466 | 279 | 108 | 41.096774 | 0.767568 | 0.038287 | 0 | 0.58794 | 0 | 0 | 0.108285 | 0.035701 | 0 | 0 | 0 | 0 | 0 | 1 | 0.015075 | false | 0 | 0.050251 | 0 | 0.080402 | 0.120603 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
f6ebceeb364d35add669255c1840536652da811c | 33 | py | Python | models/__init__.py | liv20/deep-rl | ffa51e8896e14fec1f34829de87b219a58098a55 | [
"MIT"
] | null | null | null | models/__init__.py | liv20/deep-rl | ffa51e8896e14fec1f34829de87b219a58098a55 | [
"MIT"
] | null | null | null | models/__init__.py | liv20/deep-rl | ffa51e8896e14fec1f34829de87b219a58098a55 | [
"MIT"
] | null | null | null | from models.sequential import mlp | 33 | 33 | 0.878788 | 5 | 33 | 5.8 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.090909 | 33 | 1 | 33 | 33 | 0.966667 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
63d8551f949c7e46f15050dec46ca46fb98ac4f8 | 2,226 | py | Python | tests/unit/test_cluster.py | Nomow/yass | 9cc5cc5c5435a664b378bba9332e5b77eb792ff8 | [
"Apache-2.0"
] | null | null | null | tests/unit/test_cluster.py | Nomow/yass | 9cc5cc5c5435a664b378bba9332e5b77eb792ff8 | [
"Apache-2.0"
] | null | null | null | tests/unit/test_cluster.py | Nomow/yass | 9cc5cc5c5435a664b378bba9332e5b77eb792ff8 | [
"Apache-2.0"
] | null | null | null | """
process.run tests, checking that the pipeline finishes without errors for
some configurations
"""
import yass
from yass import preprocess
from yass import detect
from yass import cluster
def test_cluster_threshold(path_to_config_threshold, make_tmp_folder):
yass.set_config(path_to_config_threshold, make_tmp_folder)
(standarized_path,
standarized_params,
whiten_filter) = preprocess.run()
spike_index_all = detect.run(standarized_path,
standarized_params,
whiten_filter)
cluster.run(None, spike_index_all)
def test_cluster_nnet(path_to_config, make_tmp_folder):
yass.set_config(path_to_config, make_tmp_folder)
(standarized_path,
standarized_params,
whiten_filter) = preprocess.run()
spike_index_all = detect.run(standarized_path,
standarized_params,
whiten_filter)
cluster.run(None, spike_index_all)
def test_cluster_save_results(path_to_config, make_tmp_folder):
yass.set_config(path_to_config, make_tmp_folder)
(standarized_path,
standarized_params,
whiten_filter) = preprocess.run()
spike_index_all = detect.run(standarized_path,
standarized_params,
whiten_filter)
cluster.run(None, spike_index_all, save_results=True)
# FIXME: this feature has not been implemented in the new clustering step
# def test_cluster_loads_from_disk_if_all_files_exist(path_to_config,
# make_tmp_folder):
# yass.set_config(path_to_config, make_tmp_folder)
# (standarized_path,
# standarized_params,
# whiten_filter) = preprocess.run()
# spike_index_all = detect.run(standarized_path,
# standarized_params,
# whiten_filter)
# # save results
# cluster.run(None, spike_index_all, save_results=True)
# assert cluster.run.executed
# # next time this should not run and just load from files
# cluster.run(None, spike_index_all, save_results=True)
# assert not cluster.run.executed
| 29.289474 | 73 | 0.659479 | 261 | 2,226 | 5.249042 | 0.249042 | 0.065693 | 0.085401 | 0.186861 | 0.716788 | 0.716788 | 0.716788 | 0.694891 | 0.694891 | 0.656204 | 0 | 0 | 0.275382 | 2,226 | 75 | 74 | 29.68 | 0.849349 | 0.385445 | 0 | 0.709677 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.013333 | 0 | 1 | 0.096774 | false | 0 | 0.129032 | 0 | 0.225806 | 0 | 0 | 0 | 0 | null | 0 | 0 | 1 | 0 | 1 | 1 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
121a6f0c273152113a9ec0735239037032ab8d99 | 49 | py | Python | imgravy/__init__.py | gravy-jones-locker/imgravy | 36239139bd13982a5cb31e17b43ceb2ad11c0cb0 | [
"MIT"
] | null | null | null | imgravy/__init__.py | gravy-jones-locker/imgravy | 36239139bd13982a5cb31e17b43ceb2ad11c0cb0 | [
"MIT"
] | null | null | null | imgravy/__init__.py | gravy-jones-locker/imgravy | 36239139bd13982a5cb31e17b43ceb2ad11c0cb0 | [
"MIT"
] | null | null | null | from .image import Image
from .utils import Utils | 24.5 | 24 | 0.816327 | 8 | 49 | 5 | 0.5 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.142857 | 49 | 2 | 25 | 24.5 | 0.952381 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
122600b6eba859c2b0c01ba8518b150373aa913f | 23 | py | Python | ext/__init__.py | Ahmosys/PythonLeakCheckAPI | 3e53c5b068ae875054e916b6799f9069deaae8b3 | [
"MIT"
] | 4 | 2021-02-01T07:43:10.000Z | 2021-04-27T06:58:54.000Z | ext/__init__.py | Ahmosys/PythonLeakCheckAPI | 3e53c5b068ae875054e916b6799f9069deaae8b3 | [
"MIT"
] | null | null | null | ext/__init__.py | Ahmosys/PythonLeakCheckAPI | 3e53c5b068ae875054e916b6799f9069deaae8b3 | [
"MIT"
] | null | null | null | from .utility import *
| 11.5 | 22 | 0.73913 | 3 | 23 | 5.666667 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.173913 | 23 | 1 | 23 | 23 | 0.894737 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
12546118961d8e95eaea3ce139446c8dc1a18305 | 52 | py | Python | dl_translate/lang/__init__.py | kondeeza/dl-translate | 45f8d865592b96c074f5a7921cffa688b63e85e9 | [
"MIT"
] | 200 | 2021-03-17T07:56:20.000Z | 2022-03-10T07:25:51.000Z | dl_translate/lang/__init__.py | Lexxos/dl-translate | 2c4f88db599c787f402819076225d2f39ec6b19b | [
"MIT"
] | 16 | 2021-03-17T17:52:52.000Z | 2022-03-30T17:02:19.000Z | dl_translate/lang/__init__.py | Lexxos/dl-translate | 2c4f88db599c787f402819076225d2f39ec6b19b | [
"MIT"
] | 17 | 2021-03-17T17:11:01.000Z | 2022-03-03T16:12:48.000Z | from .m2m100 import *
from . import m2m100, mbart50
| 17.333333 | 29 | 0.75 | 7 | 52 | 5.571429 | 0.571429 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.232558 | 0.173077 | 52 | 2 | 30 | 26 | 0.674419 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
125da523db47db419c1623e97958aa0c80e81c7c | 15,113 | py | Python | tests/test_mix.py | p42ul/medleydb | 027cdaa32564eb0784dad9c879b8aac49c80b4cb | [
"MIT"
] | 125 | 2015-06-10T16:31:19.000Z | 2022-03-29T11:03:05.000Z | tests/test_mix.py | p42ul/medleydb | 027cdaa32564eb0784dad9c879b8aac49c80b4cb | [
"MIT"
] | 81 | 2015-11-09T15:51:04.000Z | 2021-11-30T12:30:55.000Z | tests/test_mix.py | p42ul/medleydb | 027cdaa32564eb0784dad9c879b8aac49c80b4cb | [
"MIT"
] | 45 | 2015-10-19T02:47:33.000Z | 2022-03-06T16:20:33.000Z | """ Tests for medleydb.mix
"""
import unittest
import os
from medleydb import multitrack
from medleydb import mix
from medleydb import AUDIO_PATH
OUTPUT_PATH = 'test_out.wav'
def clean_output():
if os.path.exists(OUTPUT_PATH):
os.remove(OUTPUT_PATH)
class TestMixMultitrack(unittest.TestCase):
def setUp(self):
self.mtrack = multitrack.MultiTrack('LizNelson_Rainfall')
def test_defaults(self):
clean_output()
actual_fullpaths, actual_weights = mix.mix_multitrack(
self.mtrack, OUTPUT_PATH
)
self.assertTrue(os.path.exists(OUTPUT_PATH))
actual_basenames = [os.path.basename(f) for f in actual_fullpaths]
expected_basenames = [
'LizNelson_Rainfall_STEM_01.wav',
'LizNelson_Rainfall_STEM_02.wav',
'LizNelson_Rainfall_STEM_03.wav',
'LizNelson_Rainfall_STEM_04.wav',
'LizNelson_Rainfall_STEM_05.wav'
]
expected_weights = [
0.9138225670999782,
0.88655832334783,
0.7820245646673145,
0.9709353677932278,
0.7734022629465723
]
self.assertEqual(expected_basenames, actual_basenames)
self.assertEqual(expected_weights, actual_weights)
clean_output()
def test_less_stems(self):
clean_output()
actual_fullpaths, actual_weights = mix.mix_multitrack(
self.mtrack, OUTPUT_PATH, stem_indices=[2, 4]
)
self.assertTrue(os.path.exists(OUTPUT_PATH))
actual_basenames = [os.path.basename(f) for f in actual_fullpaths]
expected_basenames = [
'LizNelson_Rainfall_STEM_02.wav',
'LizNelson_Rainfall_STEM_04.wav'
]
expected_weights = [
0.88655832334783,
0.9709353677932278
]
self.assertEqual(expected_basenames, actual_basenames)
self.assertEqual(expected_weights, actual_weights)
clean_output()
def test_alt_weights(self):
clean_output()
actual_fullpaths, actual_weights = mix.mix_multitrack(
self.mtrack, OUTPUT_PATH, alternate_weights={2: 2.0, 4: 0.5}
)
self.assertTrue(os.path.exists(OUTPUT_PATH))
actual_basenames = [os.path.basename(f) for f in actual_fullpaths]
expected_basenames = [
'LizNelson_Rainfall_STEM_01.wav',
'LizNelson_Rainfall_STEM_02.wav',
'LizNelson_Rainfall_STEM_03.wav',
'LizNelson_Rainfall_STEM_04.wav',
'LizNelson_Rainfall_STEM_05.wav'
]
expected_weights = [
0.9138225670999782,
2.0,
0.7820245646673145,
0.5,
0.7734022629465723
]
self.assertEqual(expected_basenames, actual_basenames)
self.assertEqual(expected_weights, actual_weights)
clean_output()
def test_alt_files(self):
clean_output()
actual_fullpaths, actual_weights = mix.mix_multitrack(
self.mtrack, OUTPUT_PATH,
alternate_files={1: self.mtrack.mix_path}
)
self.assertTrue(os.path.exists(OUTPUT_PATH))
actual_basenames = [os.path.basename(f) for f in actual_fullpaths]
expected_basenames = [
'LizNelson_Rainfall_MIX.wav',
'LizNelson_Rainfall_STEM_02.wav',
'LizNelson_Rainfall_STEM_03.wav',
'LizNelson_Rainfall_STEM_04.wav',
'LizNelson_Rainfall_STEM_05.wav'
]
expected_weights = [
0.9138225670999782,
0.88655832334783,
0.7820245646673145,
0.9709353677932278,
0.7734022629465723
]
self.assertEqual(expected_basenames, actual_basenames)
self.assertEqual(expected_weights, actual_weights)
clean_output()
def test_additional_files(self):
clean_output()
actual_fullpaths, actual_weights = mix.mix_multitrack(
self.mtrack, OUTPUT_PATH,
additional_files=[(self.mtrack.mix_path, 2.1)]
)
self.assertTrue(os.path.exists(OUTPUT_PATH))
actual_basenames = [os.path.basename(f) for f in actual_fullpaths]
expected_basenames = [
'LizNelson_Rainfall_STEM_01.wav',
'LizNelson_Rainfall_STEM_02.wav',
'LizNelson_Rainfall_STEM_03.wav',
'LizNelson_Rainfall_STEM_04.wav',
'LizNelson_Rainfall_STEM_05.wav',
'LizNelson_Rainfall_MIX.wav'
]
expected_weights = [
0.9138225670999782,
0.88655832334783,
0.7820245646673145,
0.9709353677932278,
0.7734022629465723,
2.1
]
self.assertEqual(expected_basenames, actual_basenames)
self.assertEqual(expected_weights, actual_weights)
clean_output()
def test_one_stem_mix(self):
clean_output()
actual_fullpaths, actual_weights = mix.mix_multitrack(
self.mtrack, OUTPUT_PATH, stem_indices=[2]
)
self.assertTrue(os.path.exists(OUTPUT_PATH))
actual_basenames = [os.path.basename(f) for f in actual_fullpaths]
expected_basenames = [
'LizNelson_Rainfall_STEM_02.wav'
]
expected_weights = [
0.88655832334783
]
self.assertEqual(expected_basenames, actual_basenames)
self.assertEqual(expected_weights, actual_weights)
clean_output()
class TestBuildMixArgs(unittest.TestCase):
    """Tests for ``mix._build_mix_args`` stem-path and weight resolution."""

    def setUp(self):
        self.mtrack = multitrack.MultiTrack('LizNelson_Rainfall')

    @staticmethod
    def _stem_paths(track_id, stem_indices):
        """Build the absolute paths of the numbered stem files for a track."""
        stem_dir = os.path.join(
            AUDIO_PATH, track_id, '{}_STEMS'.format(track_id)
        )
        return [
            os.path.join(stem_dir, '{}_STEM_{}.wav'.format(track_id, idx))
            for idx in stem_indices
        ]

    def test_defaults(self):
        paths, weights = mix._build_mix_args(
            self.mtrack, None, None, None, None
        )
        self.assertEqual(
            self._stem_paths(
                'LizNelson_Rainfall', ['01', '02', '03', '04', '05']
            ),
            paths,
        )
        self.assertEqual(
            [
                0.9138225670999782,
                0.88655832334783,
                0.7820245646673145,
                0.9709353677932278,
                0.7734022629465723
            ],
            weights,
        )

    def test_defaults_no_mixing_coeffs(self):
        # With every mixing coefficient removed, all weights fall back to 1.
        mtrack = multitrack.MultiTrack('AHa_TakeOnMe')
        for stem in mtrack.stems.values():
            stem.mixing_coefficient = None
        paths, weights = mix._build_mix_args(
            mtrack, None, None, None, None
        )
        self.assertEqual(
            self._stem_paths(
                'AHa_TakeOnMe', ['01', '02', '03', '04', '05', '06']
            ),
            paths,
        )
        self.assertEqual([1, 1, 1, 1, 1, 1], weights)

    def test_less_stems(self):
        # Restricting to a subset of stem indices filters paths and weights.
        paths, weights = mix._build_mix_args(
            self.mtrack, [2, 4], None, None, None
        )
        self.assertEqual(
            self._stem_paths('LizNelson_Rainfall', ['02', '04']), paths
        )
        self.assertEqual(
            [0.88655832334783, 0.9709353677932278], weights
        )

    def test_alt_weights(self):
        # Explicit per-stem weights override the stored coefficients.
        paths, weights = mix._build_mix_args(
            self.mtrack, None, {2: 2.0, 4: 0.5}, None, None
        )
        self.assertEqual(
            self._stem_paths(
                'LizNelson_Rainfall', ['01', '02', '03', '04', '05']
            ),
            paths,
        )
        self.assertEqual(
            [
                0.9138225670999782,
                2.0,
                0.7820245646673145,
                0.5,
                0.7734022629465723
            ],
            weights,
        )

    def test_alt_files(self):
        # Substituting a file for stem 1 swaps the path but keeps its weight.
        paths, weights = mix._build_mix_args(
            self.mtrack, None, None, {1: self.mtrack.mix_path}, None
        )
        expected = [
            os.path.join(
                AUDIO_PATH, 'LizNelson_Rainfall', 'LizNelson_Rainfall_MIX.wav'
            )
        ]
        expected += self._stem_paths(
            'LizNelson_Rainfall', ['02', '03', '04', '05']
        )
        self.assertEqual(expected, paths)
        self.assertEqual(
            [
                0.9138225670999782,
                0.88655832334783,
                0.7820245646673145,
                0.9709353677932278,
                0.7734022629465723
            ],
            weights,
        )

    def test_additional_files(self):
        # Extra (path, weight) pairs are appended after the stems.
        paths, weights = mix._build_mix_args(
            self.mtrack, None, None, None, [(self.mtrack.mix_path, 2.1)]
        )
        expected = self._stem_paths(
            'LizNelson_Rainfall', ['01', '02', '03', '04', '05']
        )
        expected.append(self.mtrack.mix_path)
        self.assertEqual(expected, paths)
        self.assertEqual(
            [
                0.9138225670999782,
                0.88655832334783,
                0.7820245646673145,
                0.9709353677932278,
                0.7734022629465723,
                2.1
            ],
            weights,
        )

    def test_one_stem_mix(self):
        # A single-stem selection yields a single path/weight pair.
        paths, weights = mix._build_mix_args(
            self.mtrack, [2], None, None, None
        )
        self.assertEqual(
            self._stem_paths('LizNelson_Rainfall', ['02']), paths
        )
        self.assertEqual([0.88655832334783], weights)
class TestMixMelodyStems(unittest.TestCase):
    """Tests for ``mix.mix_melody_stems`` on the Phoenix_ScotchMorris track."""

    def setUp(self):
        self.mtrack = multitrack.MultiTrack('Phoenix_ScotchMorris')
        # Start each test from a clean slate, and register cleanup so it
        # runs even when an assertion fails.  The previous inline
        # clean_output() calls were skipped on failure, leaking output
        # files into subsequent tests.
        clean_output()
        self.addCleanup(clean_output)

    def _check_mix(self, expected_melody, expected_stem, **kwargs):
        """Run the melody mix and verify the returned stem index lists."""
        actual_melody, actual_stem = mix.mix_melody_stems(
            self.mtrack, OUTPUT_PATH, **kwargs
        )
        self.assertEqual(expected_melody, actual_melody)
        self.assertEqual(expected_stem, actual_stem)
        self.assertTrue(os.path.exists(OUTPUT_PATH))

    def test_defaults(self):
        self._check_mix([2, 3], [2, 3])

    def test_max_melody_stems1(self):
        # Capping at one stem keeps only the first melody stem.
        self._check_mix([2], [2], max_melody_stems=1)

    def test_max_melody_stems2(self):
        # A cap above the available count has no effect.
        self._check_mix([2, 3], [2, 3], max_melody_stems=3)

    def test_include_percussion(self):
        self._check_mix([2, 3], [2, 3], include_percussion=True)

    def test_require_mono(self):
        self._check_mix([2, 3], [2, 3], require_mono=True)
class TestMixMonoStems(unittest.TestCase):
    """Tests for ``mix.mix_mono_stems`` on the Phoenix_ScotchMorris track."""

    def setUp(self):
        self.mtrack = multitrack.MultiTrack('Phoenix_ScotchMorris')
        # Clean before the test and register cleanup so it also runs when
        # an assertion fails (the previous inline clean_output() calls were
        # skipped on failure, leaking output into later tests).
        clean_output()
        self.addCleanup(clean_output)

    def _check_mix(self, expected_mono, expected_stem, **kwargs):
        """Run the mono mix and verify the returned stem index lists."""
        actual_mono, actual_stem = mix.mix_mono_stems(
            self.mtrack, OUTPUT_PATH, **kwargs
        )
        self.assertEqual(expected_mono, actual_mono)
        self.assertEqual(expected_stem, actual_stem)
        self.assertTrue(os.path.exists(OUTPUT_PATH))

    def test_defaults(self):
        self._check_mix([2, 3], [2, 3])

    def test_include_percussion(self):
        self._check_mix([2, 3], [2, 3], include_percussion=True)
class TestMixNoVocals(unittest.TestCase):
    """Tests for ``mix.mix_no_vocals`` on the LizNelson_Rainfall track."""

    def setUp(self):
        self.mtrack = multitrack.MultiTrack('LizNelson_Rainfall')
        # Clean before the test and register cleanup so it also runs when
        # an assertion fails (the previous inline clean_output() call was
        # skipped on failure, leaking output into later tests).
        clean_output()
        self.addCleanup(clean_output)

    def test_defaults(self):
        # With vocals removed, only the non-vocal stems (4 and 5) remain.
        actual_stem = mix.mix_no_vocals(self.mtrack, OUTPUT_PATH)
        self.assertEqual([4, 5], actual_stem)
        self.assertTrue(os.path.exists(OUTPUT_PATH))
class TestRemixVocals(unittest.TestCase):
    """Tests for ``mix.remix_vocals`` on the LizNelson_Rainfall track."""

    def setUp(self):
        self.mtrack = multitrack.MultiTrack('LizNelson_Rainfall')
        # Clean before the test and register cleanup so it also runs when
        # an assertion fails (the previous inline clean_output() call was
        # skipped on failure, leaking output into later tests).
        clean_output()
        self.addCleanup(clean_output)

    def test_defaults(self):
        # Scaling vocals by 2.0 doubles each vocal stem's stored weight.
        alt_weights = mix.remix_vocals(self.mtrack, OUTPUT_PATH, 2.0)
        expected_weights = {
            1: 1.8276451341999564,
            2: 1.77311664669566,
            3: 1.564049129334629
        }
        self.assertEqual(expected_weights, alt_weights)
        self.assertTrue(os.path.exists(OUTPUT_PATH))
| 33.734375 | 78 | 0.611924 | 1,580 | 15,113 | 5.546835 | 0.06519 | 0.093108 | 0.110224 | 0.046554 | 0.915107 | 0.891716 | 0.883158 | 0.876312 | 0.86901 | 0.852921 | 0 | 0.086674 | 0.296897 | 15,113 | 447 | 79 | 33.809843 | 0.738095 | 0.001456 | 0 | 0.699229 | 0 | 0 | 0.095074 | 0.072002 | 0 | 0 | 0 | 0 | 0.14653 | 1 | 0.07455 | false | 0 | 0.012853 | 0 | 0.102828 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
89df62f40f2beb99e102cf80765d5905d2fcd30f | 129 | py | Python | cookiecutters/cookiecutter-cpp-lib/{{cookiecutter.project_name}}/python/tests/test_{{cookiecutter.project_pymodule}}.py | sbrisard/sbeculoos | 5b63005fccd49dcd6026afcecbd66cce0d768351 | [
"BSD-3-Clause"
] | null | null | null | cookiecutters/cookiecutter-cpp-lib/{{cookiecutter.project_name}}/python/tests/test_{{cookiecutter.project_pymodule}}.py | sbrisard/sbeculoos | 5b63005fccd49dcd6026afcecbd66cce0d768351 | [
"BSD-3-Clause"
] | 12 | 2020-09-28T07:05:32.000Z | 2020-11-16T09:12:22.000Z | cookiecutters/cookiecutter-cpp-lib/{{cookiecutter.project_name}}/python/tests/test_{{cookiecutter.project_pymodule}}.py | sbrisard/sbeculoos | 5b63005fccd49dcd6026afcecbd66cce0d768351 | [
"BSD-3-Clause"
] | null | null | null | import {{cookiecutter.project_pymodule}}
def test_return_one():
assert {{cookiecutter.project_pymodule}}.return_one() == 1
| 21.5 | 62 | 0.751938 | 15 | 129 | 6.133333 | 0.666667 | 0.413043 | 0.586957 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.008696 | 0.108527 | 129 | 5 | 63 | 25.8 | 0.791304 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.333333 | 0 | null | null | 0 | 0.333333 | null | null | 0 | 1 | 0 | 0 | null | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 6 |
d63ed644e43993df803fb400c97c6062e332679c | 598 | py | Python | cache.py | uuefi/speech-to-text-benchmark | 214f0dedad888730944676be2f2876e8c48efed5 | [
"Apache-2.0"
] | null | null | null | cache.py | uuefi/speech-to-text-benchmark | 214f0dedad888730944676be2f2876e8c48efed5 | [
"Apache-2.0"
] | null | null | null | cache.py | uuefi/speech-to-text-benchmark | 214f0dedad888730944676be2f2876e8c48efed5 | [
"Apache-2.0"
] | null | null | null | # todo: write and read processed files from local cache
import os
from utils import transcript_name, transcript_json_name
def get_cache(path): # args
# cache_path = os.path.join(CACHE_PATH, args., path.split(os.path.sep)[-1].replace('.wav', '.aws'))
# if os.path.exists(cache_path):
# with open(cache_path) as f:
# return f.read()
# with open(path, 'rb') as f:
# content = f.read()
return None, None
def write_cache():
pass
# with open(cache_path, 'w') as f:
# f.write(res)
# with open(cache_path, 'w') as f:
# f.write(res)
| 23.92 | 103 | 0.613712 | 92 | 598 | 3.869565 | 0.423913 | 0.176966 | 0.109551 | 0.143258 | 0.168539 | 0.168539 | 0.168539 | 0.168539 | 0.168539 | 0.168539 | 0 | 0.002208 | 0.242475 | 598 | 24 | 104 | 24.916667 | 0.783664 | 0.650502 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.041667 | 0 | 1 | 0.333333 | false | 0.166667 | 0.333333 | 0.166667 | 0.833333 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 1 | 0 | 0 | 1 | 0 | 1 | 1 | 1 | 1 | 0 | 0 | 6 |
c390f31c0c0145c6631caaf75b09ad7f545675fe | 199 | py | Python | en/026/python/main.py | franciscogomes2020/exercises | 8b33c4b9349a9331e4002a8225adc2a482c70024 | [
"MIT"
] | null | null | null | en/026/python/main.py | franciscogomes2020/exercises | 8b33c4b9349a9331e4002a8225adc2a482c70024 | [
"MIT"
] | null | null | null | en/026/python/main.py | franciscogomes2020/exercises | 8b33c4b9349a9331e4002a8225adc2a482c70024 | [
"MIT"
] | null | null | null | # Make a program that reads a sentence from the keyboard and shows how many times the letter "A" appears, in which position it appears the first time, and in which position it appears the last time.
| 99.5 | 198 | 0.78392 | 36 | 199 | 4.333333 | 0.638889 | 0.089744 | 0.192308 | 0.217949 | 0.346154 | 0.346154 | 0 | 0 | 0 | 0 | 0 | 0 | 0.18593 | 199 | 1 | 199 | 199 | 0.962963 | 0.984925 | 0 | null | 0 | null | 0 | 0 | null | 0 | 0 | 0 | null | 1 | null | true | 0 | 0 | null | null | null | 0 | 0 | 0 | null | 0 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
c3a67f28d7d4c17733554946c9d523c4232be36c | 388 | py | Python | Codewars/8kyu/century-from-year/Python/test.py | RevansChen/online-judge | ad1b07fee7bd3c49418becccda904e17505f3018 | [
"MIT"
] | 7 | 2017-09-20T16:40:39.000Z | 2021-08-31T18:15:08.000Z | Codewars/8kyu/century-from-year/Python/test.py | RevansChen/online-judge | ad1b07fee7bd3c49418becccda904e17505f3018 | [
"MIT"
] | null | null | null | Codewars/8kyu/century-from-year/Python/test.py | RevansChen/online-judge | ad1b07fee7bd3c49418becccda904e17505f3018 | [
"MIT"
] | null | null | null | # Python - 3.6.0
test.assert_equals(century(1705), 18, 'Testing for year 1705')
test.assert_equals(century(1900), 19, 'Testing for year 1900')
test.assert_equals(century(1601), 17, 'Testing for year 1601')
test.assert_equals(century(2000), 20, 'Testing for year 2000')
test.assert_equals(century(356), 4, 'Testing for year 356')
test.assert_equals(century(89), 1, 'Testing for year 89')
| 43.111111 | 62 | 0.744845 | 64 | 388 | 4.421875 | 0.359375 | 0.212014 | 0.339223 | 0.487633 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.158501 | 0.10567 | 388 | 8 | 63 | 48.5 | 0.657061 | 0.036082 | 0 | 0 | 0 | 0 | 0.330645 | 0 | 0 | 0 | 0 | 0 | 1 | 1 | 0 | true | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
7f123f35f3e06456f4ffd151e4463fa13d29ace0 | 7,862 | py | Python | django-rgd/tests/test_checksumfile.py | ResonantGeoData/ResonantGeoData | 72b3d4085cc5700d0ad5556f31b7eb96ed2d3b8a | [
"Apache-2.0"
] | 40 | 2020-05-07T17:15:26.000Z | 2022-02-27T14:45:04.000Z | django-rgd/tests/test_checksumfile.py | ResonantGeoData/ResonantGeoData | 72b3d4085cc5700d0ad5556f31b7eb96ed2d3b8a | [
"Apache-2.0"
] | 408 | 2020-05-07T15:10:35.000Z | 2022-03-30T03:08:47.000Z | django-rgd/tests/test_checksumfile.py | ResonantGeoData/ResonantGeoData | 72b3d4085cc5700d0ad5556f31b7eb96ed2d3b8a | [
"Apache-2.0"
] | 3 | 2021-04-12T20:16:22.000Z | 2021-06-22T14:03:46.000Z | import os
from django.db import IntegrityError
import pytest
from rgd.datastore import datastore, registry
from rgd.models import common, utils
from rgd.models.collection import Collection
FILENAME = 'stars.png'
@pytest.fixture
def file_path():
return datastore.fetch(FILENAME)
@pytest.fixture
def s3_url():
return 's3://sentinel-cogs/sentinel-s2-l2a-cogs/2020/S2A_31QHU_20200714_0_L2A/S2A_31QHU_20200714_0_L2A.json'
@pytest.mark.django_db(transaction=True)
def test_create_local_file(file_path):
model = common.ChecksumFile()
model.type = common.FileSourceType.FILE_FIELD
with open(file_path, 'rb') as f:
model.file.save(FILENAME, f)
model.save()
model.post_save_job()
model.refresh_from_db()
assert model.checksum == registry[FILENAME].split(':')[1]
assert model.name == FILENAME
@pytest.mark.django_db(transaction=True)
def test_create_url():
model = common.ChecksumFile()
model.type = common.FileSourceType.URL
model.url = datastore.get_url(FILENAME)
model.save()
model.post_save_job()
model.refresh_from_db()
assert model.checksum == registry[FILENAME].split(':')[1]
assert model.name
@pytest.mark.django_db(transaction=True)
def test_constraint_mismatch(file_path):
with pytest.raises(IntegrityError):
model = common.ChecksumFile()
model.type = common.FileSourceType.FILE_FIELD
model.url = datastore.get_url(FILENAME)
model.save()
with pytest.raises(IntegrityError):
model = common.ChecksumFile()
model.type = common.FileSourceType.URL
with open(file_path, 'rb') as f:
model.file.save(FILENAME, f)
@pytest.mark.django_db(transaction=True)
def test_constraint_url_null():
with pytest.raises(IntegrityError):
model = common.ChecksumFile()
model.type = common.FileSourceType.URL
model.save()
@pytest.mark.django_db(transaction=True)
def test_constraint_url_empty():
with pytest.raises(IntegrityError):
model = common.ChecksumFile()
model.type = common.FileSourceType.URL
model.url = '' # empty string
model.save()
@pytest.mark.django_db(transaction=True)
def test_constraint_file_with_empty_url(file_path):
# Make sure the constraint passes when an empty string URL is given with
# the FileField choice. This happens when adding files in the admin interface
model = common.ChecksumFile()
model.type = common.FileSourceType.FILE_FIELD
model.url = ''
with open(file_path, 'rb') as f:
model.file.save(FILENAME, f)
assert not model.url
assert model.file.name
@pytest.mark.django_db(transaction=True)
def test_yield_local_path_file(file_path):
model = common.ChecksumFile()
model.type = common.FileSourceType.FILE_FIELD
with open(file_path, 'rb') as f:
model.file.save(FILENAME, f)
model.save()
path = model.yield_local_path()
with model.yield_local_path() as path:
assert os.path.exists(path)
# Make sure it is cleaned up after context ends
assert not os.path.exists(path)
# Now test that is gets cleaned up during an exception
with pytest.raises(ValueError):
with model.yield_local_path() as path:
raise ValueError()
assert not os.path.exists(path)
@pytest.mark.django_db(transaction=True)
def test_yield_local_path_url_http():
model = common.ChecksumFile()
model.type = common.FileSourceType.URL
model.url = datastore.get_url(FILENAME)
model.save()
with model.yield_local_path() as path:
assert os.path.exists(path)
# Make sure it is cleaned up after context ends
assert not os.path.exists(path)
# Now test that is gets cleaned up during an exception
with pytest.raises(ValueError):
with model.yield_local_path() as path:
raise ValueError()
assert not os.path.exists(path)
@pytest.mark.django_db(transaction=True)
def test_yield_local_path_url_s3(s3_url):
model = common.ChecksumFile()
model.type = common.FileSourceType.URL
model.url = s3_url
model.save()
with model.yield_local_path() as path:
assert os.path.exists(path)
# Make sure it is cleaned up after context ends
assert not os.path.exists(path)
# Now test that is gets cleaned up during an exception
with pytest.raises(ValueError):
with model.yield_local_path() as path:
raise ValueError()
assert not os.path.exists(path)
@pytest.mark.django_db(transaction=True)
def test_get_or_create_file(file_path):
with open(file_path, 'rb') as f:
file, created = utils.get_or_create_checksumfile(file=f)
assert created
with open(file_path, 'rb') as f:
file, created = utils.get_or_create_checksumfile(file=f)
assert not created
@pytest.mark.django_db(transaction=True)
def test_get_or_create_file_permissions(file_path):
collection = Collection.objects.create(name='Foo')
with open(file_path, 'rb') as f:
file, created = utils.get_or_create_checksumfile(collection=collection, file=f)
assert created
assert file.collection == collection
with open(file_path, 'rb') as f:
file, created = utils.get_or_create_checksumfile(collection=collection, file=f)
assert not created
with open(file_path, 'rb') as f:
file, created = utils.get_or_create_checksumfile(file=f)
# Because this passed collection is None, make sure a new file is created
assert created
assert file.collection is None
@pytest.mark.django_db(transaction=True)
def test_get_or_create_url():
url = datastore.get_url(FILENAME)
file, created = utils.get_or_create_checksumfile(url=url)
assert created
file, created = utils.get_or_create_checksumfile(url=url)
assert not created
@pytest.mark.django_db(transaction=True)
def test_get_or_create_url_permissions():
url = datastore.get_url(FILENAME)
collection = Collection.objects.create(name='Foo')
file, created = utils.get_or_create_checksumfile(collection=collection, url=url)
assert created
assert file.collection == collection
file, created = utils.get_or_create_checksumfile(collection=collection, url=url)
assert not created
# Because this passed collection is None, make sure a new file is created
file, created = utils.get_or_create_checksumfile(url=url)
assert created
assert file.collection is None
@pytest.mark.django_db(transaction=True)
def test_get_or_create_url_checksum():
url = datastore.get_url(FILENAME)
file, created = utils.get_or_create_checksumfile(url=url)
assert created
file, created = utils.get_or_create_checksumfile(url=url, precompute_url_checksum=True)
assert not created
@pytest.mark.django_db(transaction=True)
def test_yield_checksumfiles(s3_url):
# Two URL files
url = datastore.get_url('afie_1.jpg')
file_1, _ = utils.get_or_create_checksumfile(url=url, name='afie_1.jpg')
url = datastore.get_url('afie_2.jpg')
file_2, _ = utils.get_or_create_checksumfile(url=url, name='the/best/dog/afie_2.jpeg')
# One S3 URL file
file_3, _ = utils.get_or_create_checksumfile(url=s3_url, name='s3/file/stuff.json')
# One FileField file
with open(datastore.fetch('afie_3.jpg'), 'rb') as f:
file_4, _ = utils.get_or_create_checksumfile(file=f, name='afie_3.jpg')
# Checkout all of these files under a single temporary directory
# Note that 2 files are at top level and one file is nested
files = common.ChecksumFile.objects.all()
assert files.count() == 4
with utils.yield_checksumfiles(files) as directory:
assert os.path.exists(directory)
for f in files.all():
assert os.path.exists(os.path.join(directory, f.name))
# Make sure the directory is cleaned up
assert not os.path.exists(directory)
| 34.787611 | 112 | 0.718774 | 1,110 | 7,862 | 4.910811 | 0.127928 | 0.019263 | 0.042378 | 0.046964 | 0.793799 | 0.767382 | 0.738397 | 0.727389 | 0.713447 | 0.679875 | 0 | 0.008567 | 0.183414 | 7,862 | 225 | 113 | 34.942222 | 0.840498 | 0.103027 | 0 | 0.728324 | 0 | 0.00578 | 0.032414 | 0.017486 | 0 | 0 | 0 | 0 | 0.202312 | 1 | 0.098266 | false | 0 | 0.034682 | 0.011561 | 0.144509 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
7f2360c2dea9c1b8f8f3e8d88dd132736a85de9e | 2,420 | py | Python | tests/test_include_exclude.py | charlesdong1991/nbQA | 48ae8fce4de4140a4dc9cca6214b035df887102e | [
"MIT"
] | null | null | null | tests/test_include_exclude.py | charlesdong1991/nbQA | 48ae8fce4de4140a4dc9cca6214b035df887102e | [
"MIT"
] | 2 | 2020-10-17T01:39:37.000Z | 2020-10-17T09:08:30.000Z | tests/test_include_exclude.py | girip11/nbQA | ab1185ffbc4d136d900ef8fd11f9fcf44239f608 | [
"MIT"
] | null | null | null | """Check include-exclude work."""
import re
from pathlib import Path
from textwrap import dedent
from typing import TYPE_CHECKING
import pytest
from nbqa.__main__ import main
if TYPE_CHECKING:
from _pytest.capture import CaptureFixture
def test_cli_files(capsys: "CaptureFixture"):
"""
Test --nbqa-files is picked up correctly.
Parameters
----------
capsys
Pytest fixture to capture stdout and stderr.
"""
with pytest.raises(SystemExit):
main(["flake8", "tests", "--nbqa-files", "^tests/data/notebook_for"])
out, _ = capsys.readouterr()
assert out and all(
re.search(r"^tests.data.notebook_for", i) for i in out.splitlines()
)
def test_cli_exclude(capsys: "CaptureFixture"):
"""
Test --nbqa-exclude is picked up correctly.
Parameters
----------
capsys
Pytest fixture to capture stdout and stderr.
"""
with pytest.raises(SystemExit):
main(["flake8", "tests", "--nbqa-exclude", "^tests/data/notebook_for"])
out, _ = capsys.readouterr()
assert out and all(
re.search(r"^tests.data.notebook_for", i) is None for i in out.splitlines()
)
def test_config_files(capsys: "CaptureFixture"):
"""
Test [nbqa.files] config is picked up correctly.
Parameters
----------
capsys
Pytest fixture to capture stdout and stderr.
"""
Path("setup.cfg").write_text(
dedent(
"""\
[nbqa.files]
flake8 = ^tests/data/notebook_for
"""
)
)
with pytest.raises(SystemExit):
main(["flake8", "tests"])
Path("setup.cfg").unlink()
out, _ = capsys.readouterr()
assert out and all(
re.search(r"^tests.data.notebook_for", i) for i in out.splitlines()
)
def test_config_exclude(capsys: "CaptureFixture"):
"""
Test [nbqa.exclude] config is picked up correctly.
Parameters
----------
capsys
Pytest fixture to capture stdout and stderr.
"""
Path("setup.cfg").write_text(
dedent(
"""\
[nbqa.exclude]
flake8 = ^tests/data/notebook_for
"""
)
)
with pytest.raises(SystemExit):
main(["flake8", "tests"])
Path("setup.cfg").unlink()
out, _ = capsys.readouterr()
assert out and all(
re.search(r"^tests.data.notebook_for", i) is None for i in out.splitlines()
)
| 22.830189 | 83 | 0.600826 | 282 | 2,420 | 5.053191 | 0.20922 | 0.050526 | 0.095439 | 0.112281 | 0.841404 | 0.841404 | 0.729123 | 0.729123 | 0.715789 | 0.715789 | 0 | 0.003354 | 0.260744 | 2,420 | 105 | 84 | 23.047619 | 0.793181 | 0.218595 | 0 | 0.521739 | 0 | 0 | 0.191131 | 0.089944 | 0 | 0 | 0 | 0 | 0.086957 | 1 | 0.086957 | false | 0 | 0.152174 | 0 | 0.23913 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
618393a55cb4057d656249f35b982fbe21f869c9 | 26 | py | Python | seq_tag/layers/__init__.py | Jacen789/sequence-tagging | 4c08cad1e3c083a21424f2b75c2884cc1d973884 | [
"MIT"
] | 2 | 2020-04-06T14:23:28.000Z | 2020-12-28T11:43:30.000Z | seq_tag/layers/__init__.py | Jacen789/sequence-tagging | 4c08cad1e3c083a21424f2b75c2884cc1d973884 | [
"MIT"
] | 2 | 2020-09-14T12:53:28.000Z | 2021-06-11T10:18:57.000Z | seq_tag/layers/__init__.py | Jacen789/sequence-tagging | 4c08cad1e3c083a21424f2b75c2884cc1d973884 | [
"MIT"
] | 1 | 2020-12-16T01:54:35.000Z | 2020-12-16T01:54:35.000Z | from .torchcrf import CRF
| 13 | 25 | 0.807692 | 4 | 26 | 5.25 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.153846 | 26 | 1 | 26 | 26 | 0.954545 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
61b18c455ef2f5d050460d77241eb7b1ed79e329 | 128 | py | Python | main/cron.py | Harsh-Sanklecha/stockanalysis | f5c6a454863ff72e0bac5e29035033cbf1bb3a06 | [
"MIT"
] | null | null | null | main/cron.py | Harsh-Sanklecha/stockanalysis | f5c6a454863ff72e0bac5e29035033cbf1bb3a06 | [
"MIT"
] | null | null | null | main/cron.py | Harsh-Sanklecha/stockanalysis | f5c6a454863ff72e0bac5e29035033cbf1bb3a06 | [
"MIT"
] | 2 | 2021-01-08T12:32:59.000Z | 2021-02-14T11:41:51.000Z | from .models import endOfDay
from .views import testModule
def my_scheduled_job():
endOfDay.objects.create(symbol="TEST5")
| 21.333333 | 43 | 0.78125 | 17 | 128 | 5.764706 | 0.823529 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.008929 | 0.125 | 128 | 5 | 44 | 25.6 | 0.866071 | 0 | 0 | 0 | 0 | 0 | 0.039063 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.25 | true | 0 | 0.5 | 0 | 0.75 | 0 | 1 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
4ee4ae3a0067de0f540a279c154f0fdd360b1664 | 192 | py | Python | src/sjournal/__init__.py | SamuelStuver/sjournal | 07637e6a2a8616f529aa2c572dfee23dfefd1fac | [
"MIT"
] | null | null | null | src/sjournal/__init__.py | SamuelStuver/sjournal | 07637e6a2a8616f529aa2c572dfee23dfefd1fac | [
"MIT"
] | 24 | 2022-03-11T02:04:58.000Z | 2022-03-23T04:17:03.000Z | src/sjournal/__init__.py | SamuelStuver/sjournal | 07637e6a2a8616f529aa2c572dfee23dfefd1fac | [
"MIT"
] | null | null | null | from .utilities.version import __version__
from .sjournal import main, SJournal, Note
from .utilities.arguments import parse_args
from .utilities.utilities import get_newest_file, range_parser | 48 | 62 | 0.854167 | 26 | 192 | 6 | 0.576923 | 0.25 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.09375 | 192 | 4 | 62 | 48 | 0.896552 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | null | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
4ef39b08bf348ee5b13dfbd9891f8eb4abdeeb23 | 5,996 | py | Python | back/prescricoes/views.py | ldurans/app-prontuario-tasy | 51098806e289326d7afdd9f4908b1aab75f6d308 | [
"Apache-2.0"
] | null | null | null | back/prescricoes/views.py | ldurans/app-prontuario-tasy | 51098806e289326d7afdd9f4908b1aab75f6d308 | [
"Apache-2.0"
] | null | null | null | back/prescricoes/views.py | ldurans/app-prontuario-tasy | 51098806e289326d7afdd9f4908b1aab75f6d308 | [
"Apache-2.0"
] | null | null | null | from django.db import connections
from rest_framework import viewsets, status
from rest_framework.generics import ListAPIView, UpdateAPIView, RetrieveAPIView
from rest_framework.permissions import AllowAny, IsAuthenticated
from rest_framework.response import Response
from django.contrib.auth.models import UserManager
from django.core.cache import cache
import json
from django.core.cache import cache
class ListarPrescricoes(ListAPIView):
queryset = UserManager
permission_classes = [IsAuthenticated]
def dictfetchall(self, cursor):
desc = cursor.description
return [
dict(zip([col[0] for col in desc], row))
for row in cursor.fetchall()
]
def list(self, request, *args, **kwargs):
user_id = request.user.id
nr_atendimento = request.query_params.get('nr_atendimento', None)
cd_pessoa_fisica = request.query_params.get('cd_pessoa_fisica', None)
str_cache = f'laudos_paciente_{cd_pessoa_fisica}'
with connections['udi'].cursor() as cursor:
prescricoes = f'''
SELECT a.NR_PRESCRICAO,
a.CD_PESSOA_FISICA,
a.NR_ATENDIMENTO,
a.DT_PRESCRICAO,
a.DT_LIBERACAO,
a.DT_VALIDADE_PRESCR,
a.NR_HORAS_VALIDADE,
a.DS_JUSTIFICATIVA,
a.DT_SUSPENSAO,
nvl(a.dt_liberacao,a.dt_liberacao_medico) dt_liberacao_prescr,
substr(obter_nome_medico(a.cd_medico,'N'),1,150) nm_medico,
--substr(obter_desc_protocolo(cd_protocolo),1,255)desc_protocolo,
--substr(obter_desc_protocolo_medic(nr_seq_protocolo,cd_protocolo),1,50)ds_medic_protocolo,
substr(obter_valor_dominio(9,a.ie_origem_inf),1,100) ds_origem_inf,
to_number(obter_cirurgia_prescricao(a.nr_prescricao)) nr_cirurgia_grid,
substr(obter_funcao_usuario_orig(a.nm_usuario_original),1,240) ds_funcao_prescritor,
substr(obter_itens_prescr(a.nr_prescricao, a.ds_itens_prescr),1,255) ds_item,
substr(a.ds_observacao,1,255) ds_observacao_grid,
substr(obter_valor_dominio(136,a.ie_motivo_prescricao),1,60) ds_motivo_prescr,
r.DS_RESUMO
FROM prescr_medica a,
prescr_medica_resumo r
WHERE a.NR_PRESCRICAO = r.NR_PRESCRICAO(+)
AND a.DT_LIBERACAO <= nvl(r.DT_ATUALIZACAO, SYSDATE)
AND a.nr_atendimento = '{nr_atendimento}'
order by a.nr_prescricao
'''
cursor.execute(prescricoes)
rows = self.dictfetchall(cursor)
for row in rows:
if not bool(row['DS_RESUMO']):
# executar a geração do resumo da prescrição
html = cursor.callproc('Executar_REP_Gerar_Resumo_PCK', [
row['NR_PRESCRICAO'],
'RTF',
'APPUDI',
])
cursor.execute(prescricoes)
rows = self.dictfetchall(cursor)
return Response(rows, status.HTTP_200_OK)
class BuscarPrescricao(RetrieveAPIView):
queryset = UserManager
permission_classes = [IsAuthenticated]
def dictfetchall(self, cursor):
desc = cursor.description
return [
dict(zip([col[0] for col in desc], row))
for row in cursor.fetchall()
]
def get(self, request, *args, **kwargs):
user_id = request.user.id
nr_atendimento = request.query_params.get('nr_atendimento', None)
nr_prescricao = request.query_params.get('nr_prescricao', None)
with connections['udi'].cursor() as cursor:
# executar a geração do resumo da prescrição
html = cursor.callproc('Executar_REP_Gerar_Resumo_PCK', [
nr_prescricao,
'html',
'APPUDI',
])
# realizar a consulta da prescrição
prescricao = f'''
SELECT a.NR_PRESCRICAO,
a.CD_PESSOA_FISICA,
a.NR_ATENDIMENTO,
a.DT_PRESCRICAO,
a.DT_LIBERACAO,
a.DT_VALIDADE_PRESCR,
a.NR_HORAS_VALIDADE,
a.DS_JUSTIFICATIVA,
a.DT_SUSPENSAO,
nvl(a.dt_liberacao,a.dt_liberacao_medico) dt_liberacao_prescr,
substr(obter_nome_medico(a.cd_medico,'N'),1,150) nm_medico,
--substr(obter_desc_protocolo(cd_protocolo),1,255)desc_protocolo,
--substr(obter_desc_protocolo_medic(nr_seq_protocolo,cd_protocolo),1,50)ds_medic_protocolo,
substr(obter_valor_dominio(9,a.ie_origem_inf),1,100) ds_origem_inf,
to_number(obter_cirurgia_prescricao(a.nr_prescricao)) nr_cirurgia_grid,
substr(obter_funcao_usuario_orig(a.nm_usuario_original),1,240) ds_funcao_prescritor,
substr(obter_itens_prescr(a.nr_prescricao, a.ds_itens_prescr),1,255) ds_item,
substr(a.ds_observacao,1,255) ds_observacao_grid,
substr(obter_valor_dominio(136,a.ie_motivo_prescricao),1,60) ds_motivo_prescr,
r.DS_RESUMO
FROM prescr_medica a,
prescr_medica_resumo r
WHERE a.NR_PRESCRICAO = r.NR_PRESCRICAO(+)
AND a.DT_LIBERACAO <= nvl(r.DT_ATUALIZACAO, SYSDATE)
and a.nr_prescricao = '{nr_prescricao}'
AND a.nr_atendimento = '{nr_atendimento}'
order by a.nr_prescricao
'''
cursor.execute(prescricao)
prescricao = self.dictfetchall(cursor)
return Response(prescricao, status.HTTP_200_OK)
| 45.082707 | 111 | 0.59473 | 680 | 5,996 | 4.95 | 0.216176 | 0.064171 | 0.042484 | 0.024955 | 0.794415 | 0.772727 | 0.735888 | 0.710042 | 0.710042 | 0.710042 | 0 | 0.018738 | 0.323549 | 5,996 | 132 | 112 | 45.424242 | 0.811144 | 0.019847 | 0 | 0.747826 | 0 | 0 | 0.594245 | 0.230547 | 0 | 0 | 0 | 0 | 0 | 1 | 0.034783 | false | 0 | 0.078261 | 0 | 0.2 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
f6324cb1965255cbd817e151f320dc5811457b04 | 5,735 | py | Python | modules/audits/linux/hosts_file_checks/lib_hosts_file_checks.py | Ck1998/SecurusAudire | fb6288a72d470f11f6114e0ea6ffb6fa63da80e6 | [
"MIT"
] | null | null | null | modules/audits/linux/hosts_file_checks/lib_hosts_file_checks.py | Ck1998/SecurusAudire | fb6288a72d470f11f6114e0ea6ffb6fa63da80e6 | [
"MIT"
] | null | null | null | modules/audits/linux/hosts_file_checks/lib_hosts_file_checks.py | Ck1998/SecurusAudire | fb6288a72d470f11f6114e0ea6ffb6fa63da80e6 | [
"MIT"
] | null | null | null | from modules.audits.base_model import BaseTest
import config as CONFIG
class HostsFileChecks(BaseTest):
    """Audit /etc/hosts and /etc/hosts.allow against a database of flagged
    remote-host entries.

    Results are accumulated in ``self.test_results`` (keyed by file name,
    then by the human-readable host name) and scoring/warning state is
    written into the global ``CONFIG`` module.
    """

    def __init__(self):
        super().__init__()
        self.test_results = {}
        self.hosts_file_location = f"{CONFIG.ROOT_DIR}etc/hosts"
        self.hosts_allow_file_location = f"{CONFIG.ROOT_DIR}etc/hosts.allow"
        # Checks database: one "@<file> !<host-pattern> !<warning>" entry per line.
        self.hosts_file_checks_db = "db/linux_db/check_hosts_file_db.txt"
        # Groups: (1) target file name, (2) host pattern, (3) warning message.
        self.db_regex = r"@(.+)(?:\s)!(.+)(?:\s)!(?:\s)?(.*)(?:\s)?"

    def _audit_file(self, file_location, target_file_name, support_link):
        """Shared audit loop for both hosts files.

        Reads ``file_location`` and, for every checks-database entry whose
        file field matches ``target_file_name``, records whether the flagged
        host pattern appears in the file, updating scores and warnings.
        Raises ``FileNotFoundError`` (propagated from ``open``) when either
        the audited file or the checks database is missing; callers translate
        that into their own scoring.
        """
        with open(file_location, 'r') as read_obj:
            hosts_file_data = read_obj.read()
        with open(self.hosts_file_checks_db, 'r') as db_read_obj:
            for checks in db_read_obj.readlines():
                # Skip comment lines and (near-)blank lines in the database.
                if "#" in checks[0] or len(checks) < 2:
                    continue
                test_parameters = self.util_obj.run_regex_search(self.db_regex, checks)
                file_name = test_parameters.group(1)
                remote_host_name_to_check = test_parameters.group(2)
                negative_message = test_parameters.group(3)
                if file_name.lower() != target_file_name:
                    continue
                # Lazily create the per-file result bucket.
                self.test_results.setdefault(file_name, {})
                regex_to_search = rf"(.*{remote_host_name_to_check})"
                # The DB stores regex-escaped dots; un-escape for display keys.
                remote_host_name = remote_host_name_to_check.replace(r"\.", ".")
                if self.util_obj.run_regex_search(regex=regex_to_search, data=hosts_file_data):
                    # BUG FIX: previously the hosts.allow path keyed this entry on the
                    # escaped pattern (remote_host_name_to_check); now consistently
                    # keyed on the un-escaped host name everywhere.
                    self.test_results[file_name][remote_host_name] = {
                        "Result": f"{negative_message} - {remote_host_name}"
                    }
                    CONFIG.TOTAL_SCORE_POSSIBLE += 1
                    CONFIG.WARNING_DICT[
                        f"Irregularities found in file - {CONFIG.ROOT_DIR}{file_name} - {remote_host_name}"] = {
                        "Warning": negative_message,
                        "Support Link": support_link
                    }
                else:
                    CONFIG.TOTAL_SCORE_POSSIBLE += 1
                    CONFIG.SYSTEM_SCORE += 1
                    self.test_results[file_name][remote_host_name] = {
                        "Result": f"Not found in {file_name}"
                    }

    def check_hosts_file(self):
        """Audit /etc/hosts; a missing file is itself recorded as a warning."""
        try:
            self._audit_file(
                self.hosts_file_location,
                "/etc/hosts",
                "http://manpages.ubuntu.com/manpages/trusty/man5/hosts.5.html",
            )
        except FileNotFoundError:
            CONFIG.TOTAL_SCORE_POSSIBLE += 1
            CONFIG.WARNING_DICT[f'{CONFIG.ROOT_DIR}etc/hosts not found'] = {
                "Warning": f'{CONFIG.ROOT_DIR}etc/hosts not found on the system',
                "Support Link": "http://manpages.ubuntu.com/manpages/trusty/man5/hosts.5.html"
            }

    def check_hosts_allow_file(self):
        """Audit /etc/hosts.allow; a missing file only bumps the possible score."""
        try:
            self._audit_file(
                self.hosts_allow_file_location,
                "/etc/hosts.allow",
                "https://linux.die.net/man/5/hosts.allow",
            )
        except FileNotFoundError:
            CONFIG.TOTAL_SCORE_POSSIBLE += 1

    def run_test(self):
        """Run both hosts-file audits and return the collected results."""
        self.check_hosts_file()
        self.check_hosts_allow_file()
        return self.test_results
# TODO: Use better method for this audit
| 47.791667 | 120 | 0.499738 | 591 | 5,735 | 4.499154 | 0.172589 | 0.075216 | 0.105303 | 0.067695 | 0.860098 | 0.845055 | 0.842422 | 0.821361 | 0.773599 | 0.76683 | 0 | 0.006759 | 0.406626 | 5,735 | 119 | 121 | 48.193277 | 0.774611 | 0.006626 | 0 | 0.677083 | 0 | 0 | 0.149605 | 0.05338 | 0 | 0 | 0 | 0.008403 | 0 | 1 | 0.041667 | false | 0 | 0.020833 | 0 | 0.083333 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
9cb4d5930e61f0f9fa0812ec241ead621ed7546d | 71 | py | Python | pyalign/problems/__init__.py | poke1024/pyalign | 23a15f3fac2cb80f2b75599fece615e1aaa0129e | [
"MIT"
] | 21 | 2021-06-23T10:22:56.000Z | 2022-01-15T20:32:10.000Z | pyalign/problems/__init__.py | poke1024/pyalign | 23a15f3fac2cb80f2b75599fece615e1aaa0129e | [
"MIT"
] | null | null | null | pyalign/problems/__init__.py | poke1024/pyalign | 23a15f3fac2cb80f2b75599fece615e1aaa0129e | [
"MIT"
] | 1 | 2022-01-12T21:21:41.000Z | 2022-01-12T21:21:41.000Z | from .instance import *
from .function import *
from .factory import *
| 17.75 | 23 | 0.746479 | 9 | 71 | 5.888889 | 0.555556 | 0.377358 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.169014 | 71 | 3 | 24 | 23.666667 | 0.898305 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | null | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
9cbd219201f6ecd8e6d8d4bb379f19f1c075a4f3 | 121 | py | Python | Python/doctest.py | amanullahtariq/python-practice | cd19310a6af97e609680beb07c38f11a3805f22e | [
"MIT"
] | 1 | 2022-03-25T13:20:29.000Z | 2022-03-25T13:20:29.000Z | Python/doctest.py | amanullahtariq/python-practice | cd19310a6af97e609680beb07c38f11a3805f22e | [
"MIT"
] | null | null | null | Python/doctest.py | amanullahtariq/python-practice | cd19310a6af97e609680beb07c38f11a3805f22e | [
"MIT"
] | null | null | null | import math
def multiply(a, b):
    """Return the product of a and b."""
    product = a * b
    return product
def add(x,y):
    """Return the sum of x and y."""
    total = x + y
    return total
def subtract(x,y):
    """Return x minus y."""
    difference = x - y
    return difference
| 10.083333 | 19 | 0.561983 | 23 | 121 | 2.956522 | 0.478261 | 0.117647 | 0.235294 | 0.264706 | 0.294118 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.31405 | 121 | 11 | 20 | 11 | 0.819277 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.428571 | false | 0 | 0.142857 | 0.428571 | 1 | 0 | 1 | 0 | 0 | null | 0 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 1 | 1 | 0 | 0 | 6 |
9cbf32c29eded677486e15447e12d099886b0062 | 20 | py | Python | keg_apps/templating/__init__.py | level12/keg | 6f148a9bd0b8e167007ed5c2a0000daf7de3aee2 | [
"BSD-3-Clause"
] | 15 | 2015-06-26T09:01:53.000Z | 2020-08-28T16:29:14.000Z | keg_apps/profile/__init__.py | level12/keg | 6f148a9bd0b8e167007ed5c2a0000daf7de3aee2 | [
"BSD-3-Clause"
] | 165 | 2015-03-27T06:49:38.000Z | 2022-03-11T21:39:52.000Z | keg_apps/web/__init__.py | level12/keg | 6f148a9bd0b8e167007ed5c2a0000daf7de3aee2 | [
"BSD-3-Clause"
] | 9 | 2015-04-22T17:03:32.000Z | 2018-06-25T17:48:15.000Z | # silence pep8 W391
| 10 | 19 | 0.75 | 3 | 20 | 5 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.25 | 0.2 | 20 | 1 | 20 | 20 | 0.6875 | 0.85 | 0 | null | 0 | null | 0 | 0 | null | 0 | 0 | 0 | null | 1 | null | true | 0 | 0 | null | null | null | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
9cd208204b3521a5aa16592b5734102f181582fc | 47 | wsgi | Python | core/flaskapp.wsgi | dasushi/silkymitties-server | 8314de95647fefb8556e0ba7aff96545a405088c | [
"MIT"
] | null | null | null | core/flaskapp.wsgi | dasushi/silkymitties-server | 8314de95647fefb8556e0ba7aff96545a405088c | [
"MIT"
] | null | null | null | core/flaskapp.wsgi | dasushi/silkymitties-server | 8314de95647fefb8556e0ba7aff96545a405088c | [
"MIT"
] | null | null | null | #flaskapp.wsgi
import sys
# Expand sys.path with the application directory so the WSGI server can import the Flask app package.
| 9.4 | 19 | 0.765957 | 7 | 47 | 5.142857 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.170213 | 47 | 4 | 20 | 11.75 | 0.923077 | 0.638298 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
9cd69fe58ddc7b8f9177a272f59a8a4d4d3f95e5 | 127 | py | Python | src/queue.py | jonpemby/scheduler | f27b413aa2d724b3e7e71019da46782c2e04c69e | [
"Unlicense"
] | null | null | null | src/queue.py | jonpemby/scheduler | f27b413aa2d724b3e7e71019da46782c2e04c69e | [
"Unlicense"
] | null | null | null | src/queue.py | jonpemby/scheduler | f27b413aa2d724b3e7e71019da46782c2e04c69e | [
"Unlicense"
] | null | null | null | class AbstractQueue:
def peek(self):
pass
def push(self, item):
pass
def pop(self):
pass
| 12.7 | 25 | 0.519685 | 15 | 127 | 4.4 | 0.6 | 0.242424 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.393701 | 127 | 9 | 26 | 14.111111 | 0.857143 | 0 | 0 | 0.428571 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.428571 | false | 0.428571 | 0 | 0 | 0.571429 | 0 | 1 | 0 | 0 | null | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 0 | 1 | 0 | 0 | 6 |
144cc2bd0567355748ad7f496b208912b4ed395b | 104 | py | Python | mnmt/model/__init__.py | Lawhy/Multi-task-NMT | d8e6a957f3d6e870172f6aa92e9871769d863244 | [
"MIT"
] | 5 | 2020-12-05T14:53:33.000Z | 2022-01-12T02:04:10.000Z | mnmt/model/__init__.py | Lawhy/Multi-task-NMT | d8e6a957f3d6e870172f6aa92e9871769d863244 | [
"MIT"
] | null | null | null | mnmt/model/__init__.py | Lawhy/Multi-task-NMT | d8e6a957f3d6e870172f6aa92e9871769d863244 | [
"MIT"
] | 2 | 2021-01-15T02:37:55.000Z | 2022-01-12T02:04:14.000Z | from mnmt.model.basic_seq2seq import Seq2Seq
from mnmt.model.multi_task_seq2seq import Seq2MultiSeq
| 26 | 55 | 0.846154 | 15 | 104 | 5.666667 | 0.6 | 0.188235 | 0.305882 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.043478 | 0.115385 | 104 | 3 | 56 | 34.666667 | 0.880435 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | null | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
1466f1d0d38542454235791db0f0c339aac71a8f | 34 | py | Python | arnold/output/__init__.py | hacklabza/arnold | d51f6b751ce6530650555cd33bf707f00b60af59 | [
"BSD-3-Clause"
] | 2 | 2021-08-20T05:19:37.000Z | 2022-01-11T09:39:39.000Z | arnold/output/__init__.py | hacklabza/arnold | d51f6b751ce6530650555cd33bf707f00b60af59 | [
"BSD-3-Clause"
] | null | null | null | arnold/output/__init__.py | hacklabza/arnold | d51f6b751ce6530650555cd33bf707f00b60af59 | [
"BSD-3-Clause"
] | null | null | null | from arnold.output import speaker
| 17 | 33 | 0.852941 | 5 | 34 | 5.8 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.117647 | 34 | 1 | 34 | 34 | 0.966667 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
147646fc4f34ba552a52c29470b966c1c8c15bdc | 276 | py | Python | igramscraper/two_step_verification/__init__.py | tadpol-frog/instagram-scraper | 6d38ce6ad3e18bb627dbb76458e25edfaed76e15 | [
"MIT"
] | 2,420 | 2019-06-10T14:26:25.000Z | 2022-03-29T06:29:20.000Z | igramscraper/two_step_verification/__init__.py | tadpol-frog/instagram-scraper | 6d38ce6ad3e18bb627dbb76458e25edfaed76e15 | [
"MIT"
] | 178 | 2019-06-11T08:42:28.000Z | 2022-03-07T00:05:53.000Z | igramscraper/two_step_verification/__init__.py | tadpol-frog/instagram-scraper | 6d38ce6ad3e18bb627dbb76458e25edfaed76e15 | [
"MIT"
] | 445 | 2019-06-10T17:37:58.000Z | 2022-03-31T14:28:03.000Z | from igramscraper.two_step_verification.two_step_verification_abstract_class import TwoStepVerificationAbstractClass
from igramscraper.two_step_verification.console_verification import ConsoleVerification
__all__ = ["TwoStepVerificationAbstractClass", "ConsoleVerification"]
| 55.2 | 116 | 0.90942 | 24 | 276 | 9.916667 | 0.5 | 0.088235 | 0.239496 | 0.193277 | 0.294118 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.047101 | 276 | 4 | 117 | 69 | 0.904943 | 0 | 0 | 0 | 0 | 0 | 0.184783 | 0.115942 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.666667 | 0 | 0.666667 | 0 | 1 | 0 | 0 | null | 0 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
14af72df96aa61962a485c9cbaf4ff3e3d59ef0c | 223 | py | Python | netdice/util.py | heroinedd/netdice | 2a750845108b7e7a57c857b8257989c783621209 | [
"MIT"
] | 27 | 2020-06-27T21:42:50.000Z | 2022-03-17T12:15:14.000Z | netdice/util.py | heroinedd/netdice | 2a750845108b7e7a57c857b8257989c783621209 | [
"MIT"
] | null | null | null | netdice/util.py | heroinedd/netdice | 2a750845108b7e7a57c857b8257989c783621209 | [
"MIT"
] | 1 | 2021-03-29T01:18:34.000Z | 2021-03-29T01:18:34.000Z | import os
project_root_dir = os.path.abspath(os.path.join(os.path.dirname(os.path.realpath(__file__)), ".."))
def get_relative_to_working_directory(file: str):
    """Return the absolute path of *file*, resolved against the current working directory."""
    working_directory = os.getcwd()
    return os.path.abspath(os.path.join(working_directory, file))
| 27.875 | 99 | 0.744395 | 36 | 223 | 4.333333 | 0.555556 | 0.230769 | 0.166667 | 0.192308 | 0.320513 | 0.320513 | 0.320513 | 0 | 0 | 0 | 0 | 0 | 0.085202 | 223 | 7 | 100 | 31.857143 | 0.764706 | 0 | 0 | 0 | 0 | 0 | 0.008969 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.25 | false | 0 | 0.25 | 0.25 | 0.75 | 0 | 0 | 0 | 0 | null | 1 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 1 | 1 | 0 | 0 | 6 |
21636916b8dda216fe38bd6d005dc9c6a5f11f69 | 114 | py | Python | 30-Days-Of-Python-master/30-Days-Of-Python-master/mypackage/greet.py | ATTO-DT/DATA-SCIENCE-PROJECT | ae192c3e6c6aa1daf9f5610a732822bdc76f0691 | [
"Apache-2.0"
] | 9 | 2020-07-02T06:06:17.000Z | 2022-02-26T11:08:09.000Z | 30-Days-Of-Python-master/30-Days-Of-Python-master/mypackage/greet.py | ATTO-DT/DATA-SCIENCE-PROJECT | ae192c3e6c6aa1daf9f5610a732822bdc76f0691 | [
"Apache-2.0"
] | 1 | 2021-11-04T17:26:36.000Z | 2021-11-04T17:26:36.000Z | 30-Days-Of-Python-master/30-Days-Of-Python-master/mypackage/greet.py | ATTO-DT/DATA-SCIENCE-PROJECT | ae192c3e6c6aa1daf9f5610a732822bdc76f0691 | [
"Apache-2.0"
] | 8 | 2021-01-31T10:31:12.000Z | 2022-03-13T09:15:55.000Z | def greet_person(firstname, lastname):
return f'{firstname} {lastname}, welcome to 30DaysOfPython Challenge!'
| 38 | 74 | 0.77193 | 13 | 114 | 6.692308 | 0.846154 | 0.390805 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.02 | 0.122807 | 114 | 2 | 75 | 57 | 0.85 | 0 | 0 | 0 | 0 | 0 | 0.526316 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.5 | false | 0 | 0 | 0.5 | 1 | 0 | 1 | 0 | 0 | null | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 1 | 1 | 0 | 0 | 6 |
dcdc042d187079d1607be476836fbfa5fdc7db11 | 232 | py | Python | doctor/models.py | brajeshkumar609/KumarLab | fdb79de662cc27c01c93d5f3599f492c9be6bf14 | [
"MIT"
] | 2 | 2021-03-14T09:36:38.000Z | 2021-12-15T10:59:22.000Z | doctor/models.py | brajeshkumar609/KumarLab | fdb79de662cc27c01c93d5f3599f492c9be6bf14 | [
"MIT"
] | 6 | 2021-03-30T12:45:27.000Z | 2021-09-22T18:36:14.000Z | doctor/models.py | saifulshihab/mlabreport | 36282aa9204652c541ad38f11445324e7ba02db5 | [
"MIT"
] | null | null | null | from django.db import models
# Create your models here.
class doctor(models.Model):
    """Doctor account record: login e-mail, password and display name."""
    # Login e-mail address; 254 chars is the RFC-recommended maximum e-mail length.
    demail = models.CharField(max_length=254)
    # NOTE(review): this stores the password as plain text in a 10-char field --
    # consider Django's auth framework / hashed passwords instead.
    password = models.CharField(max_length=10)
    # Doctor's display name.
    doctor_name = models.CharField(max_length=100)
0d1ef458678b3221fca0466c29a6cbc912489055 | 35 | py | Python | os_v3_hek/defs/tagc.py | holy-crust/reclaimer | 0aa693da3866ce7999c68d5f71f31a9c932cdb2c | [
"MIT"
] | null | null | null | os_v3_hek/defs/tagc.py | holy-crust/reclaimer | 0aa693da3866ce7999c68d5f71f31a9c932cdb2c | [
"MIT"
] | null | null | null | os_v3_hek/defs/tagc.py | holy-crust/reclaimer | 0aa693da3866ce7999c68d5f71f31a9c932cdb2c | [
"MIT"
] | null | null | null | from ...os_hek.defs.tagc import *
| 17.5 | 34 | 0.685714 | 6 | 35 | 3.833333 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.142857 | 35 | 1 | 35 | 35 | 0.766667 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
b4bb7e5350f0f3d39a8b7b63cf90f72733d008b4 | 19,267 | py | Python | code/venv/lib/python3.8/site-packages/datadog_api_client/v2/api/cloud_workload_security_api.py | Valisback/hiring-engineers | 7196915dd5a429ae27c21fa43d527f0332e662ed | [
"Apache-2.0"
] | null | null | null | code/venv/lib/python3.8/site-packages/datadog_api_client/v2/api/cloud_workload_security_api.py | Valisback/hiring-engineers | 7196915dd5a429ae27c21fa43d527f0332e662ed | [
"Apache-2.0"
] | null | null | null | code/venv/lib/python3.8/site-packages/datadog_api_client/v2/api/cloud_workload_security_api.py | Valisback/hiring-engineers | 7196915dd5a429ae27c21fa43d527f0332e662ed | [
"Apache-2.0"
] | null | null | null | # Unless explicitly stated otherwise all files in this repository are licensed under the Apache-2.0 License.
# This product includes software developed at Datadog (https://www.datadoghq.com/).
# Copyright 2019-Present Datadog, Inc.
from datadog_api_client.v2.api_client import ApiClient, Endpoint as _Endpoint
from datadog_api_client.v2.model_utils import (
file_type,
)
from datadog_api_client.v2.model.cloud_workload_security_agent_rule_create_request import (
CloudWorkloadSecurityAgentRuleCreateRequest,
)
from datadog_api_client.v2.model.cloud_workload_security_agent_rule_response import (
CloudWorkloadSecurityAgentRuleResponse,
)
from datadog_api_client.v2.model.cloud_workload_security_agent_rule_update_request import (
CloudWorkloadSecurityAgentRuleUpdateRequest,
)
from datadog_api_client.v2.model.cloud_workload_security_agent_rules_list_response import (
CloudWorkloadSecurityAgentRulesListResponse,
)
class CloudWorkloadSecurityApi(object):
    """NOTE: This class is auto generated by OpenAPI Generator
    Ref: https://openapi-generator.tech
    Do not edit the class manually.
    """

    def __init__(self, api_client=None):
        # Fall back to a default-configured client when none is injected.
        if api_client is None:
            api_client = ApiClient()
        self.api_client = api_client
        # POST /api/v2/security_monitoring/cloud_workload_security/agent_rules
        self._create_cloud_workload_security_agent_rule_endpoint = _Endpoint(
            settings={
                "response_type": (CloudWorkloadSecurityAgentRuleResponse,),
                "auth": ["AuthZ", "apiKeyAuth", "appKeyAuth"],
                "endpoint_path": "/api/v2/security_monitoring/cloud_workload_security/agent_rules",
                "operation_id": "create_cloud_workload_security_agent_rule",
                "http_method": "POST",
                "servers": None,
            },
            params_map={
                "body": {
                    "required": True,
                    "openapi_types": (CloudWorkloadSecurityAgentRuleCreateRequest,),
                    "location": "body",
                },
            },
            headers_map={"accept": ["application/json"], "content_type": ["application/json"]},
            api_client=api_client,
        )
        # DELETE /api/v2/security_monitoring/cloud_workload_security/agent_rules/{agent_rule_id}
        self._delete_cloud_workload_security_agent_rule_endpoint = _Endpoint(
            settings={
                "response_type": None,
                "auth": ["AuthZ", "apiKeyAuth", "appKeyAuth"],
                "endpoint_path": "/api/v2/security_monitoring/cloud_workload_security/agent_rules/{agent_rule_id}",
                "operation_id": "delete_cloud_workload_security_agent_rule",
                "http_method": "DELETE",
                "servers": None,
            },
            params_map={
                "agent_rule_id": {
                    "required": True,
                    "openapi_types": (str,),
                    "attribute": "agent_rule_id",
                    "location": "path",
                },
            },
            headers_map={
                "accept": ["application/json"],
                "content_type": [],
            },
            api_client=api_client,
        )
        # GET /api/v2/security/cloud_workload/policy/download (returns a .policy file)
        self._download_cloud_workload_policy_file_endpoint = _Endpoint(
            settings={
                "response_type": (file_type,),
                "auth": ["AuthZ", "apiKeyAuth", "appKeyAuth"],
                "endpoint_path": "/api/v2/security/cloud_workload/policy/download",
                "operation_id": "download_cloud_workload_policy_file",
                "http_method": "GET",
                "servers": None,
            },
            params_map={},
            headers_map={
                "accept": ["application/yaml", "application/json"],
                "content_type": [],
            },
            api_client=api_client,
        )
        # GET /api/v2/security_monitoring/cloud_workload_security/agent_rules/{agent_rule_id}
        self._get_cloud_workload_security_agent_rule_endpoint = _Endpoint(
            settings={
                "response_type": (CloudWorkloadSecurityAgentRuleResponse,),
                "auth": ["AuthZ", "apiKeyAuth", "appKeyAuth"],
                "endpoint_path": "/api/v2/security_monitoring/cloud_workload_security/agent_rules/{agent_rule_id}",
                "operation_id": "get_cloud_workload_security_agent_rule",
                "http_method": "GET",
                "servers": None,
            },
            params_map={
                "agent_rule_id": {
                    "required": True,
                    "openapi_types": (str,),
                    "attribute": "agent_rule_id",
                    "location": "path",
                },
            },
            headers_map={
                "accept": ["application/json"],
                "content_type": [],
            },
            api_client=api_client,
        )
        # GET /api/v2/security_monitoring/cloud_workload_security/agent_rules
        self._list_cloud_workload_security_agent_rules_endpoint = _Endpoint(
            settings={
                "response_type": (CloudWorkloadSecurityAgentRulesListResponse,),
                "auth": ["AuthZ", "apiKeyAuth", "appKeyAuth"],
                "endpoint_path": "/api/v2/security_monitoring/cloud_workload_security/agent_rules",
                "operation_id": "list_cloud_workload_security_agent_rules",
                "http_method": "GET",
                "servers": None,
            },
            params_map={},
            headers_map={
                "accept": ["application/json"],
                "content_type": [],
            },
            api_client=api_client,
        )
        # PATCH /api/v2/security_monitoring/cloud_workload_security/agent_rules/{agent_rule_id}
        self._update_cloud_workload_security_agent_rule_endpoint = _Endpoint(
            settings={
                "response_type": (CloudWorkloadSecurityAgentRuleResponse,),
                "auth": ["AuthZ", "apiKeyAuth", "appKeyAuth"],
                "endpoint_path": "/api/v2/security_monitoring/cloud_workload_security/agent_rules/{agent_rule_id}",
                "operation_id": "update_cloud_workload_security_agent_rule",
                "http_method": "PATCH",
                "servers": None,
            },
            params_map={
                "agent_rule_id": {
                    "required": True,
                    "openapi_types": (str,),
                    "attribute": "agent_rule_id",
                    "location": "path",
                },
                "body": {
                    "required": True,
                    "openapi_types": (CloudWorkloadSecurityAgentRuleUpdateRequest,),
                    "location": "body",
                },
            },
            headers_map={"accept": ["application/json"], "content_type": ["application/json"]},
            api_client=api_client,
        )

    def create_cloud_workload_security_agent_rule(self, body, **kwargs):
        """Create a Cloud Workload Security Agent rule
        Create a new Agent rule with the given parameters.
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True.
        >>> thread = api.create_cloud_workload_security_agent_rule(body, async_req=True)
        >>> result = thread.get()
        Args:
            body (CloudWorkloadSecurityAgentRuleCreateRequest): The definition of the new Agent rule.
        Keyword Args:
            _return_http_data_only (bool): response data without head status
                code and headers. Default is True.
            _preload_content (bool): if False, the urllib3.HTTPResponse object
                will be returned without reading/decoding response data.
                Default is True.
            _request_timeout (float/tuple): timeout setting for this request. If one
                number provided, it will be total request timeout. It can also
                be a pair (tuple) of (connection, read) timeouts.
                Default is None.
            _check_input_type (bool): specifies if type checking
                should be done one the data sent to the server.
                Default is True.
            _check_return_type (bool): specifies if type checking
                should be done one the data received from the server.
                Default is True.
            _host_index (int/None): specifies the index of the server
                that we want to use.
                Default is read from the configuration.
            async_req (bool): execute request asynchronously
        Returns:
            CloudWorkloadSecurityAgentRuleResponse
            If the method is called asynchronously, returns the request
            thread.
        """
        kwargs = self._create_cloud_workload_security_agent_rule_endpoint.default_arguments(kwargs)
        kwargs["body"] = body
        return self._create_cloud_workload_security_agent_rule_endpoint.call_with_http_info(**kwargs)

    def delete_cloud_workload_security_agent_rule(self, agent_rule_id, **kwargs):
        """Delete a Cloud Workload Security Agent rule
        Delete a specific Agent rule.
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True.
        >>> thread = api.delete_cloud_workload_security_agent_rule(agent_rule_id, async_req=True)
        >>> result = thread.get()
        Args:
            agent_rule_id (str): The ID of the Agent rule.
        Keyword Args:
            _return_http_data_only (bool): response data without head status
                code and headers. Default is True.
            _preload_content (bool): if False, the urllib3.HTTPResponse object
                will be returned without reading/decoding response data.
                Default is True.
            _request_timeout (float/tuple): timeout setting for this request. If one
                number provided, it will be total request timeout. It can also
                be a pair (tuple) of (connection, read) timeouts.
                Default is None.
            _check_input_type (bool): specifies if type checking
                should be done one the data sent to the server.
                Default is True.
            _check_return_type (bool): specifies if type checking
                should be done one the data received from the server.
                Default is True.
            _host_index (int/None): specifies the index of the server
                that we want to use.
                Default is read from the configuration.
            async_req (bool): execute request asynchronously
        Returns:
            None
            If the method is called asynchronously, returns the request
            thread.
        """
        kwargs = self._delete_cloud_workload_security_agent_rule_endpoint.default_arguments(kwargs)
        kwargs["agent_rule_id"] = agent_rule_id
        return self._delete_cloud_workload_security_agent_rule_endpoint.call_with_http_info(**kwargs)

    def download_cloud_workload_policy_file(self, **kwargs):
        """Get the latest Cloud Workload Security policy
        The download endpoint generates a Cloud Workload Security policy file from your currently active Cloud Workload Security rules, and downloads them as a .policy file. This file can then be deployed to your agents to update the policy running in your environment.
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True.
        >>> thread = api.download_cloud_workload_policy_file(async_req=True)
        >>> result = thread.get()
        Keyword Args:
            _return_http_data_only (bool): response data without head status
                code and headers. Default is True.
            _preload_content (bool): if False, the urllib3.HTTPResponse object
                will be returned without reading/decoding response data.
                Default is True.
            _request_timeout (float/tuple): timeout setting for this request. If one
                number provided, it will be total request timeout. It can also
                be a pair (tuple) of (connection, read) timeouts.
                Default is None.
            _check_input_type (bool): specifies if type checking
                should be done one the data sent to the server.
                Default is True.
            _check_return_type (bool): specifies if type checking
                should be done one the data received from the server.
                Default is True.
            _host_index (int/None): specifies the index of the server
                that we want to use.
                Default is read from the configuration.
            async_req (bool): execute request asynchronously
        Returns:
            file_type
            If the method is called asynchronously, returns the request
            thread.
        """
        kwargs = self._download_cloud_workload_policy_file_endpoint.default_arguments(kwargs)
        return self._download_cloud_workload_policy_file_endpoint.call_with_http_info(**kwargs)

    def get_cloud_workload_security_agent_rule(self, agent_rule_id, **kwargs):
        """Get a Cloud Workload Security Agent rule
        Get the details of a specific Agent rule.
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True.
        >>> thread = api.get_cloud_workload_security_agent_rule(agent_rule_id, async_req=True)
        >>> result = thread.get()
        Args:
            agent_rule_id (str): The ID of the Agent rule.
        Keyword Args:
            _return_http_data_only (bool): response data without head status
                code and headers. Default is True.
            _preload_content (bool): if False, the urllib3.HTTPResponse object
                will be returned without reading/decoding response data.
                Default is True.
            _request_timeout (float/tuple): timeout setting for this request. If one
                number provided, it will be total request timeout. It can also
                be a pair (tuple) of (connection, read) timeouts.
                Default is None.
            _check_input_type (bool): specifies if type checking
                should be done one the data sent to the server.
                Default is True.
            _check_return_type (bool): specifies if type checking
                should be done one the data received from the server.
                Default is True.
            _host_index (int/None): specifies the index of the server
                that we want to use.
                Default is read from the configuration.
            async_req (bool): execute request asynchronously
        Returns:
            CloudWorkloadSecurityAgentRuleResponse
            If the method is called asynchronously, returns the request
            thread.
        """
        kwargs = self._get_cloud_workload_security_agent_rule_endpoint.default_arguments(kwargs)
        kwargs["agent_rule_id"] = agent_rule_id
        return self._get_cloud_workload_security_agent_rule_endpoint.call_with_http_info(**kwargs)

    def list_cloud_workload_security_agent_rules(self, **kwargs):
        """Get all Cloud Workload Security Agent rules
        Get the list of Agent rules.
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True.
        >>> thread = api.list_cloud_workload_security_agent_rules(async_req=True)
        >>> result = thread.get()
        Keyword Args:
            _return_http_data_only (bool): response data without head status
                code and headers. Default is True.
            _preload_content (bool): if False, the urllib3.HTTPResponse object
                will be returned without reading/decoding response data.
                Default is True.
            _request_timeout (float/tuple): timeout setting for this request. If one
                number provided, it will be total request timeout. It can also
                be a pair (tuple) of (connection, read) timeouts.
                Default is None.
            _check_input_type (bool): specifies if type checking
                should be done one the data sent to the server.
                Default is True.
            _check_return_type (bool): specifies if type checking
                should be done one the data received from the server.
                Default is True.
            _host_index (int/None): specifies the index of the server
                that we want to use.
                Default is read from the configuration.
            async_req (bool): execute request asynchronously
        Returns:
            CloudWorkloadSecurityAgentRulesListResponse
            If the method is called asynchronously, returns the request
            thread.
        """
        kwargs = self._list_cloud_workload_security_agent_rules_endpoint.default_arguments(kwargs)
        return self._list_cloud_workload_security_agent_rules_endpoint.call_with_http_info(**kwargs)

    def update_cloud_workload_security_agent_rule(self, agent_rule_id, body, **kwargs):
        """Update a Cloud Workload Security Agent rule
        Update a specific Agent rule. Returns the Agent rule object when the request is successful.
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True.
        >>> thread = api.update_cloud_workload_security_agent_rule(agent_rule_id, body, async_req=True)
        >>> result = thread.get()
        Args:
            agent_rule_id (str): The ID of the Agent rule.
            body (CloudWorkloadSecurityAgentRuleUpdateRequest): New definition of the Agent rule.
        Keyword Args:
            _return_http_data_only (bool): response data without head status
                code and headers. Default is True.
            _preload_content (bool): if False, the urllib3.HTTPResponse object
                will be returned without reading/decoding response data.
                Default is True.
            _request_timeout (float/tuple): timeout setting for this request. If one
                number provided, it will be total request timeout. It can also
                be a pair (tuple) of (connection, read) timeouts.
                Default is None.
            _check_input_type (bool): specifies if type checking
                should be done one the data sent to the server.
                Default is True.
            _check_return_type (bool): specifies if type checking
                should be done one the data received from the server.
                Default is True.
            _host_index (int/None): specifies the index of the server
                that we want to use.
                Default is read from the configuration.
            async_req (bool): execute request asynchronously
        Returns:
            CloudWorkloadSecurityAgentRuleResponse
            If the method is called asynchronously, returns the request
            thread.
        """
        kwargs = self._update_cloud_workload_security_agent_rule_endpoint.default_arguments(kwargs)
        kwargs["agent_rule_id"] = agent_rule_id
        kwargs["body"] = body
        return self._update_cloud_workload_security_agent_rule_endpoint.call_with_http_info(**kwargs)
| 46.426506 | 269 | 0.617169 | 2,100 | 19,267 | 5.409048 | 0.099524 | 0.051501 | 0.086891 | 0.100713 | 0.864425 | 0.836693 | 0.795933 | 0.766529 | 0.740822 | 0.727881 | 0 | 0.001815 | 0.313645 | 19,267 | 414 | 270 | 46.538647 | 0.857154 | 0.499092 | 0 | 0.481928 | 0 | 0 | 0.217397 | 0.079931 | 0 | 0 | 0 | 0 | 0 | 1 | 0.042169 | false | 0 | 0.036145 | 0 | 0.120482 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
b4cd594c8b8593cd04962b3cb03ca1140e3ebc3d | 65,370 | py | Python | tests/eukaryote/test_metabolism.py | KarrLab/model_generator | b2735391545bcd5f21faaa1ceaa1949e53497378 | [
"MIT"
] | 6 | 2018-12-24T16:20:11.000Z | 2022-01-26T23:38:25.000Z | tests/eukaryote/test_metabolism.py | KarrLab/model_generator | b2735391545bcd5f21faaa1ceaa1949e53497378 | [
"MIT"
] | 15 | 2018-08-08T20:34:40.000Z | 2021-10-31T20:08:40.000Z | tests/eukaryote/test_metabolism.py | KarrLab/model_generator | b2735391545bcd5f21faaa1ceaa1949e53497378 | [
"MIT"
] | 2 | 2019-04-05T16:11:57.000Z | 2020-04-29T14:14:30.000Z | """ Tests of metabolism submodel generation
:Author: Yin Hoon Chew <yinhoon.chew@mssm.edu>
:Date: 2020-02-03
:Copyright: 2019-2020, Karr Lab
:License: MIT
"""
from wc_model_gen.eukaryote import metabolism
from wc_onto import onto as wc_ontology
from wc_utils.util.units import unit_registry
import collections
import conv_opt
import math
import numpy
import os
import scipy.constants
import shutil
import tempfile
import unittest
import wc_lang
import wc_kb
class MetabolismSubmodelGeneratorTestCase(unittest.TestCase):
def setUp(self):
    """Build a minimal knowledge base and wc_lang model fixture.

    Creates a scratch directory (removed in tearDown), a fresh
    ``wc_kb.KnowledgeBase`` and ``wc_lang.Model``, three metabolite
    species (m1-m3) and three enzyme species in compartment ``c``,
    then a metabolism submodel holding three exchange reactions
    (ex_m1..ex_m3) and four internal reactions (r1-r4). r1-r3 get
    forward (and, where reversible, backward) rate laws of the form
    ``k_cat * enzyme``; r4 is created with no rate law.
    """
    # Scratch directory for any file output; deleted by tearDown.
    self.tmp_dirname = tempfile.mkdtemp()
    kb = self.kb = wc_kb.KnowledgeBase()
    model = self.model = wc_lang.Model()
    # Global model parameters; mean_doubling_time is ln(2)/12
    # (presumably so the growth rate constant comes out as 1/12 —
    # TODO(review): confirm against the generator's usage).
    model.parameters.create(id='mean_doubling_time', value=math.log(2)/12)
    model.parameters.create(id='cell_volume', value=0.1)
    c = model.compartments.create(id='c')
    # Create metabolites m1, m2, m3 in compartment c
    for m in [1,2,3]:
        metabolite_st = model.species_types.create(id='m{}'.format(m))
        metabolite_species = model.species.create(species_type=metabolite_st, compartment=c)
        metabolite_species.id = metabolite_species.gen_id()
    # Create enzymes with initial concentrations; enzyme3 deliberately
    # starts at zero concentration.
    for i, conc in {'enzyme1':0.01, 'enzyme2': 0.01, 'enzyme3': 0.}.items():
        enzyme_st = model.species_types.create(id=i)
        enzyme_species = model.species.create(species_type=enzyme_st, compartment=c)
        enzyme_species.id = enzyme_species.gen_id()
        conc_model = model.distribution_init_concentrations.create(species=enzyme_species, mean=conc)
        conc_model.id = conc_model.gen_id()
    # Create reactions in metabolism submodel
    # (instantiating the generator creates the 'metabolism' submodel)
    self.gen = metabolism.MetabolismSubmodelGenerator(kb, model)
    submodel = model.submodels.get_one(id='metabolism')
    # Irreversible exchange reactions producing each metabolite
    ex1 = submodel.reactions.create(id='ex_m1', reversible=False, model=model)
    ex1.participants.append(model.species.get_one(id='m1[c]').species_coefficients.get_or_create(coefficient=1))
    ex2 = submodel.reactions.create(id='ex_m2', reversible=False, model=model)
    ex2.participants.append(model.species.get_one(id='m2[c]').species_coefficients.get_or_create(coefficient=1))
    ex3 = submodel.reactions.create(id='ex_m3', reversible=False, model=model)
    ex3.participants.append(model.species.get_one(id='m3[c]').species_coefficients.get_or_create(coefficient=1))
    # r1 (reversible): m1 + m2 <-> m3, catalyzed by enzyme3
    r1 = submodel.reactions.create(id='r1', reversible=True, model=model)
    r1.participants.append(model.species.get_one(id='m1[c]').species_coefficients.get_or_create(coefficient=-1))
    r1.participants.append(model.species.get_one(id='m2[c]').species_coefficients.get_or_create(coefficient=-1))
    r1.participants.append(model.species.get_one(id='m3[c]').species_coefficients.get_or_create(coefficient=1))
    # Forward rate law for r1: k_cat * [enzyme3]
    r1_rate_law_expression1, error = wc_lang.RateLawExpression.deserialize('k_cat_r1_forward_enzyme3 * enzyme3[c]', {
        wc_lang.Parameter: {'k_cat_r1_forward_enzyme3': model.parameters.create(id='k_cat_r1_forward_enzyme3')},
        wc_lang.Species: {'enzyme3[c]': model.species.get_one(id='enzyme3[c]')},
        })
    assert error is None, str(error)
    r1_model_rate_law1 = model.rate_laws.create(
        expression=r1_rate_law_expression1,
        reaction=r1,
        direction=wc_lang.RateLawDirection['forward'])
    r1_model_rate_law1.id = r1_model_rate_law1.gen_id()
    # Backward rate law for r1
    r1_rate_law_expression2, error = wc_lang.RateLawExpression.deserialize('k_cat_r1_backward_enzyme3 * enzyme3[c]', {
        wc_lang.Parameter: {'k_cat_r1_backward_enzyme3': model.parameters.create(id='k_cat_r1_backward_enzyme3')},
        wc_lang.Species: {'enzyme3[c]': model.species.get_one(id='enzyme3[c]')},
        })
    assert error is None, str(error)
    r1_model_rate_law2 = model.rate_laws.create(
        expression=r1_rate_law_expression2,
        reaction=r1,
        direction=wc_lang.RateLawDirection['backward'])
    r1_model_rate_law2.id = r1_model_rate_law2.gen_id()
    # r2 (reversible): m1 <-> m2, catalyzed by enzyme1 and enzyme2;
    # only k_cat_r2_forward_enzyme1 is given a value (100.)
    r2 = submodel.reactions.create(id='r2', reversible=True, model=model)
    r2.participants.append(model.species.get_one(id='m1[c]').species_coefficients.get_or_create(coefficient=-1))
    r2.participants.append(model.species.get_one(id='m2[c]').species_coefficients.get_or_create(coefficient=1))
    r2_rate_law_expression1, error = wc_lang.RateLawExpression.deserialize('k_cat_r2_forward_enzyme1 * enzyme1[c] + k_cat_r2_forward_enzyme2 * enzyme2[c]', {
        wc_lang.Parameter: {'k_cat_r2_forward_enzyme1': model.parameters.create(id='k_cat_r2_forward_enzyme1', value=100.),
            'k_cat_r2_forward_enzyme2': model.parameters.create(id='k_cat_r2_forward_enzyme2')},
        wc_lang.Species: {'enzyme1[c]': model.species.get_one(id='enzyme1[c]'),
            'enzyme2[c]': model.species.get_one(id='enzyme2[c]')},
        })
    assert error is None, str(error)
    r2_model_rate_law1 = model.rate_laws.create(
        expression=r2_rate_law_expression1,
        reaction=r2,
        direction=wc_lang.RateLawDirection['forward'])
    r2_model_rate_law1.id = r2_model_rate_law1.gen_id()
    r2_rate_law_expression2, error = wc_lang.RateLawExpression.deserialize('k_cat_r2_backward_enzyme1 * enzyme1[c] + k_cat_r2_backward_enzyme2 * enzyme2[c]', {
        wc_lang.Parameter: {'k_cat_r2_backward_enzyme1': model.parameters.create(id='k_cat_r2_backward_enzyme1'),
            'k_cat_r2_backward_enzyme2': model.parameters.create(id='k_cat_r2_backward_enzyme2')},
        wc_lang.Species: {'enzyme1[c]': model.species.get_one(id='enzyme1[c]'),
            'enzyme2[c]': model.species.get_one(id='enzyme2[c]')},
        })
    assert error is None, str(error)
    r2_model_rate_law2 = model.rate_laws.create(
        expression=r2_rate_law_expression2,
        reaction=r2,
        direction=wc_lang.RateLawDirection['backward'])
    r2_model_rate_law2.id = r2_model_rate_law2.gen_id()
    # r3 (irreversible): 2 m1 -> m3, catalyzed by enzyme2 with
    # k_cat_r3_forward_enzyme2 = 200.
    r3 = submodel.reactions.create(id='r3', reversible=False, model=model)
    r3.participants.append(model.species.get_one(id='m1[c]').species_coefficients.get_or_create(coefficient=-2))
    r3.participants.append(model.species.get_one(id='m3[c]').species_coefficients.get_or_create(coefficient=1))
    r3_rate_law_expression, error = wc_lang.RateLawExpression.deserialize('k_cat_r3_forward_enzyme2 * enzyme2[c]', {
        wc_lang.Parameter: {'k_cat_r3_forward_enzyme2': model.parameters.create(id='k_cat_r3_forward_enzyme2', value=200.)},
        wc_lang.Species: {'enzyme2[c]': model.species.get_one(id='enzyme2[c]')},
        })
    assert error is None, str(error)
    r3_model_rate_law = model.rate_laws.create(
        expression=r3_rate_law_expression,
        reaction=r3,
        direction=wc_lang.RateLawDirection['forward'])
    r3_model_rate_law.id = r3_model_rate_law.gen_id()
    # r4 (irreversible): 2 m2 -> m3; intentionally left without a
    # rate law.
    r4 = submodel.reactions.create(id='r4', reversible=False, model=model)
    r4.participants.append(model.species.get_one(id='m2[c]').species_coefficients.get_or_create(coefficient=-2))
    r4.participants.append(model.species.get_one(id='m3[c]').species_coefficients.get_or_create(coefficient=1))
def tearDown(self):
shutil.rmtree(self.tmp_dirname)
def test_gen_reactions_and_rate_laws(self):
# Create KB content
kb = wc_kb.KnowledgeBase()
cell = kb.cell = wc_kb.Cell()
mito = cell.compartments.create(id='m')
cytoplasm = cell.compartments.create(id='c')
nucleus = cell.compartments.create(id='n')
extracellular = cell.compartments.create(id='e')
cell.parameters.create(id='total_carbohydrate_mass', value=2000+20/scipy.constants.Avogadro)
cell.parameters.create(id='total_lipid_mass', value=4700)
sequence_path = os.path.join(self.tmp_dirname, 'test_seq.fasta')
with open(sequence_path, 'w') as f:
f.write('>chr1\nATGCATGACTCTAGTTTAT\n'
'>chrM\nTTTatgaCTCTAGTTTACNNN\n')
chr1 = wc_kb.core.DnaSpeciesType(cell=cell, id='chr1', sequence_path=sequence_path,
ploidy=2, circular=False, double_stranded=True)
chrM = wc_kb.core.DnaSpeciesType(cell=cell, id='chrM', sequence_path=sequence_path,
ploidy=300, circular=False, double_stranded=True)
trans1 = wc_kb.eukaryote.TranscriptSpeciesType(cell=cell, id='trans1')
trans1_half_life = wc_kb.core.SpeciesTypeProperty(property='half-life', species_type=trans1,
value='10000.0', value_type=wc_ontology['WC:float'])
trans1_species = wc_kb.core.Species(species_type=trans1, compartment=cytoplasm)
prot1 = wc_kb.eukaryote.ProteinSpeciesType(cell=cell, id='prot1', transcript=trans1)
prot1_half_life = wc_kb.core.SpeciesTypeProperty(property='half-life', species_type=prot1,
value='20000.0', value_type=wc_ontology['WC:float'])
trans2 = wc_kb.eukaryote.TranscriptSpeciesType(cell=cell, id='trans2')
trans2_half_life = wc_kb.core.SpeciesTypeProperty(property='half-life', species_type=trans2,
value='10000.0', value_type=wc_ontology['WC:float'])
trans2_species = wc_kb.core.Species(species_type=trans2, compartment=cytoplasm)
prot2 = wc_kb.eukaryote.ProteinSpeciesType(cell=cell, id='prot2', transcript=trans2)
prot2_half_life = wc_kb.core.SpeciesTypeProperty(property='half-life', species_type=prot2,
value='20000.0', value_type=wc_ontology['WC:float'])
trans3 = wc_kb.eukaryote.TranscriptSpeciesType(cell=cell, id='trans3')
trans3_half_life = wc_kb.core.SpeciesTypeProperty(property='half-life', species_type=trans3,
value='10000.0', value_type=wc_ontology['WC:float'])
trans3_species = wc_kb.core.Species(species_type=trans3, compartment=mito)
prot3 = wc_kb.eukaryote.ProteinSpeciesType(cell=cell, id='prot3', transcript=trans3)
prot3_half_life = wc_kb.core.SpeciesTypeProperty(property='half-life', species_type=prot3,
value='20000.0', value_type=wc_ontology['WC:float'])
trans4 = wc_kb.eukaryote.TranscriptSpeciesType(cell=cell, id='trans4')
trans4_half_life = wc_kb.core.SpeciesTypeProperty(property='half-life', species_type=trans4,
value='10000.0', value_type=wc_ontology['WC:float'])
trans4_species = wc_kb.core.Species(species_type=trans4, compartment=mito)
ala_L = wc_kb.core.MetaboliteSpeciesType(cell=cell, id='ala_L')
ala_L_species = wc_kb.core.Species(species_type=ala_L, compartment=extracellular)
exchange_rxn_kb = cell.reactions.create(id='EX_ala_L_e_', name='exchange ala_L',
reversible=True, comments='random comments')
ala_L_coef = wc_kb.core.SpeciesCoefficient(species=ala_L_species,
coefficient=-1)
exchange_rxn_kb.participants.append(ala_L_coef)
met_L = wc_kb.core.MetaboliteSpeciesType(cell=cell, id='met_L')
met_L_species = wc_kb.core.Species(species_type=met_L, compartment=extracellular)
exchange_rxn_kb = cell.reactions.create(id='EX_met_L_e', name='exchange met_L',
reversible=True, comments='random comments')
met_L_coef = wc_kb.core.SpeciesCoefficient(species=met_L_species,
coefficient=-1)
exchange_rxn_kb.participants.append(met_L_coef)
g6p = wc_kb.core.MetaboliteSpeciesType(cell=cell, id='g6p')
g6p_species = wc_kb.core.Species(species_type=g6p, compartment=extracellular)
exchange_rxn_kb = cell.reactions.create(id='EX_g6p_e', name='exchange g6p',
reversible=True, comments='random comments')
g6p_coef = wc_kb.core.SpeciesCoefficient(species=g6p_species,
coefficient=-1)
exchange_rxn_kb.participants.append(g6p_coef)
met1 = wc_kb.core.MetaboliteSpeciesType(cell=cell, id='m1')
enzyme1 = wc_kb.core.ComplexSpeciesType(cell=cell, id='enzyme1')
enzyme1_species = wc_kb.core.Species(species_type=enzyme1, compartment=cytoplasm)
enzyme1.subunits.append(wc_kb.core.SpeciesTypeCoefficient(
species_type=met1,
coefficient=0))
met2 = wc_kb.core.MetaboliteSpeciesType(cell=cell, id='m2')
enzyme2 = wc_kb.core.ComplexSpeciesType(cell=cell, id='enzyme2')
enzyme2_species = wc_kb.core.Species(species_type=enzyme2, compartment=cytoplasm)
enzyme2.subunits.append(wc_kb.core.SpeciesTypeCoefficient(
species_type=met2,
coefficient=2))
enzyme2.subunits.append(wc_kb.core.SpeciesTypeCoefficient(
species_type=met1,
coefficient=0))
met3 = wc_kb.core.MetaboliteSpeciesType(cell=cell, id='m3')
enzyme3 = wc_kb.core.ComplexSpeciesType(cell=cell, id='enzyme3')
enzyme3_species = wc_kb.core.Species(species_type=enzyme3, compartment=cytoplasm)
enzyme3.subunits.append(wc_kb.core.SpeciesTypeCoefficient(
species_type=met3,
coefficient=2))
# Create initial model content
model = wc_lang.Model()
model.parameters.create(id='mean_doubling_time', value=20000)
for i in cell.species_types.get(__type=wc_kb.core.ComplexSpeciesType):
model_species_type = model.species_types.create(id=i.id, type=wc_ontology['WC:pseudo_species'])
model_compartment = model.compartments.get_or_create(id=i.species[0].compartment.id)
model_species = model.species.get_or_create(species_type=model_species_type, compartment=model_compartment)
model_species.id = model_species.gen_id()
conc_model = model.distribution_init_concentrations.create(species=model_species, mean=10.)
conc_model.id = conc_model.gen_id()
model.species.get_one(id='enzyme3[c]').distribution_init_concentration.mean = 0.
for i in ['m1', 'm2', 'm3']: # cofactor for the enzymes above
model_species_type = model.species_types.create(id=i, type=wc_ontology['WC:metabolite'])
model_compartment = model.compartments.get_or_create(id='c')
model_species = model.species.get_or_create(species_type=model_species_type, compartment=model_compartment)
model_species.id = model_species.gen_id()
for i in cell.species_types.get(__type=wc_kb.eukaryote.TranscriptSpeciesType):
model_species_type = model.species_types.create(id=i.id, type=wc_ontology['WC:RNA'])
model_compartment = model.compartments.get_or_create(id=i.species[0].compartment.id)
model_species = model.species.get_or_create(species_type=model_species_type, compartment=model_compartment)
model_species.id = model_species.gen_id()
conc_model = model.distribution_init_concentrations.create(species=model_species, mean=10.)
conc_model.id = conc_model.gen_id()
if i.protein:
model_species_type = model.species_types.create(id=i.protein.id, type=wc_ontology['WC:protein'])
model_compartment = model.compartments.get_or_create(id=i.species[0].compartment.id)
model_species = model.species.get_or_create(species_type=model_species_type, compartment=model_compartment)
model_species.id = model_species.gen_id()
conc_model = model.distribution_init_concentrations.create(species=model_species, mean=2.)
conc_model.id = conc_model.gen_id()
if i.protein.id == 'prot1':
model_compartment = model.compartments.get_or_create(id='n')
model_species = model.species.get_or_create(species_type=model_species_type, compartment=model_compartment)
model_species.id = model_species.gen_id()
conc_model = model.distribution_init_concentrations.create(species=model_species, mean=2.)
conc_model.id = conc_model.gen_id()
compartment_list = {'n': 'nucleus', 'm': 'mitochondria', 'c': 'cytosol', 'l': 'lysosome', 'e': 'extracellular'}
for i,v in compartment_list.items():
c = model.compartments.get_or_create(id=i)
c.name = v
c.init_density = model.parameters.get_or_create(id='density_' + c.id, units=unit_registry.parse_units('g l^-1'))
c.init_volume = wc_lang.core.InitVolume(distribution=wc_ontology['WC:normal_distribution'],
mean=0.5, std=0)
volume = model.functions.create(id='volume_' + c.id, units=unit_registry.parse_units('l'))
volume.expression, error = wc_lang.FunctionExpression.deserialize(f'{c.id} / {c.init_density.id}', {
wc_lang.Compartment: {c.id: c},
wc_lang.Parameter: {c.init_density.id: c.init_density},
})
assert error is None, str(error)
metabolic_participants = ['atp', 'ctp', 'gtp', 'utp', 'datp', 'dttp', 'dgtp', 'dctp', 'ppi', 'amp', 'cmp', 'rec', 'pool',
'gmp', 'ump', 'h2o', 'h', 'adp', 'pi', 'gdp', 'ala_L', 'met_L', 'selnp', 'g6p', 'chsterol', 'pail_hs']
for i in metabolic_participants:
model_species_type = model.species_types.create(id=i, type=wc_ontology['WC:metabolite'])
for j in ['n', 'm', 'c', 'l', 'e']:
model_compartment = model.compartments.get_one(id=j)
model_species = model.species.get_or_create(species_type=model_species_type, compartment=model_compartment)
model_species.id = model_species.gen_id()
conc_model = model.distribution_init_concentrations.create(species=model.species.get_one(id='pool[c]'), mean=25.)
conc_model.id = conc_model.gen_id()
conc_model = model.distribution_init_concentrations.create(species=model.species.get_one(id='g6p[e]'), mean=25.)
conc_model.id = conc_model.gen_id()
structure_info = {'g6p': ('C6H13O9P', 220., 1), 'chsterol': ('C27H46O4S', 350., 0), 'pail_hs': ('C41H78O13P', 500., -1), 'h2o': ('H2O', 20., 0.)}
for k, v in structure_info.items():
model_species_type = model.species_types.get_one(id=k)
model_species_type.structure = wc_lang.ChemicalStructure()
model_species_type.structure.empirical_formula = v[0]
model_species_type.structure.molecular_weight = v[1]
model_species_type.structure.charge = v[2]
conc_model = model.distribution_init_concentrations.create(species=model.species.get_one(id='g6p[c]'), mean=15.)
conc_model.id = conc_model.gen_id()
conc_model = model.distribution_init_concentrations.create(species=model.species.get_one(id='pail_hs[c]'), mean=0.)
conc_model.id = conc_model.gen_id()
others = ['polr2', 'ribosome', 'polr_bound_non_specific_species',
'polr_binding_site_species', 'polr_bound_species', 'polr_non_specific_binding_site_species',
'ribo_binding_site_species', 'ribo_bound_species']
for i in others:
model_species_type = model.species_types.create(id=i, type=wc_ontology['WC:pseudo_species'])
for j in ['n', 'm', 'c']:
model_compartment = model.compartments.get_one(id=j)
model_species = model.species.get_or_create(species_type=model_species_type, compartment=model_compartment)
model_species.id = model_species.gen_id()
g6p_exc = model.reactions.create(id='EX_g6p_e_kb', reversible=True, participants=[
model.species.get_one(id='g6p[e]').species_coefficients.get_or_create(coefficient=-1)])
met_L_exc = model.reactions.create(id='EX_met_L_e_kb', reversible=True, participants=[
model.species.get_one(id='met_L[e]').species_coefficients.get_or_create(coefficient=-1)])
# Create transcription submodel
transcription_submodel = model.submodels.create(id='transcription')
for i in cell.species_types.get(__type=wc_kb.eukaryote.TranscriptSpeciesType):
transcription_compartment = 'n' if i.species[0].compartment.id=='c' else 'm'
translation_compartment = 'c' if i.species[0].compartment.id=='c' else 'm'
# Initiation
init_reaction = model.reactions.create(id='transcription_initiation_' + i.id, submodel=transcription_submodel)
init_reaction.participants.append(model.species.get_one(
id='polr_bound_non_specific_species[{}]'.format(transcription_compartment)).species_coefficients.get_or_create(
coefficient=-1))
init_reaction.participants.append(model.species.get_one(
id='polr_binding_site_species[{}]'.format(transcription_compartment)).species_coefficients.get_or_create(
coefficient=-1))
init_reaction.participants.append(model.species.get_one(
id='polr_bound_species[{}]'.format(transcription_compartment)).species_coefficients.get_or_create(
coefficient=1))
init_reaction.participants.append(model.species.get_one(
id='polr_non_specific_binding_site_species[{}]'.format(transcription_compartment)).species_coefficients.get_or_create(
coefficient=1))
if i.id == 'trans1':
init_reaction.participants.append(model.species.get_one(
id='atp[{}]'.format(transcription_compartment)).species_coefficients.get_or_create(
coefficient=-2))
init_reaction.participants.append(model.species.get_one(
id='h2o[{}]'.format(transcription_compartment)).species_coefficients.get_or_create(
coefficient=-2))
init_reaction.participants.append(model.species.get_one(
id='adp[{}]'.format(transcription_compartment)).species_coefficients.get_or_create(
coefficient=2))
init_reaction.participants.append(model.species.get_one(
id='pi[{}]'.format(transcription_compartment)).species_coefficients.get_or_create(
coefficient=2))
init_reaction.participants.append(model.species.get_one(
id='h[{}]'.format(transcription_compartment)).species_coefficients.get_or_create(
coefficient=2))
# Elongation
reaction = model.reactions.get_or_create(id='transcription_elongation_' + i.id, submodel=transcription_submodel)
reaction.participants.append(model.species.get_one(
id='polr_bound_species[{}]'.format(transcription_compartment)).species_coefficients.get_or_create(
coefficient=-1))
reaction.participants.append(model.species.get_one(
id='atp[{}]'.format(transcription_compartment)).species_coefficients.get_or_create(
coefficient=-2))
reaction.participants.append(model.species.get_one(
id='ctp[{}]'.format(transcription_compartment)).species_coefficients.get_or_create(
coefficient=-2))
reaction.participants.append(model.species.get_one(
id='gtp[{}]'.format(transcription_compartment)).species_coefficients.get_or_create(
coefficient=-2))
reaction.participants.append(model.species.get_one(
id='utp[{}]'.format(transcription_compartment)).species_coefficients.get_or_create(
coefficient=-2))
reaction.participants.append(model.species.get_one(
id='h2o[{}]'.format(transcription_compartment)).species_coefficients.get_or_create(
coefficient=-3))
reaction.participants.append(model.species.get_one(
id='{}[{}]'.format(i.id, translation_compartment)).species_coefficients.get_or_create(
coefficient=1))
reaction.participants.append(model.species.get_one(
id='ppi[{}]'.format(transcription_compartment)).species_coefficients.get_or_create(
coefficient=7))
reaction.participants.append(model.species.get_one(
id='amp[{}]'.format(transcription_compartment)).species_coefficients.get_or_create(
coefficient=2))
reaction.participants.append(model.species.get_one(
id='cmp[{}]'.format(transcription_compartment)).species_coefficients.get_or_create(
coefficient=2))
reaction.participants.append(model.species.get_one(
id='gmp[{}]'.format(transcription_compartment)).species_coefficients.get_or_create(
coefficient=2))
reaction.participants.append(model.species.get_one(
id='ump[{}]'.format(transcription_compartment)).species_coefficients.get_or_create(
coefficient=2))
reaction.participants.append(model.species.get_one(
id='h[{}]'.format(transcription_compartment)).species_coefficients.get_or_create(
coefficient=3))
reaction.participants.append(model.species.get_one(
id='polr2[{}]'.format(transcription_compartment)).species_coefficients.get_or_create(
coefficient=1))
reaction.participants.append(model.species.get_one(
id='polr_binding_site_species[{}]'.format(transcription_compartment)).species_coefficients.get_or_create(
coefficient=1))
if i.protein:
reaction.participants.append(model.species.get_one(
id='ribo_binding_site_species[{}]'.format(translation_compartment)).species_coefficients.get_or_create(
coefficient=1))
# Create RNA degradation submodel
rna_deg_submodel = model.submodels.create(id='rna_degradation')
for i in cell.species_types.get(__type=wc_kb.eukaryote.TranscriptSpeciesType):
reaction = model.reactions.get_or_create(id='degradation_' + i.id, submodel=rna_deg_submodel)
reaction.participants.append(model.species.get_one(
id='{}[{}]'.format(i.id, i.species[0].compartment.id)).species_coefficients.get_or_create(coefficient=-1))
reaction.participants.append(model.species.get_one(
id='h2o[{}]'.format(i.species[0].compartment.id)).species_coefficients.get_or_create(coefficient=-3))
if i.protein:
reaction.participants.append(model.species.get_one(
id='ribo_binding_site_species[{}]'.format(i.species[0].compartment.id)).species_coefficients.get_or_create(
coefficient=-1))
reaction.participants.append(model.species.get_one(
id='amp[{}]'.format(i.species[0].compartment.id)).species_coefficients.get_or_create(coefficient=1))
reaction.participants.append(model.species.get_one(
id='cmp[{}]'.format(i.species[0].compartment.id)).species_coefficients.get_or_create(coefficient=1))
reaction.participants.append(model.species.get_one(
id='gmp[{}]'.format(i.species[0].compartment.id)).species_coefficients.get_or_create(coefficient=1))
reaction.participants.append(model.species.get_one(
id='ump[{}]'.format(i.species[0].compartment.id)).species_coefficients.get_or_create(coefficient=1))
reaction.participants.append(model.species.get_one(
id='h[{}]'.format(i.species[0].compartment.id)).species_coefficients.get_or_create(coefficient=3))
# Create translation and translocation submodel
translation_submodel = model.submodels.create(id='translation_translocation')
for i in cell.species_types.get(__type=wc_kb.eukaryote.TranscriptSpeciesType):
translation_compartment = 'c' if i.species[0].compartment.id=='c' else 'm'
if i.protein:
# Initiation
init_reaction = model.reactions.create(id='translation_initiation_' + i.id, submodel=translation_submodel)
init_reaction.participants.append(model.species.get_one(
id='ribosome[{}]'.format(translation_compartment)).species_coefficients.get_or_create(
coefficient=-1))
init_reaction.participants.append(model.species.get_one(
id='ribo_binding_site_species[{}]'.format(translation_compartment)).species_coefficients.get_or_create(
coefficient=-1))
init_reaction.participants.append(model.species.get_one(
id='met_L[{}]'.format(translation_compartment)).species_coefficients.get_or_create(
coefficient=-1))
init_reaction.participants.append(model.species.get_one(
id='h2o[{}]'.format(translation_compartment)).species_coefficients.get_or_create(coefficient=-5))
init_reaction.participants.append(model.species.get_one(
id='atp[{}]'.format(translation_compartment)).species_coefficients.get_or_create(coefficient=-2))
init_reaction.participants.append(model.species.get_one(
id='gtp[{}]'.format(translation_compartment)).species_coefficients.get_or_create(coefficient=-2))
init_reaction.participants.append(model.species.get_one(
id='ribo_bound_species[{}]'.format(translation_compartment)).species_coefficients.get_or_create(coefficient=1))
init_reaction.participants.append(model.species.get_one(
id='h[{}]'.format(translation_compartment)).species_coefficients.get_or_create(coefficient=5))
init_reaction.participants.append(model.species.get_one(
id='amp[{}]'.format(translation_compartment)).species_coefficients.get_or_create(coefficient=1))
init_reaction.participants.append(model.species.get_one(
id='adp[{}]'.format(translation_compartment)).species_coefficients.get_or_create(coefficient=1))
init_reaction.participants.append(model.species.get_one(
id='gdp[{}]'.format(translation_compartment)).species_coefficients.get_or_create(coefficient=2))
init_reaction.participants.append(model.species.get_one(
id='pi[{}]'.format(translation_compartment)).species_coefficients.get_or_create(coefficient=5))
# Elongation
el_reaction = model.reactions.get_or_create(id='translation_elongation_' + i.id, submodel=translation_submodel)
el_reaction.participants.append(model.species.get_one(
id='ribo_bound_species[{}]'.format(translation_compartment)).species_coefficients.get_or_create(coefficient=-1))
el_reaction.participants.append(model.species.get_one(
id='gtp[{}]'.format(translation_compartment)).species_coefficients.get_or_create(coefficient=-3))
el_reaction.participants.append(model.species.get_one(
id='atp[{}]'.format(translation_compartment)).species_coefficients.get_or_create(coefficient=-2))
el_reaction.participants.append(model.species.get_one(
id='h2o[{}]'.format(translation_compartment)).species_coefficients.get_or_create(coefficient=-5))
el_reaction.participants.append(model.species.get_one(
id='ala_L[{}]'.format(translation_compartment)).species_coefficients.get_or_create(coefficient=-3))
el_reaction.participants.append(model.species.get_one(
id='ribosome[{}]'.format(translation_compartment)).species_coefficients.get_or_create(coefficient=1))
el_reaction.participants.append(model.species.get_one(
id='ribo_binding_site_species[{}]'.format(translation_compartment)).species_coefficients.get_or_create(coefficient=1))
el_reaction.participants.append(model.species.get_one(
id='{}[{}]'.format(i.protein.id, translation_compartment)).species_coefficients.get_or_create(coefficient=1))
el_reaction.participants.append(model.species.get_one(
id='amp[{}]'.format(translation_compartment)).species_coefficients.get_or_create(coefficient=2))
el_reaction.participants.append(model.species.get_one(
id='gdp[{}]'.format(translation_compartment)).species_coefficients.get_or_create(coefficient=3))
el_reaction.participants.append(model.species.get_one(
id='pi[{}]'.format(translation_compartment)).species_coefficients.get_or_create(coefficient=7))
el_reaction.participants.append(model.species.get_one(
id='h[{}]'.format(translation_compartment)).species_coefficients.get_or_create(coefficient=3))
if i.protein.id == 'prot1':
# Translocation
trans_reaction = model.reactions.get_or_create(id='translocation_prot1_c_to_n', submodel=translation_submodel)
trans_reaction.participants.append(model.species.get_one(id='prot1[c]').species_coefficients.get_or_create(
coefficient=-1))
trans_reaction.participants.append(model.species.get_one(id='gtp[n]').species_coefficients.get_or_create(coefficient=-1))
trans_reaction.participants.append(model.species.get_one(id='h2o[n]').species_coefficients.get_or_create(coefficient=-1))
trans_reaction.participants.append(model.species.get_one(id='prot1[n]').species_coefficients.get_or_create(coefficient=1))
trans_reaction.participants.append(model.species.get_one(id='gdp[n]').species_coefficients.get_or_create(coefficient=1))
trans_reaction.participants.append(model.species.get_one(id='pi[n]').species_coefficients.get_or_create(coefficient=1))
trans_reaction.participants.append(model.species.get_one(id='h[n]').species_coefficients.get_or_create(coefficient=1))
# Create protein degradation submodel
prot_deg_submodel = model.submodels.create(id='protein_degradation')
degradation_comp = model.compartments.get_one(id='l')
for protein_model in model.species_types.get(type=wc_ontology['WC:protein']):
for protein_sp in protein_model.species:
model_rxn = model.reactions.create(id='{}_{}_degradation'.format(protein_model.id, protein_sp.compartment.id), submodel=prot_deg_submodel)
model_rxn.participants.add(protein_sp.species_coefficients.get_or_create(coefficient=-1))
model_rxn.participants.add(model.species.get_one(id='ala_L[l]').species_coefficients.get_or_create(coefficient=3))
model_rxn.participants.add(model.species.get_one(id='met_L[l]').species_coefficients.get_or_create(coefficient=1))
model_rxn.participants.add(model.species.get_one(id='h2o[l]').species_coefficients.get_or_create(coefficient=-3))
gen = metabolism.MetabolismSubmodelGenerator(kb, model, options={
'recycled_metabolites': {'rec[m]': 100},
'carbohydrate_components': {'g6p[c]': 1.},
'lipid_components': {'chsterol[c]': 0.2, 'pail_hs[c]': 0.8},
'amino_acid_ids': ['ala_L', 'met_L'],
'media_fluxes': {'EX_ala_L_e_': (None, 20.)},
'exchange_reactions': ['EX_ala_L_e_', 'EX_met_L_e', 'EX_g6p_e'],
})
g6p_exc.submodel = gen.submodel
met_L_exc.submodel = gen.submodel
gen.clean_and_validate_options()
gen.gen_reactions()
gen.gen_rate_laws()
self.assertEqual(model.reactions.get_one(id='EX_ala_L_e__kb').submodel, gen.submodel)
self.assertEqual(model.reactions.get_one(id='EX_ala_L_e__kb').name, 'exchange ala_L')
self.assertEqual(model.reactions.get_one(id='EX_ala_L_e__kb').reversible, True)
self.assertEqual(model.reactions.get_one(id='EX_ala_L_e__kb').comments, 'random comments')
self.assertEqual(numpy.isnan(model.reactions.get_one(id='EX_ala_L_e__kb').flux_bounds.min), True)
self.assertEqual(model.reactions.get_one(id='EX_ala_L_e__kb').flux_bounds.max, 20.)
self.assertEqual(model.reactions.get_one(id='EX_ala_L_e__kb').flux_bounds.units, unit_registry.parse_units('M s^-1'))
self.assertEqual(model.reactions.get_one(id='EX_met_L_e_kb').flux_bounds.min, 0.)
self.assertEqual(model.reactions.get_one(id='EX_met_L_e_kb').flux_bounds.max, 0.)
self.assertEqual(model.reactions.get_one(id='EX_met_L_e_kb').reversible, False)
self.assertEqual(numpy.isnan(model.reactions.get_one(id='EX_g6p_e_kb').flux_bounds.min), True)
self.assertEqual(numpy.isnan(model.reactions.get_one(id='EX_g6p_e_kb').flux_bounds.max), True)
self.assertEqual(model.reactions.get_one(id='EX_g6p_e_kb').reversible, True)
self.assertEqual(gen.submodel.dfba_obj.expression.expression, 'biomass_reaction')
self.assertEqual(len(gen.submodel.dfba_obj.expression.dfba_obj_reactions), 1)
biomass_reaction = gen.submodel.dfba_obj.expression.dfba_obj_reactions[0]
self.assertEqual({i.species.id: i.value for i in biomass_reaction.dfba_obj_species},
{'pool[c]': -25, 'rec[m]': 100, 'datp[n]': -26, 'dttp[n]': -26, 'dctp[n]': -12, 'dgtp[n]': -12,
'g6p[c]': -10*scipy.constants.Avogadro, 'chsterol[c]': -2*scipy.constants.Avogadro, 'pail_hs[c]': -8*scipy.constants.Avogadro,
'atp[n]': -180, 'ctp[n]': -120, 'gtp[n]': -124, 'utp[n]': -120, 'ppi[n]': 492, 'amp[n]': 120, 'cmp[n]': 120, 'gmp[n]': 120,
'ump[n]': 120, 'h2o[n]': -244, 'h[n]': 244, 'adp[n]': 60, 'pi[n]': 64, 'gdp[n]': 4, 'datp[m]': -4800, 'dttp[m]': -4800, 'dctp[m]': -1500, 'dgtp[m]': -1500,
'atp[m]': -136, 'ctp[m]': -120, 'gtp[m]': -140, 'utp[m]': -120, 'ppi[m]': 12420, 'amp[m]': 172, 'cmp[m]': 160, 'gmp[m]': 160,
'ump[m]': 160, 'h2o[m]': -340, 'h[m]': 332, 'adp[m]': 4, 'pi[m]': 48, 'gdp[m]': 20, 'ala_L[m]': -12, 'met_L[m]': -4,
'atp[c]': -48, 'gtp[c]': -60, 'amp[c]': 76, 'cmp[c]': 40, 'gmp[c]': 40, 'ump[c]': 40, 'h2o[c]': -240 + (10*scipy.constants.Avogadro - 1), 'h[c]': 216,
'adp[c]': 12, 'pi[c]': 144, 'gdp[c]': 60, 'ala_L[c]': -36, 'met_L[c]': -12, 'h2o[l]': -24, 'ala_L[l]': 24, 'met_L[l]': 8,
'm1[c]': -20, 'm2[c]': -20})
for i in biomass_reaction.dfba_obj_species:
self.assertEqual(i.units, unit_registry.parse_units('molecule cell^-1'))
self.assertEqual(model.species_types.get_one(id='carbohydrate').structure.molecular_weight, 2000.*scipy.constants.Avogadro + 20.)
self.assertEqual(model.species_types.get_one(id='carbohydrate').structure.charge, 10*scipy.constants.Avogadro)
self.assertEqual(model.species.get_one(id='carbohydrate[c]').distribution_init_concentration.mean, 1.)
self.assertEqual(model.species.get_one(id='carbohydrate[c]').distribution_init_concentration.units, unit_registry.parse_units('molecule'))
self.assertEqual(model.reactions.get_one(id='carbohydrate_formation').submodel.id, 'macromolecular_formation')
self.assertEqual(model.reactions.get_one(id='carbohydrate_formation').submodel.framework, wc_ontology['WC:next_reaction_method'])
self.assertEqual({i.species.id:i.coefficient for i in model.reactions.get_one(id='carbohydrate_formation').participants},
{'g6p[c]': -10*scipy.constants.Avogadro, 'carbohydrate[c]': 1., 'h2o[c]': 10*scipy.constants.Avogadro-1})
self.assertEqual(model.species_types.get_one(id='lipid').structure.charge, -8*scipy.constants.Avogadro)
self.assertEqual(model.species.get_one(id='lipid[c]').distribution_init_concentration.mean, 1.)
self.assertEqual(model.species.get_one(id='lipid[c]').distribution_init_concentration.units, unit_registry.parse_units('molecule'))
self.assertEqual(model.reactions.get_one(id='lipid_formation').submodel.id, 'macromolecular_formation')
self.assertEqual({i.species.id:i.coefficient for i in model.reactions.get_one(id='lipid_formation').participants},
{'chsterol[c]': -2*scipy.constants.Avogadro, 'pail_hs[c]': -8*scipy.constants.Avogadro, 'lipid[c]': 1.})
numpy.testing.assert_allclose(model.species_types.get_one(id='lipid').structure.molecular_weight, (2*350.+8*500.)*scipy.constants.Avogadro, rtol=1e-8)
self.assertEqual(model.parameters.get_one(id='k_cat_carbohydrate_formation').value, 2e06)
self.assertEqual(model.parameters.get_one(id='k_cat_carbohydrate_formation').units, unit_registry.parse_units('s^-1'))
self.assertEqual(model.parameters.get_one(id='k_cat_carbohydrate_formation').comments,
'A high rate constant was assigned so that the simulated rate of macromolecular formation will be within the higher range')
self.assertEqual(model.parameters.get_one(id='k_cat_lipid_formation').value, 2e06)
self.assertEqual(model.parameters.get_one(id='K_m_carbohydrate_formation_g6p').value, 15/scipy.constants.Avogadro/0.5)
self.assertEqual(model.parameters.get_one(id='K_m_carbohydrate_formation_g6p').comments,
'The value was assumed to be 1.0 times the concentration of g6p in cytosol')
self.assertEqual(model.parameters.get_one(id='K_m_lipid_formation_chsterol').value, 1e-05)
self.assertEqual(model.parameters.get_one(id='K_m_lipid_formation_chsterol').comments,
'The value was assigned to 1e-05 because the concentration of chsterol in cytosol was zero')
self.assertEqual(model.species.get_one(id='chsterol[c]').distribution_init_concentration.mean, 0.)
self.assertEqual(model.species.get_one(id='chsterol[c]').distribution_init_concentration.units, unit_registry.parse_units('molecule'))
self.assertEqual(model.species.get_one(id='chsterol[c]').distribution_init_concentration.comments,
'Set to zero assuming there is no free pool concentration')
self.assertEqual(model.parameters.get_one(id='K_m_lipid_formation_pail_hs').value, 1e-05)
self.assertEqual(model.parameters.get_one(id='K_m_lipid_formation_pail_hs').comments,
'The value was assigned to 1e-05 because the concentration of pail_hs in cytosol was zero')
self.assertEqual(model.reactions.get_one(id='carbohydrate_formation').rate_laws[0].expression.expression,
'k_cat_carbohydrate_formation * (g6p[c] / (g6p[c] + K_m_carbohydrate_formation_g6p * Avogadro * volume_c)) * 2**1')
self.assertEqual(model.reactions.get_one(id='lipid_formation').rate_laws[0].expression.expression,
'k_cat_lipid_formation * '
'(chsterol[c] / (chsterol[c] + K_m_lipid_formation_chsterol * Avogadro * volume_c)) * '
'(pail_hs[c] / (pail_hs[c] + K_m_lipid_formation_pail_hs * Avogadro * volume_c)) * '
'2**2')
def test_input_atp_production(self):
# Create KB content
kb = wc_kb.KnowledgeBase()
cell = kb.cell = wc_kb.Cell()
cell.parameters.create(id='total_carbohydrate_mass', value=2000+20/scipy.constants.Avogadro)
cell.parameters.create(id='total_lipid_mass', value=4700)
cytoplasm = cell.compartments.create(id='c')
trans1 = wc_kb.eukaryote.TranscriptSpeciesType(cell=cell, id='trans1')
trans1_half_life = wc_kb.core.SpeciesTypeProperty(property='half-life', species_type=trans1,
value='10000.0', value_type=wc_ontology['WC:float'])
trans1_species = wc_kb.core.Species(species_type=trans1, compartment=cytoplasm)
prot1 = wc_kb.eukaryote.ProteinSpeciesType(cell=cell, id='prot1', transcript=trans1)
prot1_half_life = wc_kb.core.SpeciesTypeProperty(property='half-life', species_type=prot1,
value='20000.0', value_type=wc_ontology['WC:float'])
# Create initial model content
model = wc_lang.Model()
model.parameters.create(id='mean_doubling_time', value=20000)
model_species_type = model.species_types.create(id=trans1.id, type=wc_ontology['WC:RNA'])
model_compartment = model.compartments.get_or_create(id=trans1.species[0].compartment.id)
model_species = model.species.get_or_create(species_type=model_species_type, compartment=model_compartment)
model_species.id = model_species.gen_id()
conc_model = model.distribution_init_concentrations.create(species=model_species, mean=10.)
conc_model.id = conc_model.gen_id()
model_species_type = model.species_types.create(id=trans1.protein.id, type=wc_ontology['WC:protein'])
model_compartment = model.compartments.get_or_create(id=trans1.species[0].compartment.id)
model_species = model.species.get_or_create(species_type=model_species_type, compartment=model_compartment)
model_species.id = model_species.gen_id()
conc_model = model.distribution_init_concentrations.create(species=model_species, mean=2.)
conc_model.id = conc_model.gen_id()
metabolic_participants = ['atp', 'ctp', 'gtp', 'utp', 'datp', 'dttp', 'dgtp', 'dctp', 'ppi', 'amp', 'cmp', 'rec', 'pool',
'gmp', 'ump', 'h2o', 'h', 'adp', 'pi', 'gdp', 'ala_L', 'met_L', 'g6p', 'chsterol', 'pail_hs', 'selnp']
for i in metabolic_participants:
model_species_type = model.species_types.create(id=i, type=wc_ontology['WC:metabolite'])
model_compartment = model.compartments.get_or_create(id='c')
model_species = model.species.get_or_create(species_type=model_species_type, compartment=model_compartment)
model_species.id = model_species.gen_id()
conc_model = model.distribution_init_concentrations.create(species=model.species.get_one(id='pool[c]'), mean=25.)
conc_model.id = conc_model.gen_id()
structure_info = {'g6p': ('C6H13O9P', 220., 1), 'chsterol': ('C27H46O4S', 350., 0), 'pail_hs': ('C41H78O13P', 500., -1), 'h2o': ('H2O', 20., 0.)}
for k, v in structure_info.items():
model_species_type = model.species_types.get_one(id=k)
model_species_type.structure = wc_lang.ChemicalStructure()
model_species_type.structure.empirical_formula = v[0]
model_species_type.structure.molecular_weight = v[1]
model_species_type.structure.charge = v[2]
others = ['ribosome', 'ribo_binding_site_species', 'ribo_bound_species']
for i in others:
model_species_type = model.species_types.create(id=i, type=wc_ontology['WC:pseudo_species'])
model_compartment = model.compartments.get_or_create(id='c')
model_species = model.species.get_or_create(species_type=model_species_type, compartment=model_compartment)
model_species.id = model_species.gen_id()
# Create translation and translocation submodel
translation_submodel = model.submodels.create(id='translation_translocation')
translation_compartment = 'c'
# Initiation
init_reaction = model.reactions.create(id='translation_initiation_' + trans1.id, submodel=translation_submodel)
init_reaction.participants.append(model.species.get_one(
id='ribosome[{}]'.format(translation_compartment)).species_coefficients.get_or_create(
coefficient=-1))
init_reaction.participants.append(model.species.get_one(
id='ribo_binding_site_species[{}]'.format(translation_compartment)).species_coefficients.get_or_create(
coefficient=-1))
init_reaction.participants.append(model.species.get_one(
id='met_L[{}]'.format(translation_compartment)).species_coefficients.get_or_create(
coefficient=-1))
init_reaction.participants.append(model.species.get_one(
id='h2o[{}]'.format(translation_compartment)).species_coefficients.get_or_create(coefficient=-5))
init_reaction.participants.append(model.species.get_one(
id='atp[{}]'.format(translation_compartment)).species_coefficients.get_or_create(coefficient=-2))
init_reaction.participants.append(model.species.get_one(
id='gtp[{}]'.format(translation_compartment)).species_coefficients.get_or_create(coefficient=-2))
init_reaction.participants.append(model.species.get_one(
id='ribo_bound_species[{}]'.format(translation_compartment)).species_coefficients.get_or_create(coefficient=1))
init_reaction.participants.append(model.species.get_one(
id='h[{}]'.format(translation_compartment)).species_coefficients.get_or_create(coefficient=5))
init_reaction.participants.append(model.species.get_one(
id='amp[{}]'.format(translation_compartment)).species_coefficients.get_or_create(coefficient=1))
init_reaction.participants.append(model.species.get_one(
id='adp[{}]'.format(translation_compartment)).species_coefficients.get_or_create(coefficient=1))
init_reaction.participants.append(model.species.get_one(
id='gdp[{}]'.format(translation_compartment)).species_coefficients.get_or_create(coefficient=2))
init_reaction.participants.append(model.species.get_one(
id='pi[{}]'.format(translation_compartment)).species_coefficients.get_or_create(coefficient=5))
# Elongation
el_reaction = model.reactions.get_or_create(id='translation_elongation_' + trans1.id, submodel=translation_submodel)
el_reaction.participants.append(model.species.get_one(
id='ribo_bound_species[{}]'.format(translation_compartment)).species_coefficients.get_or_create(coefficient=-1))
el_reaction.participants.append(model.species.get_one(
id='gtp[{}]'.format(translation_compartment)).species_coefficients.get_or_create(coefficient=-3))
el_reaction.participants.append(model.species.get_one(
id='atp[{}]'.format(translation_compartment)).species_coefficients.get_or_create(coefficient=-2))
el_reaction.participants.append(model.species.get_one(
id='h2o[{}]'.format(translation_compartment)).species_coefficients.get_or_create(coefficient=-5))
el_reaction.participants.append(model.species.get_one(
id='ala_L[{}]'.format(translation_compartment)).species_coefficients.get_or_create(coefficient=-3))
el_reaction.participants.append(model.species.get_one(
id='ribosome[{}]'.format(translation_compartment)).species_coefficients.get_or_create(coefficient=1))
el_reaction.participants.append(model.species.get_one(
id='ribo_binding_site_species[{}]'.format(translation_compartment)).species_coefficients.get_or_create(coefficient=1))
el_reaction.participants.append(model.species.get_one(
id='{}[{}]'.format(prot1.id, translation_compartment)).species_coefficients.get_or_create(coefficient=1))
el_reaction.participants.append(model.species.get_one(
id='amp[{}]'.format(translation_compartment)).species_coefficients.get_or_create(coefficient=2))
el_reaction.participants.append(model.species.get_one(
id='gdp[{}]'.format(translation_compartment)).species_coefficients.get_or_create(coefficient=3))
el_reaction.participants.append(model.species.get_one(
id='pi[{}]'.format(translation_compartment)).species_coefficients.get_or_create(coefficient=7))
el_reaction.participants.append(model.species.get_one(
id='h[{}]'.format(translation_compartment)).species_coefficients.get_or_create(coefficient=3))
gen = metabolism.MetabolismSubmodelGenerator(kb, model, options={
'carbohydrate_components': {'g6p[c]': 1.},
'lipid_components': {'chsterol[c]': 0.2, 'pail_hs[c]': 0.8},
'amino_acid_ids': ['ala_L', 'met_L'],
'atp_production': 2000,
})
gen.clean_and_validate_options()
gen.gen_reactions()
biomass_reaction = gen.submodel.dfba_obj.expression.dfba_obj_reactions[0]
self.assertEqual([i.value for i in biomass_reaction.dfba_obj_species if i.species.id=='atp[c]'], [-2000])
def test_calibrate_submodel(self):
model = self.model
gen = self.gen
gen.clean_and_validate_options()
model.species.get_one(id='enzyme3[c]').distribution_init_concentration.mean = 0.01
model.parameters.get_one(id='k_cat_r1_forward_enzyme3').value = 100.
model.parameters.get_one(id='k_cat_r1_backward_enzyme3').value = 100.
model.parameters.get_one(id='k_cat_r2_forward_enzyme2').value = 200.
model.parameters.get_one(id='k_cat_r2_backward_enzyme1').value = 100.
model.parameters.get_one(id='k_cat_r2_backward_enzyme2').value = 100.
model.parameters.get_one(id='k_cat_r3_forward_enzyme2').value = 300.
r4 = model.reactions.get_one(id='r4')
r4_rate_law_expression, error = wc_lang.RateLawExpression.deserialize('k_cat_r4_forward_enzyme1 * enzyme1[c]', {
wc_lang.Parameter: {'k_cat_r4_forward_enzyme1': model.parameters.create(id='k_cat_r4_forward_enzyme1', value=600.)},
wc_lang.Species: {'enzyme1[c]': model.species.get_one(id='enzyme1[c]')},
})
assert error is None, str(error)
r4_model_rate_law = model.rate_laws.create(
expression=r4_rate_law_expression,
reaction=r4,
direction=wc_lang.RateLawDirection['forward'])
r4_model_rate_law.id = r4_model_rate_law.gen_id()
biomass_rxn = gen.submodel.dfba_obj_reactions.create(id='biomass_reaction', model=model)
biomass_rxn.dfba_obj_species.append(model.species.get_one(id='m3[c]').dfba_obj_species.get_or_create(value=-1))
gen.submodel.dfba_obj = wc_lang.DfbaObjective(model=model)
gen.submodel.dfba_obj.id = gen.submodel.dfba_obj.gen_id()
obj_expression = biomass_rxn.id
dfba_obj_expression, error = wc_lang.DfbaObjectiveExpression.deserialize(
obj_expression, {wc_lang.DfbaObjReaction: {biomass_rxn.id: biomass_rxn}})
assert error is None, str(error)
gen.submodel.dfba_obj.expression = dfba_obj_expression
Av = scipy.constants.Avogadro
model.reactions.get_one(id='ex_m1').flux_bounds = wc_lang.FluxBounds(min=100./Av, max=120./Av)
model.reactions.get_one(id='ex_m2').flux_bounds = wc_lang.FluxBounds(min=100./Av, max=120./Av)
model.reactions.get_one(id='ex_m3').flux_bounds = wc_lang.FluxBounds(min=0., max=0.)
gen.options['scale_factor'] = 1e2
gen.options['coef_scale_factor'] = 10
gen.calibrate_submodel()
self.assertEqual(model.parameters.get_one(id='k_cat_r3_forward_enzyme2').value, 500.)
self.assertEqual(model.parameters.get_one(id='k_cat_r3_forward_enzyme2').comments, 'Measured value adjusted to relax bound')
self.assertEqual(model.parameters.get_one(id='k_cat_r4_forward_enzyme1').value, 600.)
self.assertEqual(model.parameters.get_one(id='k_cat_r4_forward_enzyme1').comments, '')
def test_determine_bounds(self):
model = self.model
gen = self.gen
gen.clean_and_validate_options()
model.reactions.get_one(id='ex_m1').flux_bounds = wc_lang.FluxBounds(min=math.nan, max=math.nan)
model.reactions.get_one(id='ex_m2').flux_bounds = wc_lang.FluxBounds(min=-15., max=None)
model.reactions.get_one(id='ex_m3').flux_bounds = wc_lang.FluxBounds(min=0., max=0.)
gen.options['scale_factor'] = 10.
reaction_bounds, lower_bound_adjustable, upper_bound_adjustable = gen.determine_bounds()
self.assertEqual(reaction_bounds, {'ex_m1': (None, None), 'ex_m2': (-15.*10.*scipy.constants.Avogadro*0.1, None), 'ex_m3': (0., 0.),
'r1': (0., 0.), 'r2': (None, None), 'r3': (0., 20.), 'r4': (0., None)})
self.assertEqual(sorted(lower_bound_adjustable), [])
self.assertEqual(sorted(upper_bound_adjustable), ['r3'])
def test_relax_bounds(self):
model = self.model
gen = self.gen
gen.clean_and_validate_options()
biomass_rxn = gen.submodel.dfba_obj_reactions.create(id='biomass_reaction', model=model)
biomass_rxn.dfba_obj_species.append(model.species.get_one(id='m3[c]').dfba_obj_species.get_or_create(value=-1))
gen.submodel.dfba_obj = wc_lang.DfbaObjective(model=model)
gen.submodel.dfba_obj.id = gen.submodel.dfba_obj.gen_id()
obj_expression = biomass_rxn.id
dfba_obj_expression, error = wc_lang.DfbaObjectiveExpression.deserialize(
obj_expression, {wc_lang.DfbaObjReaction: {biomass_rxn.id: biomass_rxn}})
assert error is None, str(error)
gen.submodel.dfba_obj.expression = dfba_obj_expression
gen._reaction_bounds = {'ex_m1': (10, 10), 'ex_m2': (10, 10), 'ex_m3': (0, 0), 'r1': (0, 1), 'r2': (-2, 3), 'r3': (0, 2), 'r4': (0, None)}
lower_bound_adjustable = ['r2']
upper_bound_adjustable = ['r1', 'r2', 'r3']
target = {'biomass_reaction': 10}
alpha_lower, alpha_upper = gen.relax_bounds(target, lower_bound_adjustable, upper_bound_adjustable)
self.assertEqual(alpha_lower, {})
self.assertEqual(alpha_upper, {'r3': 1})
def test_flux_variability_analysis(self):
model = self.model
gen = self.gen
gen.clean_and_validate_options()
biomass_rxn = gen.submodel.dfba_obj_reactions.create(id='biomass_reaction', model=model)
biomass_rxn.dfba_obj_species.append(model.species.get_one(id='m3[c]').dfba_obj_species.get_or_create(value=-1))
bounds = {'ex_m1': (10, 10), 'ex_m2': (10, 10), 'ex_m3': (0, 0), 'r1': (0, 1), 'r2': (-2, 3), 'r3': (0, 3), 'r4': (0, None)}
submodel = gen.submodel
conv_model = conv_opt.Model(name='test_model')
conv_variables = {}
conv_metabolite_matrices = collections.defaultdict(list)
for reaction in submodel.reactions:
conv_variables[reaction.id] = conv_opt.Variable(
name=reaction.id, type=conv_opt.VariableType.continuous,
lower_bound=bounds[reaction.id][0],
upper_bound=bounds[reaction.id][1])
conv_model.variables.append(conv_variables[reaction.id])
for part in reaction.participants:
conv_metabolite_matrices[part.species.id].append(
conv_opt.LinearTerm(conv_variables[reaction.id],
part.coefficient))
biomass_reaction = submodel.dfba_obj_reactions[0]
conv_variables[biomass_reaction.id] = conv_opt.Variable(
name=biomass_reaction.id, type=conv_opt.VariableType.continuous, lower_bound=0)
conv_model.variables.append(conv_variables[biomass_reaction.id])
for part in biomass_reaction.dfba_obj_species:
conv_metabolite_matrices[part.species.id].append(
conv_opt.LinearTerm(conv_variables[biomass_reaction.id],
part.value))
for met_id, expression in conv_metabolite_matrices.items():
conv_model.constraints.append(conv_opt.Constraint(expression, name=met_id,
upper_bound=0.0, lower_bound=0.0))
conv_model.objective_terms = [conv_opt.LinearTerm(conv_variables['biomass_reaction'], 1.),]
flux_range = gen.flux_variability_analysis(conv_model)
self.assertEqual(flux_range, {'ex_m1': (10., 10.), 'ex_m2': (10., 10.), 'ex_m3': (0., 0.), 'r1': (1., 1.), 'r2': (3., 3.), 'r3': (3., 3.), 'r4': (6., 6.), 'biomass_reaction': (10., 10.)})
flux_range = gen.flux_variability_analysis(conv_model, fraction_of_objective=0.8, target_reactions=['r1', 'r2', 'biomass_reaction'])
self.assertEqual(flux_range, {'r1': (0., 1.), 'r2': (3., 3.), 'biomass_reaction': (8., 8.)})
flux_range = gen.flux_variability_analysis(conv_model, fraction_of_objective=0.8, fixed_values={'r2':3.}, target_reactions=['r1', 'r2', 'biomass_reaction'])
self.assertEqual(flux_range, {'r1': (0., 1.), 'r2': (3., 3.), 'biomass_reaction': (8., 8.)})
    def test_impute_kinetic_constant(self):
        """ Test imputation of k_cat values from FVA bound values.

        Runs three scenarios with different bound dictionaries and checks the
        rate-law-to-bound pairing recorded in ``gen._law_bound_pairs`` as well as
        the imputed parameter values and their comments. The asserted comments
        indicate that values imputed from FVA bounds are scaled by
        ``kcat_adjustment_factor``.
        """
        model = self.model
        gen = self.gen
        gen.clean_and_validate_options()
        # Scenario 1: reversible r2 spans negative and positive fluxes; adjustment factor 2
        bound_values = {'ex_m1': (10, 10), 'ex_m2': (10, 10), 'ex_m3': (0, 0), 'r1': (0, 1), 'r2': (-2, 3), 'r3': (0, 3), 'r4': (0, None)}
        gen.options['kcat_adjustment_factor'] = 2.
        gen.impute_kinetic_constant(bound_values)
        # Each direction's rate law is paired with the magnitude of its bound
        self.assertEqual(gen._law_bound_pairs[model.rate_laws.get_one(id='r1-forward')], 1)
        self.assertEqual(gen._law_bound_pairs[model.rate_laws.get_one(id='r1-backward')], 0)
        self.assertEqual(gen._law_bound_pairs[model.rate_laws.get_one(id='r2-forward')], 3)
        self.assertEqual(gen._law_bound_pairs[model.rate_laws.get_one(id='r2-backward')], 2)
        self.assertEqual(gen._law_bound_pairs[model.rate_laws.get_one(id='r3-forward')], 3)
        # Missing k_cat values are imputed as the median of measured ones
        self.assertEqual(model.parameters.get_one(id='k_cat_r1_forward_enzyme3').value, 150.)
        self.assertEqual(model.parameters.get_one(id='k_cat_r1_forward_enzyme3').comments,
            'Value imputed as the median of measured k_cat values')
        self.assertEqual(model.parameters.get_one(id='k_cat_r1_backward_enzyme3').value, 150.)
        self.assertEqual(model.parameters.get_one(id='k_cat_r1_backward_enzyme3').comments,
            'Value imputed as the median of measured k_cat values')
        # A measured value consistent with the bound is left unchanged
        self.assertEqual(model.parameters.get_one(id='k_cat_r2_forward_enzyme1').value, 100.)
        self.assertEqual(model.parameters.get_one(id='k_cat_r2_forward_enzyme1').comments, '')
        # Values imputed from FVA bounds are multiplied by the adjustment factor
        self.assertEqual(model.parameters.get_one(id='k_cat_r2_forward_enzyme2').value, 200.*2.)
        self.assertEqual(model.parameters.get_one(id='k_cat_r2_forward_enzyme2').comments,
            'Value imputed based on FVA bound value and adjusted with a factor of 2.0')
        self.assertEqual(model.parameters.get_one(id='k_cat_r2_backward_enzyme1').value, 100.*2.)
        self.assertEqual(model.parameters.get_one(id='k_cat_r2_backward_enzyme1').comments,
            'Value imputed based on FVA bound value and adjusted with a factor of 2.0')
        self.assertEqual(model.parameters.get_one(id='k_cat_r2_backward_enzyme2').value, 100.*2.)
        self.assertEqual(model.parameters.get_one(id='k_cat_r2_backward_enzyme2').comments,
            'Value imputed based on FVA bound value and adjusted with a factor of 2.0')
        self.assertEqual(model.parameters.get_one(id='k_cat_r3_forward_enzyme2').value, 300.)
        self.assertEqual(model.parameters.get_one(id='k_cat_r3_forward_enzyme2').comments,
            'Measured value adjusted to relax bound')
        # Scenario 2: r2 flux is strictly negative; adjustment factor 1
        bound_values = {'ex_m1': (10, 10), 'ex_m2': (10, 10), 'ex_m3': (0, 0), 'r1': (1, 1), 'r2': (-2, -2), 'r3': (1, 3), 'r4': (0, None)}
        gen.options['kcat_adjustment_factor'] = 1.
        gen.impute_kinetic_constant(bound_values)
        self.assertEqual(gen._law_bound_pairs[model.rate_laws.get_one(id='r1-forward')], 1)
        self.assertEqual(gen._law_bound_pairs[model.rate_laws.get_one(id='r1-backward')], 0)
        self.assertEqual(gen._law_bound_pairs[model.rate_laws.get_one(id='r2-forward')], 0)
        self.assertEqual(gen._law_bound_pairs[model.rate_laws.get_one(id='r2-backward')], 2)
        self.assertEqual(gen._law_bound_pairs[model.rate_laws.get_one(id='r3-forward')], 3)
        # Scenario 3: zero r1 flux and a wider negative range for r2
        bound_values = {'ex_m1': (10, 10), 'ex_m2': (10, 10), 'ex_m3': (0, 0), 'r1': (0, 0), 'r2': (-4, -2), 'r3': (3, 3), 'r4': (0, None)}
        gen.impute_kinetic_constant(bound_values)
        self.assertEqual(gen._law_bound_pairs[model.rate_laws.get_one(id='r1-forward')], 0)
        self.assertEqual(gen._law_bound_pairs[model.rate_laws.get_one(id='r1-backward')], 0)
        self.assertEqual(gen._law_bound_pairs[model.rate_laws.get_one(id='r2-forward')], 0)
        self.assertEqual(gen._law_bound_pairs[model.rate_laws.get_one(id='r2-backward')], 4)
        self.assertEqual(gen._law_bound_pairs[model.rate_laws.get_one(id='r3-forward')], 3)
| 68.73817 | 195 | 0.672464 | 8,218 | 65,370 | 5.068508 | 0.054758 | 0.067126 | 0.041486 | 0.056178 | 0.856217 | 0.828896 | 0.801311 | 0.766667 | 0.729431 | 0.695628 | 0 | 0.024895 | 0.196864 | 65,370 | 951 | 196 | 68.73817 | 0.768481 | 0.009484 | 0 | 0.507995 | 0 | 0.00123 | 0.120373 | 0.042596 | 0 | 0 | 0 | 0 | 0.118081 | 1 | 0.01107 | false | 0 | 0.01722 | 0 | 0.02952 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
370de56d50636f002ae6f6a7d6ff71df499ff6c5 | 38 | py | Python | simoni/models/__init__.py | yamad07/simoni | 7c0f18667ced093a86e5e5d875b734c4e018015e | [
"MIT"
] | null | null | null | simoni/models/__init__.py | yamad07/simoni | 7c0f18667ced093a86e5e5d875b734c4e018015e | [
"MIT"
] | null | null | null | simoni/models/__init__.py | yamad07/simoni | 7c0f18667ced093a86e5e5d875b734c4e018015e | [
"MIT"
] | null | null | null | from simoni.models.model import Model
| 19 | 37 | 0.842105 | 6 | 38 | 5.333333 | 0.833333 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.105263 | 38 | 1 | 38 | 38 | 0.941176 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
2ebaa07c96c42f293eb37fbdecbf25fa08067eb8 | 186 | py | Python | backend/user/admin.py | Tim6FTN/UKS | 3cf19f014cdc7845bf0b808b97c4e05dc49b062e | [
"MIT"
] | 1 | 2021-01-10T12:34:59.000Z | 2021-01-10T12:34:59.000Z | backend/user/admin.py | Tim6FTN/UKS | 3cf19f014cdc7845bf0b808b97c4e05dc49b062e | [
"MIT"
] | 37 | 2021-01-07T22:31:25.000Z | 2021-02-20T10:59:46.000Z | backend/user/admin.py | Tim6FTN/UKS | 3cf19f014cdc7845bf0b808b97c4e05dc49b062e | [
"MIT"
] | null | null | null | from django.contrib import admin
# Register your models here.
from user.models import GithubProfile
@admin.register(GithubProfile)
class GithubProfileAdmin(admin.ModelAdmin):
pass | 20.666667 | 43 | 0.811828 | 22 | 186 | 6.863636 | 0.681818 | 0.172185 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.123656 | 186 | 9 | 44 | 20.666667 | 0.92638 | 0.139785 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0.2 | 0.4 | 0 | 0.6 | 0 | 1 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 1 | 1 | 0 | 1 | 0 | 0 | 6 |
2ee3a7d8e8da8f7763d9d16d4e47758012cc4ebd | 134 | py | Python | cannlytics/lims/__init__.py | capdragon/cannlytics | 47eeda80b1faf54d709def3641d9476501508fec | [
"MIT"
] | 1 | 2021-06-07T13:53:06.000Z | 2021-06-07T13:53:06.000Z | cannlytics/lims/__init__.py | keeganskeate/cannlytics | d84d069d074db3d6c89d0ec87df934316b5d5f83 | [
"MIT"
] | null | null | null | cannlytics/lims/__init__.py | keeganskeate/cannlytics | d84d069d074db3d6c89d0ec87df934316b5d5f83 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Sat Feb 6 18:38:45 2021
@author: keega
"""
def initialize_lims():
"""Initialize LIMS.""" | 14.888889 | 35 | 0.589552 | 19 | 134 | 4.105263 | 0.894737 | 0.358974 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.111111 | 0.19403 | 134 | 9 | 36 | 14.888889 | 0.611111 | 0.679104 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 1 | true | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 0 | null | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 1 | 0 | 0 | 0 | 1 | 0 | 0 | 6 |
2c2cb021a650fa2e6c1e2edeb152878ac84e7ed5 | 259 | py | Python | slackbot_job/config_example.py | hichemck/dockerized_data_pipeline | b7b58f95f5dc13d6d3e874882a0ead26d52f5eb0 | [
"MIT"
] | 1 | 2020-09-29T18:37:07.000Z | 2020-09-29T18:37:07.000Z | slackbot_job/config_example.py | hichemck/dockerized_data_pipeline | b7b58f95f5dc13d6d3e874882a0ead26d52f5eb0 | [
"MIT"
] | null | null | null | slackbot_job/config_example.py | hichemck/dockerized_data_pipeline | b7b58f95f5dc13d6d3e874882a0ead26d52f5eb0 | [
"MIT"
] | null | null | null | connection_string = "postgresql+psycopg2://username:password@hostname:port/databaser"
# hostname is the name of the service in docker compose
# //username:password@hostname:port/database
WEBHOOK = "get it from slack after you create a slackbot app" | 43.166667 | 85 | 0.76834 | 35 | 259 | 5.657143 | 0.828571 | 0.161616 | 0.242424 | 0.282828 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.004525 | 0.146718 | 259 | 6 | 86 | 43.166667 | 0.891403 | 0.405405 | 0 | 0 | 0 | 0 | 0.736842 | 0.414474 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0.5 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 1 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 6 |
258f7fbdc8aae95e8394d50848e58222a14d05f8 | 40 | py | Python | intent/tagging/__init__.py | rgeorgi/intent | 9920798c126f6d354029f7bb0a345e7cdb649f3a | [
"MIT"
] | 3 | 2016-08-05T01:11:57.000Z | 2017-08-26T15:35:51.000Z | intent/tagging/__init__.py | rgeorgi/intent | 9920798c126f6d354029f7bb0a345e7cdb649f3a | [
"MIT"
] | 2 | 2016-03-01T22:41:24.000Z | 2016-09-14T18:39:25.000Z | intent/tagging/__init__.py | rgeorgi/intent | 9920798c126f6d354029f7bb0a345e7cdb649f3a | [
"MIT"
] | null | null | null | from nltk.tag.stanford import POSTagger | 40 | 40 | 0.85 | 6 | 40 | 5.666667 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.1 | 40 | 1 | 40 | 40 | 0.944444 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
25a9a46a885eb29093378c2ef87fac28a5e6ed7e | 2,677 | py | Python | tests/test_create_conversation.py | 9ae/layer-python | 14588bfa8bde3babd4186213edfd8d19b3e797b1 | [
"Apache-2.0"
] | null | null | null | tests/test_create_conversation.py | 9ae/layer-python | 14588bfa8bde3babd4186213edfd8d19b3e797b1 | [
"Apache-2.0"
] | null | null | null | tests/test_create_conversation.py | 9ae/layer-python | 14588bfa8bde3babd4186213edfd8d19b3e797b1 | [
"Apache-2.0"
] | null | null | null | import json
from test_utils import MockRequestResponse, TestPlatformClient
class TestCreateConverstaion(TestPlatformClient):
def test_create_conversation(self, layerclient, monkeypatch):
def verify_request_args(method, url, headers, data, params):
assert method == 'POST'
assert url == (
'https://api.layer.com/apps/TEST_APP_UUID/conversations'
)
assert headers == {
'Accept': 'application/vnd.layer+json; version=1.0',
'Authorization': 'Bearer TEST_BEARER_TOKEN',
'Content-Type': 'application/json',
}
json_data = json.loads(data)
assert json_data == {
'participants': 'TEST_CONVERSATION_UUID',
'metadata': None,
'distinct': True,
}
return MockRequestResponse(
True,
{
'id': 'layer:///conversation/TEST_CONVERSATION_UUID',
'url': 'layer:///conversation/TEST_CONVERSATION_UUID',
},
)
monkeypatch.setattr('requests.request', verify_request_args)
layerclient.create_conversation('TEST_CONVERSATION_UUID')
def test_create_conversation_with_options(self, layerclient, monkeypatch):
def verify_request_args(method, url, headers, data, params):
assert method == 'POST'
assert url == (
'https://api.layer.com/apps/TEST_APP_UUID/conversations'
)
assert headers == {
'Accept': 'application/vnd.layer+json; version=1.0',
'Authorization': 'Bearer TEST_BEARER_TOKEN',
'Content-Type': 'application/json',
}
json_data = json.loads(data)
assert json_data == {
'participants': 'TEST_CONVERSATION_UUID',
'metadata': {
'Topic': 'A coffee conversation',
'Background': '#C0FFEE',
},
'distinct': False,
}
return MockRequestResponse(
True,
{
'id': 'layer:///conversation/TEST_CONVERSATION_UUID',
'url': 'layer:///conversation/TEST_CONVERSATION_UUID',
},
)
monkeypatch.setattr('requests.request', verify_request_args)
layerclient.create_conversation(
'TEST_CONVERSATION_UUID',
False,
{
'Topic': 'A coffee conversation',
'Background': '#C0FFEE',
},
)
| 36.175676 | 78 | 0.5226 | 211 | 2,677 | 6.417062 | 0.293839 | 0.094535 | 0.118168 | 0.141802 | 0.847858 | 0.847858 | 0.788774 | 0.788774 | 0.788774 | 0.788774 | 0 | 0.003571 | 0.372432 | 2,677 | 73 | 79 | 36.671233 | 0.802381 | 0 | 0 | 0.553846 | 0 | 0 | 0.292865 | 0.11879 | 0 | 0 | 0 | 0 | 0.123077 | 1 | 0.061538 | false | 0 | 0.030769 | 0 | 0.138462 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
25c34bfc18a5786fe0c93a8982c297022ebde1cd | 1,349 | py | Python | BookClub/views/__init__.py | amir-rahim/BookClubSocialNetwork | b69a07cd33592f700214252a64c7c1c53845625d | [
"MIT"
] | 4 | 2022-02-04T02:11:48.000Z | 2022-03-12T21:38:01.000Z | BookClub/views/__init__.py | amir-rahim/BookClubSocialNetwork | b69a07cd33592f700214252a64c7c1c53845625d | [
"MIT"
] | 51 | 2022-02-01T18:56:23.000Z | 2022-03-31T15:35:37.000Z | BookClub/views/__init__.py | amir-rahim/BookClubSocialNetwork | b69a07cd33592f700214252a64c7c1c53845625d | [
"MIT"
] | null | null | null | from .static import *
# Aggregate re-exports so that ``BookClub.views`` exposes every view module.
# Fix: ``BookClub.views.users.authentication`` was star-imported twice; the
# redundant second import has been removed (behavior is unchanged).
from .action_views import *
from BookClub.views.users.authentication import *
from BookClub.views.clubs.clubs import *
from BookClub.views.clubs.lists import *
from BookClub.views.users.user import *
from BookClub.views.clubs.membership import *
from BookClub.views.meetings.edit_meeting import *
from BookClub.views.library.library import *
from BookClub.views.library.book import *
from BookClub.views.library.booklists import *
from BookClub.views.library.saved_booklists import *
from BookClub.views.reviews.review_lists import *
from BookClub.views.meetings.meetings import *
from BookClub.views.library.reviews import *
from BookClub.views.forum.forum import *
from BookClub.views.async_views.vote import *
from BookClub.views.async_views.async_search import *
from BookClub.views.meetings.agenda import *
from BookClub.views.relationship_views.u2u_views import *
from BookClub.views.relationship_views.relationship_list import *
from BookClub.views.bookshelf.bookshelf import *
from BookClub.views.bookshelf.add_to_bookshelf import *
from BookClub.views.bookshelf.update_in_bookshelf import *
from BookClub.views.bookshelf.remove_from_bookshelf import *
from BookClub.views.recommendation_views.recommendation_lists import *
from BookClub.views.users.user_list import * | 46.517241 | 70 | 0.836916 | 182 | 1,349 | 6.093407 | 0.197802 | 0.243463 | 0.422002 | 0.539225 | 0.788097 | 0.435528 | 0.192967 | 0.12624 | 0.12624 | 0.12624 | 0 | 0.000808 | 0.083024 | 1,349 | 29 | 71 | 46.517241 | 0.895715 | 0 | 0 | 0.071429 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 0 | 0 | 0 | null | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
25d6301c41d466ca4e86adc3af8ff43d2b5395d0 | 92 | py | Python | terry/bin/utils/__init__.py | onixuniverse/terry-bot | 2dfadb1749c10cdc59e81463ae347d367e3fd189 | [
"Apache-2.0"
] | 5 | 2021-01-20T22:10:42.000Z | 2021-07-29T15:36:17.000Z | terry/bin/utils/__init__.py | onixuniverse/terry-bot | 2dfadb1749c10cdc59e81463ae347d367e3fd189 | [
"Apache-2.0"
] | null | null | null | terry/bin/utils/__init__.py | onixuniverse/terry-bot | 2dfadb1749c10cdc59e81463ae347d367e3fd189 | [
"Apache-2.0"
] | null | null | null | from .channels import *
from .dates import *
from .roles import *
from .timetables import *
| 18.4 | 25 | 0.73913 | 12 | 92 | 5.666667 | 0.5 | 0.441176 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.173913 | 92 | 4 | 26 | 23 | 0.894737 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | null | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
25f4b8bb52aa5321f462ed7dd28f1f1c3a0e6814 | 119 | py | Python | gfapy/line/comment/version_conversion.py | ujjwalsh/gfapy | 891ef3df695f20c67809e5a54549c876d90690b4 | [
"ISC"
] | 44 | 2017-03-18T08:08:04.000Z | 2021-11-10T16:11:15.000Z | gfapy/line/comment/version_conversion.py | ujjwalsh/gfapy | 891ef3df695f20c67809e5a54549c876d90690b4 | [
"ISC"
] | 22 | 2017-04-04T21:20:31.000Z | 2022-03-09T19:05:30.000Z | gfapy/line/comment/version_conversion.py | ujjwalsh/gfapy | 891ef3df695f20c67809e5a54549c876d90690b4 | [
"ISC"
] | 5 | 2017-07-07T02:56:56.000Z | 2020-09-30T20:10:49.000Z | class VersionConversion:
def _to_gfa1_a(self): return self.to_list()
def _to_gfa2_a(self): return self.to_list()
| 19.833333 | 45 | 0.756303 | 20 | 119 | 4.1 | 0.5 | 0.121951 | 0.268293 | 0.365854 | 0.512195 | 0.512195 | 0 | 0 | 0 | 0 | 0 | 0.019417 | 0.134454 | 119 | 5 | 46 | 23.8 | 0.776699 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.666667 | false | 0 | 0 | 0.666667 | 1 | 0 | 1 | 0 | 0 | null | 0 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 6 |
d319ef31510304d3ff54b4096f6a6193d90e05da | 122 | py | Python | main.py | kuhy/pyAPDUFuzzer | 30979ffc868fcd3dcdf307d8d92d634d6b19da12 | [
"MIT"
] | null | null | null | main.py | kuhy/pyAPDUFuzzer | 30979ffc868fcd3dcdf307d8d92d634d6b19da12 | [
"MIT"
] | 2 | 2019-11-04T17:52:32.000Z | 2019-11-04T17:54:20.000Z | main.py | kuhy/pyAPDUFuzzer | 30979ffc868fcd3dcdf307d8d92d634d6b19da12 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
import apdu_fuzzer
import apdu_fuzzer.main
if __name__ == "__main__":
    # Script entry point: delegate to the fuzzer package's CLI main().
    apdu_fuzzer.main.main()
| 17.428571 | 27 | 0.745902 | 18 | 122 | 4.444444 | 0.555556 | 0.375 | 0.4 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.009434 | 0.131148 | 122 | 6 | 28 | 20.333333 | 0.745283 | 0.172131 | 0 | 0 | 0 | 0 | 0.08 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 0.5 | 0 | 0.5 | 0 | 1 | 0 | 0 | null | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 0 | 0 | 0 | 6 |
d339e268db8313e4d3e47186007382c9b4e6347b | 89 | py | Python | vnpy/app/portfolio_manager/__init__.py | ChaunceyDong/vnpy | 1c1b683ffc1c842bb7661e8194eca61af30cf586 | [
"MIT"
] | 5 | 2020-05-19T07:32:39.000Z | 2022-03-14T09:09:48.000Z | vnpy/app/portfolio_manager/__init__.py | ChaunceyDong/vnpy | 1c1b683ffc1c842bb7661e8194eca61af30cf586 | [
"MIT"
] | 1 | 2021-08-12T01:20:39.000Z | 2021-08-12T01:20:39.000Z | vnpy/app/portfolio_manager/__init__.py | ChaunceyDong/vnpy | 1c1b683ffc1c842bb7661e8194eca61af30cf586 | [
"MIT"
] | 3 | 2020-04-02T08:30:17.000Z | 2020-05-03T12:12:05.000Z | import sys
import vnpy_portfoliomanager
# Replace this package in sys.modules with the standalone
# ``vnpy_portfoliomanager`` distribution, so importing this module
# transparently yields that package instead.
sys.modules[__name__] = vnpy_portfoliomanager
| 12.714286 | 45 | 0.842697 | 10 | 89 | 6.9 | 0.6 | 0.57971 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.11236 | 89 | 6 | 46 | 14.833333 | 0.873418 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 0.666667 | 0 | 0.666667 | 0 | 1 | 0 | 0 | null | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
d34633e7d019ce3d81e4452dbdd9d77adbeb8955 | 6,139 | py | Python | flog/tests/test_log_call.py | mdhalse/flog | b8fae638cd23a935c71dc3670c30638aade5fca5 | [
"MIT"
] | null | null | null | flog/tests/test_log_call.py | mdhalse/flog | b8fae638cd23a935c71dc3670c30638aade5fca5 | [
"MIT"
] | null | null | null | flog/tests/test_log_call.py | mdhalse/flog | b8fae638cd23a935c71dc3670c30638aade5fca5 | [
"MIT"
] | 2 | 2018-03-05T17:15:05.000Z | 2021-03-13T03:29:00.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import random
import unittest
from unittest.mock import MagicMock, NonCallableMock, patch
from flog import flog # SUT
def my_fun(*args, **kwargs):
    """Toy target function: returns the sum of its positional arguments.

    Keyword arguments are accepted and ignored, so the log decorators can
    be exercised with both args and kwargs.
    """
    total = 0
    for value in args:
        total += value
    return total
@patch("flog.flog.logging")
class TestLogCall(unittest.TestCase):
    """Tests for flog.log_call: it wraps a callable and logs args/returns.

    The class-level @patch injects the patched ``flog.flog.logging`` module
    as the trailing (unused) ``logging`` argument of every test method.
    """

    def setUp(self):
        # Stand-in logger; NonCallableMock auto-creates .debug as a MagicMock
        # whose calls can be asserted on.
        self.logger = NonCallableMock()

    def instance_method(self, *args, **kwargs):
        # Target used to verify that bound methods are logged with their
        # Class.method qualified name (see tests below).
        return sum(args)

    def test_log_call_logs(self, logging):
        """flog.flog.log_call: Logs a basic call without args or kwargs"""
        flog.log_call(self.logger)(my_fun)()  # SUT

        self.logger.debug.assert_any_call("flog.tests.test_log_call.my_fun: args: (), kwargs: {}")
        self.logger.debug.assert_any_call("flog.tests.test_log_call.my_fun: returns: 0")

    def test_log_call_logs_with_args(self, logging):
        """flog.flog.log_call: Logs a basic call with args"""
        randoa = random.randint(0, 500)
        randob = random.randint(0, 500)
        randoc = random.randint(0, 500)

        flog.log_call(self.logger)(my_fun)(randoa, randob, randoc)  # SUT

        # The literal "{}" argument fills the kwargs placeholder, since an
        # empty-dict repr would otherwise clash with str.format braces.
        self.logger.debug.assert_any_call(
            "flog.tests.test_log_call.my_fun: args: ({}, {}, {}), kwargs: {}".format(randoa, randob, randoc, "{}")
        )
        self.logger.debug.assert_any_call(
            "flog.tests.test_log_call.my_fun: returns: {}".format(randoa + randob + randoc)
        )

    def test_log_call_logs_with_kwargs(self, logging):
        """flog.flog.log_call: Logs a basic call with args and kwargs"""
        randoa = random.randint(0, 500)
        randob = random.randint(0, 500)
        randoc = random.randint(0, 500)

        flog.log_call(self.logger)(my_fun)(randoa, randob, randoc, random_frippery_scale=32)  # SUT

        self.logger.debug.assert_any_call(
            "flog.tests.test_log_call.my_fun: args: ({}, {}, {}), kwargs: {rfs}".format(
                randoa, randob, randoc, rfs="{'random_frippery_scale': 32}"
            )
        )
        self.logger.debug.assert_any_call(
            "flog.tests.test_log_call.my_fun: returns: {}".format(randoa + randob + randoc)
        )

    def test_log_call_logs_instance_method_with_kwargs(self, logging):
        """flog.flog.log_call: Logs a instance method call with args and kwargs"""
        randoa = random.randint(0, 500)
        randob = random.randint(0, 500)
        randoc = random.randint(0, 500)

        flog.log_call(self.logger)(self.instance_method)(randoa, randob, randoc, random_frippery_scale=32)  # SUT

        # Bound methods are logged with the Class.method qualified name.
        self.logger.debug.assert_any_call(
            "flog.tests.test_log_call.TestLogCall.instance_method: args: ({}, {}, {}), kwargs: {rfs}".format(
                randoa, randob, randoc, rfs="{'random_frippery_scale': 32}"
            )
        )
        self.logger.debug.assert_any_call(
            "flog.tests.test_log_call.TestLogCall.instance_method: returns: {}".format(randoa + randob + randoc)
        )

    def test_log_call_accepts_callable(self, logging):
        """flog.flog.log_call: Accepts a callable and calls it as if it were a logger function"""
        # A plain callable (not a logger object) should be invoked directly.
        my_logger = MagicMock()
        randoa = random.randint(0, 500)
        randob = random.randint(0, 500)
        randoc = random.randint(0, 500)

        flog.log_call(my_logger)(self.instance_method)(randoa, randob, randoc, random_frippery_scale=32)  # SUT

        my_logger.assert_any_call(
            "flog.tests.test_log_call.TestLogCall.instance_method: args: ({}, {}, {}), kwargs: {rfs}".format(
                randoa, randob, randoc, rfs="{'random_frippery_scale': 32}"
            )
        )
        my_logger.assert_any_call(
            "flog.tests.test_log_call.TestLogCall.instance_method: returns: {}".format(randoa + randob + randoc)
        )
@patch("flog.flog.logging")
class TestLogSensitiveCall(unittest.TestCase):
    """Tests for flog.log_sensitive_call: args/returns are logged masked.

    Regardless of the actual call, the log lines show X-masked placeholders
    instead of real argument and return values. The class-level @patch
    injects the patched ``flog.flog.logging`` as the ``logging`` argument.
    """

    def setUp(self):
        # Stand-in logger; .debug is auto-created as an assertable MagicMock.
        self.logger = NonCallableMock()

    def test_log_call_logs(self, logging):
        """flog.flog.log_sensitive_call: Discreetly logs a basic call without args or kwargs"""
        flog.log_sensitive_call(self.logger)(my_fun)()  # SUT

        self.logger.debug.assert_any_call("flog.tests.test_log_call.my_fun: args: *XXXXXX, kwargs: **XXXXXXX")
        self.logger.debug.assert_any_call("flog.tests.test_log_call.my_fun: returns: XXXXXXXXXX")

    def test_log_call_logs_with_args(self, logging):
        """flog.flog.log_sensitive_call: Discreetly logs a basic call with args"""
        randoa = random.randint(0, 500)
        randob = random.randint(0, 500)
        randoc = random.randint(0, 500)

        flog.log_sensitive_call(self.logger)(my_fun)(randoa, randob, randoc)  # SUT

        # The random argument values must never appear in the log output.
        self.logger.debug.assert_any_call("flog.tests.test_log_call.my_fun: args: *XXXXXX, kwargs: **XXXXXXX")
        self.logger.debug.assert_any_call("flog.tests.test_log_call.my_fun: returns: XXXXXXXXXX")

    def test_log_call_logs_with_kwargs(self, logging):
        """flog.flog.log_sensitive_call: Discreetly logs a basic call with args and kwargs"""
        randoa = random.randint(0, 500)
        randob = random.randint(0, 500)
        randoc = random.randint(0, 500)

        flog.log_sensitive_call(self.logger)(my_fun)(randoa, randob, randoc, random_frippery_scale=32)  # SUT

        self.logger.debug.assert_any_call("flog.tests.test_log_call.my_fun: args: *XXXXXX, kwargs: **XXXXXXX")
        self.logger.debug.assert_any_call("flog.tests.test_log_call.my_fun: returns: XXXXXXXXXX")

    def test_log_call_accepts_callable(self, logging):
        """flog.flog.log_sensitive_call: Accepts a callable and calls it as if it were a logger function"""
        my_logger = MagicMock()
        randoa = random.randint(0, 500)
        randob = random.randint(0, 500)
        randoc = random.randint(0, 500)

        # SUT
        flog.log_sensitive_call(my_logger)(my_fun)(randoa, randob, randoc, random_frippery_scale=32)

        my_logger.assert_any_call("flog.tests.test_log_call.my_fun: args: *XXXXXX, kwargs: **XXXXXXX")
        my_logger.assert_any_call("flog.tests.test_log_call.my_fun: returns: XXXXXXXXXX")
| 39.606452 | 114 | 0.659879 | 825 | 6,139 | 4.677576 | 0.088485 | 0.067116 | 0.076963 | 0.092511 | 0.925628 | 0.900751 | 0.900751 | 0.898419 | 0.868619 | 0.868619 | 0 | 0.021087 | 0.212087 | 6,139 | 154 | 115 | 39.863636 | 0.776721 | 0.123799 | 0 | 0.639175 | 0 | 0 | 0.227025 | 0.138132 | 0 | 0 | 0 | 0 | 0.185567 | 1 | 0.134021 | false | 0 | 0.041237 | 0.020619 | 0.216495 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
d3861b17a04f0b6e49f34050f9cb947ed93e610c | 5,395 | py | Python | wandapi/tests/root/test_root.py | bri365/wanda | 22c953fb308114e0f8d9ef6cf1e1e5ad0d35e398 | [
"MIT"
] | null | null | null | wandapi/tests/root/test_root.py | bri365/wanda | 22c953fb308114e0f8d9ef6cf1e1e5ad0d35e398 | [
"MIT"
] | null | null | null | wandapi/tests/root/test_root.py | bri365/wanda | 22c953fb308114e0f8d9ef6cf1e1e5ad0d35e398 | [
"MIT"
] | null | null | null |
from wandapi.tests import utils
import falcon
from testfixtures import log_capture
class TestRootController(utils.FunctionalTest):
"""Root resource tests."""
layer = utils.TopLayer
# noinspection PyUnusedLocal
@log_capture()
def test_options(self, log):
response = self.simulate_options('/api/versions', headers={'Accept': 'application/json'})
self.assertEqual(response.status, falcon.HTTP_OK)
# noinspection PyUnusedLocal
@log_capture()
def test_accept_json(self, log):
response = self.simulate_get('/api/versions', headers={'Accept': 'application/json'})
self.assertEqual(response.status, falcon.HTTP_OK)
# noinspection PyUnusedLocal
@log_capture()
def test_star_request_accept(self, log):
response = self.simulate_get('/api/versions', headers={'Accept': '*/*'})
self.assertEqual(response.status, falcon.HTTP_OK)
@log_capture()
def test_empty_request_body(self, log):
response = self.simulate_post('/api/bogus',
headers={'Accept': 'application/json',
'Content-Type': 'application/json',
'Content-Length': '8'})
self.assertEqual(response.status, falcon.HTTP_BAD_REQUEST)
self.assertEqual(response.json['result'], 'Empty request body - valid JSON required')
self.assertIn('time', response.json)
self.assertIn('POST:/api/bogus:400 Bad Request:Empty request body - valid JSON required',
str(log))
@log_capture()
def test_method_not_allowed(self, log):
response = self.simulate_delete('/api/versions', headers={'Accept': 'application/json'})
self.assertEqual(response.status, falcon.HTTP_METHOD_NOT_ALLOWED)
self.assertEqual(response.json['result'], "['GET', 'OPTIONS']")
self.assertIn('DELETE:/versions:405 Method Not Allowed', str(log))
@log_capture()
def test_non_json_request_accept(self, log):
response = self.simulate_get('/api/versions', headers={'Accept': 'text/html'})
self.assertEqual(response.status, falcon.HTTP_NOT_ACCEPTABLE)
self.assertEqual(response.json['result'],
'WandAPI only supports JSON encoded responses')
self.assertIn('GET:/api/versions:406 Not Acceptable', str(log))
@log_capture()
def test_non_json_request_content(self, log):
response = self.simulate_post('/api/bogus', body='non json content',
headers={'Accept': 'application/json'})
self.assertEqual(response.status, falcon.HTTP_UNSUPPORTED_MEDIA_TYPE)
self.assertEqual(response.json['result'],
'WandAPI only supports JSON encoded requests')
self.assertIn('POST:/api/bogus:415 Unsupported Media Type', str(log))
@log_capture()
def test_non_json_request_content_bad_content_type(self, log):
response = self.simulate_post('/api/bogus', body='non json content',
headers={'Accept': 'application/json',
'Content-Type': 'application/warez'})
self.assertEqual(response.status, falcon.HTTP_UNSUPPORTED_MEDIA_TYPE)
self.assertEqual(response.json['result'],
'WandAPI only supports JSON encoded requests')
self.assertIn('POST:/api/bogus:415 Unsupported Media Type', str(log))
@log_capture()
def test_post_request_empty_body(self, log):
response = self.simulate_post('/api/bogus', body='',
headers={'Accept': 'application/json',
'Content-Type': 'application/json'})
self.assertEqual(response.status, falcon.HTTP_BAD_REQUEST)
self.assertEqual(response.json['result'], 'Empty request body - valid JSON required')
self.assertIn('time', response.json)
self.assertIn('POST:/api/bogus:400 Bad Request:Empty request body - valid JSON required',
str(log))
@log_capture()
def test_put_request_empty_body(self, log):
response = self.simulate_post('/api/bogus', body='',
headers={'Accept': 'application/json',
'Content-Type': 'application/json'})
self.assertEqual(response.status, falcon.HTTP_BAD_REQUEST)
self.assertEqual(response.json['result'], 'Empty request body - valid JSON required')
self.assertIn('time', response.json)
self.assertIn('POST:/api/bogus:400 Bad Request:Empty request body - valid JSON required',
str(log))
@log_capture()
def test_non_json_request_data(self, log):
response = self.simulate_post('/api/bogus', body='non json',
headers={'Accept': 'application/json',
'Content-Type': 'application/json'})
self.assertEqual(response.status, falcon.HTTP_BAD_REQUEST)
self.assertEqual(
response.json['result'],
'Could not decode request body - JSON was incorrect or not encoded as UTF-8')
self.assertIn('mS', response.json['time'])
self.assertIn('POST:/api/bogus:400 Bad Request', str(log))
| 49.495413 | 97 | 0.61279 | 578 | 5,395 | 5.574394 | 0.141869 | 0.088454 | 0.13563 | 0.058038 | 0.842024 | 0.815022 | 0.789882 | 0.770329 | 0.742086 | 0.715084 | 0 | 0.006556 | 0.264875 | 5,395 | 108 | 98 | 49.953704 | 0.80585 | 0.018906 | 0 | 0.573034 | 0 | 0 | 0.256813 | 0.003974 | 0 | 0 | 0 | 0 | 0.348315 | 1 | 0.123596 | false | 0 | 0.033708 | 0 | 0.179775 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
d38a990ebf666d1995a29a8f26b8d16ee2404861 | 1,622 | py | Python | wordbank.py | Yoonsen/Modules | 205a344c73e597d8bd1c004c69e054b549b9a67f | [
"MIT"
] | 2 | 2019-10-15T06:57:56.000Z | 2021-02-10T09:28:18.000Z | wordbank.py | Yoonsen/Modules | 205a344c73e597d8bd1c004c69e054b549b9a67f | [
"MIT"
] | null | null | null | wordbank.py | Yoonsen/Modules | 205a344c73e597d8bd1c004c69e054b549b9a67f | [
"MIT"
] | 1 | 2019-10-14T12:29:12.000Z | 2019-10-14T12:29:12.000Z | import requests
# Norwegian word bank
def word_variant(word, form, lang = 'nob'):
""" Find alternative form for a given word form, e.g. word_variant('spiste', 'pres-part') """
r = requests.get("https://api.nb.no/ngram/variant_form", params={'word':word, 'form':form, 'lang':lang})
return r.json()
def word_paradigm(word, lang = 'nob'):
""" Find paradigm form for a word """
r = requests.get("https://api.nb.no/ngram/paradigm", params = {'word': word, 'lang':lang})
return r.json()
def word_paradigm_many(wordlist, lang = 'nob'):
""" Find alternative form for a list words """
r = requests.post("https://api.nb.no/ngram/paradigms", json = {'words': wordlist, 'lang':lang})
return r.json()
def word_form(word, lang = 'nob'):
""" Find alternative form for a given word form, e.g. word_variant('spiste', 'pres-part') """
r = requests.get("https://api.nb.no/ngram/word_form", params = {'word': word, 'lang':lang})
return r.json()
def word_form_many(wordlist, lang = 'nob'):
""" Find alternative forms for a list of words """
r = requests.post("https://api.nb.no/ngram/word_forms", json = {'words': wordlist, 'lang':lang})
return r.json()
def word_lemma(word, lang = 'nob'):
""" Find lemma form for a given word form """
r = requests.get("https://api.nb.no/ngram/word_lemma", params = {'word': word, 'lang':lang})
return r.json()
def word_lemma_many(wordlist, lang = 'nob'):
""" Find lemma form for a given word form """
r = requests.post("https://api.nb.no/ngram/word_lemmas", json = {'words': wordlist, 'lang':lang})
return r.json() | 41.589744 | 108 | 0.639951 | 244 | 1,622 | 4.184426 | 0.168033 | 0.070519 | 0.075416 | 0.082272 | 0.818805 | 0.807052 | 0.761998 | 0.732615 | 0.610186 | 0.485798 | 0 | 0 | 0.17016 | 1,622 | 39 | 109 | 41.589744 | 0.758544 | 0.239211 | 0 | 0.318182 | 0 | 0 | 0.269295 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.318182 | false | 0 | 0.045455 | 0 | 0.681818 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 6 |
d38e2253fb1079fd5b0d92f534562a731c561d78 | 4,579 | py | Python | tests/command_parser_test.py | njbrown09/votey | 414abf522254bfa3e2203dcc1c09925255095f6c | [
"MIT"
] | 17 | 2019-02-05T02:07:59.000Z | 2022-03-22T14:35:58.000Z | tests/command_parser_test.py | njbrown09/votey | 414abf522254bfa3e2203dcc1c09925255095f6c | [
"MIT"
] | 22 | 2019-02-02T19:55:14.000Z | 2022-03-23T18:13:15.000Z | tests/command_parser_test.py | njbrown09/votey | 414abf522254bfa3e2203dcc1c09925255095f6c | [
"MIT"
] | 5 | 2019-03-12T18:11:44.000Z | 2022-03-23T18:59:30.000Z | # type: ignore
from unittest.mock import MagicMock
import pytest
from votey.slack import ANON_KEYWORDS
from votey.slack import get_command_from_req
from votey.slack import SECRET_KEYWORDS
from votey.utils import OptionData
# Shared fixture text used to build the slash-command payloads in the tests.
QUESTION = "Some Question?"
FIRST_OPTION = "Some Option1"
SECOND_OPTION = "Some Option2"
def test_basic_command():
    """A plain two-option poll parses with no anonymity and no secrecy."""
    payload = {"text": f'"{QUESTION}" "{FIRST_OPTION}" "{SECOND_OPTION}"'}
    command = get_command_from_req(payload, MagicMock())

    assert command is not None
    assert command.question == QUESTION
    expected_options = [
        OptionData(FIRST_OPTION, None),
        OptionData(SECOND_OPTION, None),
    ]
    assert command.options == expected_options
    assert not command.anonymous
    assert not command.secret
@pytest.mark.parametrize("keyword", SECRET_KEYWORDS)
def test_secret(keyword):
    """Each secret keyword makes the poll both anonymous and secret."""
    text = f'"{QUESTION}" "{FIRST_OPTION}" "{SECOND_OPTION}" {keyword}'
    command = get_command_from_req({"text": text}, MagicMock())

    assert command is not None
    assert command.anonymous
    assert command.secret
@pytest.mark.parametrize("keyword", ANON_KEYWORDS)
def test_anonymous(keyword):
    """Each anonymity keyword makes the poll anonymous but not secret."""
    text = f'"{QUESTION}" "{FIRST_OPTION}" "{SECOND_OPTION}" {keyword}'
    command = get_command_from_req({"text": text}, MagicMock())

    assert command is not None
    assert command.anonymous
    assert not command.secret
def test_emoji_option_command():
    """An emoji following each option is attached to that option."""
    text = f'"{QUESTION}" "{FIRST_OPTION}" :someemoji1: "{SECOND_OPTION}" :someemoji2:'
    command = get_command_from_req({"text": text}, MagicMock())

    assert command is not None
    assert command.question == QUESTION
    expected_options = [
        OptionData(FIRST_OPTION, ":someemoji1:"),
        OptionData(SECOND_OPTION, ":someemoji2:"),
    ]
    assert command.options == expected_options
    assert not command.anonymous
    assert not command.secret
def test_emoji_option_command_without_last_option():
    """Only the first option carries an emoji; the last gets None."""
    text = f'"{QUESTION}" "{FIRST_OPTION}" :someemoji1: "{SECOND_OPTION}"'
    command = get_command_from_req({"text": text}, MagicMock())

    assert command is not None
    assert command.question == QUESTION
    expected_options = [
        OptionData(FIRST_OPTION, ":someemoji1:"),
        OptionData(SECOND_OPTION, None),
    ]
    assert command.options == expected_options
    assert not command.anonymous
    assert not command.secret
def test_emoji_option_command_without_first_option():
    """Only the last option carries an emoji; the first gets None."""
    text = f'"{QUESTION}" "{FIRST_OPTION}" "{SECOND_OPTION}" :some-emoji:'
    command = get_command_from_req({"text": text}, MagicMock())

    assert command is not None
    assert command.question == QUESTION
    expected_options = [
        OptionData(FIRST_OPTION, None),
        OptionData(SECOND_OPTION, ":some-emoji:"),
    ]
    assert command.options == expected_options
    assert not command.anonymous
    assert not command.secret
def test_emoji_option_command_with_secret():
    """--secret combines with per-option emoji; poll is anonymous and secret."""
    text = f'"{QUESTION}" "{FIRST_OPTION}" "{SECOND_OPTION}" :some-emoji: --secret'
    command = get_command_from_req({"text": text}, MagicMock())

    assert command is not None
    assert command.question == QUESTION
    expected_options = [
        OptionData(FIRST_OPTION, None),
        OptionData(SECOND_OPTION, ":some-emoji:"),
    ]
    assert command.options == expected_options
    assert command.anonymous
    assert command.secret
def test_emoji_option_command_with_anonymous():
    """--anonymous combines with per-option emoji; poll is anonymous only."""
    text = f'"{QUESTION}" "{FIRST_OPTION}" :some-emoji: "{SECOND_OPTION}" --anonymous'
    command = get_command_from_req({"text": text}, MagicMock())

    assert command is not None
    assert command.question == QUESTION
    expected_options = [
        OptionData(FIRST_OPTION, ":some-emoji:"),
        OptionData(SECOND_OPTION, None),
    ]
    assert command.options == expected_options
    assert command.anonymous
    assert not command.secret
def test_anonymous_with_voting_emoji():
    """--anonymous=:emoji: sets anonymity and the custom voting emoji."""
    text = f'"{QUESTION}" "{FIRST_OPTION}" :some-emoji: "{SECOND_OPTION}" --anonymous=:soccer:'
    command = get_command_from_req({"text": text}, MagicMock())

    assert command is not None
    assert command.question == QUESTION
    expected_options = [
        OptionData(FIRST_OPTION, ":some-emoji:"),
        OptionData(SECOND_OPTION, None),
    ]
    assert command.options == expected_options
    assert command.anonymous
    assert not command.secret
    assert command.vote_emoji == ":soccer:"
def test_secret_with_voting_emoji():
    """--secret=:emoji: sets secrecy, anonymity and the custom voting emoji."""
    text = f'"{QUESTION}" "{FIRST_OPTION}" :some-emoji: "{SECOND_OPTION}" --secret=:soccer:'
    command = get_command_from_req({"text": text}, MagicMock())

    assert command is not None
    assert command.question == QUESTION
    expected_options = [
        OptionData(FIRST_OPTION, ":some-emoji:"),
        OptionData(SECOND_OPTION, None),
    ]
    assert command.options == expected_options
    assert command.anonymous
    assert command.secret
    assert command.vote_emoji == ":soccer:"
| 26.468208 | 104 | 0.633763 | 523 | 4,579 | 5.330784 | 0.09369 | 0.11944 | 0.060617 | 0.067073 | 0.857963 | 0.857963 | 0.83142 | 0.807747 | 0.792683 | 0.792683 | 0 | 0.002309 | 0.243503 | 4,579 | 172 | 105 | 26.622093 | 0.80254 | 0.002621 | 0 | 0.594595 | 0 | 0 | 0.187952 | 0 | 0 | 0 | 0 | 0 | 0.324324 | 1 | 0.067568 | false | 0 | 0.040541 | 0 | 0.108108 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
d3acbced9fa4135185b918899f82a1d971d85f10 | 488 | py | Python | corehq/apps/locations/tests/__init__.py | johan--/commcare-hq | 86ee99c54f55ee94e4c8f2f6f30fc44e10e69ebd | [
"BSD-3-Clause"
] | null | null | null | corehq/apps/locations/tests/__init__.py | johan--/commcare-hq | 86ee99c54f55ee94e4c8f2f6f30fc44e10e69ebd | [
"BSD-3-Clause"
] | 1 | 2022-03-12T01:03:25.000Z | 2022-03-12T01:03:25.000Z | corehq/apps/locations/tests/__init__.py | johan--/commcare-hq | 86ee99c54f55ee94e4c8f2f6f30fc44e10e69ebd | [
"BSD-3-Clause"
] | null | null | null | from .test_dbaccessors import *
from .test_location_expressions import *
from .test_location_fixtures import *
from .test_location_groups import *
from .test_location_import import *
from .test_location_queries import *
from .test_location_set import *
from .test_location_types import *
from .test_location_utils import *
from .test_locations import *
from .test_permissions import *
from .test_products_at_location import *
from .test_reupholster import *
from .test_site_code import *
| 32.533333 | 40 | 0.827869 | 67 | 488 | 5.656716 | 0.283582 | 0.295515 | 0.480211 | 0.46438 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.114754 | 488 | 14 | 41 | 34.857143 | 0.877315 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 0 | 0 | 0 | null | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
6ca7b6cbe0898791f1960545fba33a3edc2f49a0 | 3,477 | py | Python | test/test_md047.py | scop/pymarkdown | 562ba8f7857d99ba09e86e42de5a37ec6d9b2c30 | [
"MIT"
] | null | null | null | test/test_md047.py | scop/pymarkdown | 562ba8f7857d99ba09e86e42de5a37ec6d9b2c30 | [
"MIT"
] | null | null | null | test/test_md047.py | scop/pymarkdown | 562ba8f7857d99ba09e86e42de5a37ec6d9b2c30 | [
"MIT"
] | null | null | null | """
Module to provide tests related to the MD047 rule.
"""
from test.markdown_scanner import MarkdownScanner
import pytest
@pytest.mark.rules
def test_md047_all_samples():
"""
Test to make sure we get the expected behavior after scanning the files in the
test/resources/rules/md047 directory.
"""
# Arrange
scanner = MarkdownScanner()
supplied_arguments = ["scan", "test/resources/rules/md047"]
expected_return_code = 1
expected_output = (
"test/resources/rules/md047/end_with_no_blank_line.md:3:41: "
+ "MD047: Each file should end with a single newline character. "
+ "(single-trailing-newline)\n"
+ "test/resources/rules/md047/end_with_no_blank_line_and_spaces.md:4:2: "
+ "MD047: Each file should end with a single newline character. "
+ "(single-trailing-newline)\n"
)
expected_error = ""
# Act
execute_results = scanner.invoke_main(arguments=supplied_arguments)
# Assert
execute_results.assert_results(
expected_output, expected_error, expected_return_code
)
@pytest.mark.rules
def test_md047_good_end_with_blank_line():
"""
Test to make sure we get the expected behavior after scanning a good file from the
test/resources/rules/md047 directory.
"""
# Arrange
scanner = MarkdownScanner()
supplied_arguments = ["scan", "test/resources/rules/md047/end_with_blank_line.md"]
expected_return_code = 0
expected_output = ""
expected_error = ""
# Act
execute_results = scanner.invoke_main(arguments=supplied_arguments)
# Assert
execute_results.assert_results(
expected_output, expected_error, expected_return_code
)
@pytest.mark.rules
def test_md047_bad_end_with_no_blank_line():
"""
Test to make sure we get the expected behavior after scanning a bad file from the
test/resources/rules/md047 directory which does not end with a blank line.
"""
# Arrange
scanner = MarkdownScanner()
supplied_arguments = [
"scan",
"test/resources/rules/md047/end_with_no_blank_line.md",
]
expected_return_code = 1
expected_output = (
"test/resources/rules/md047/end_with_no_blank_line.md:3:41: "
+ "MD047: Each file should end with a single newline character. (single-trailing-newline)\n"
)
expected_error = ""
# Act
execute_results = scanner.invoke_main(arguments=supplied_arguments)
# Assert
execute_results.assert_results(
expected_output, expected_error, expected_return_code
)
@pytest.mark.rules
def test_md047_bad_end_with_blank_line_containing_spaces():
"""
Test to make sure we get the expected behavior after scanning a bad file from the
test/resources/rules/md047 directory which does not end with a blank line.
"""
# Arrange
scanner = MarkdownScanner()
supplied_arguments = [
"scan",
"test/resources/rules/md047/end_with_no_blank_line_and_spaces.md",
]
expected_return_code = 1
expected_output = (
"test/resources/rules/md047/end_with_no_blank_line_and_spaces.md:4:2: "
+ "MD047: Each file should end with a single newline character. (single-trailing-newline)\n"
)
expected_error = ""
# Act
execute_results = scanner.invoke_main(arguments=supplied_arguments)
# Assert
execute_results.assert_results(
expected_output, expected_error, expected_return_code
)
| 28.5 | 100 | 0.699741 | 443 | 3,477 | 5.234763 | 0.1693 | 0.048297 | 0.093144 | 0.119017 | 0.92583 | 0.924968 | 0.900819 | 0.900819 | 0.89737 | 0.89737 | 0 | 0.028143 | 0.213115 | 3,477 | 121 | 101 | 28.735537 | 0.819444 | 0.195283 | 0 | 0.646154 | 0 | 0 | 0.301147 | 0.203478 | 0 | 0 | 0 | 0 | 0.061538 | 1 | 0.061538 | false | 0 | 0.030769 | 0 | 0.092308 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
6cdaf73d27f50497b3b6b9de3fdca3bc2edc6759 | 152 | py | Python | tests/testapp1/views/myview.py | Prev/jikji | 1878791456737e3053bc7e0748a7a890a77a98bc | [
"MIT"
] | 11 | 2016-09-14T12:03:43.000Z | 2021-01-24T08:56:28.000Z | tests/testapp1/views/myview.py | Prev/jikji | 1878791456737e3053bc7e0748a7a890a77a98bc | [
"MIT"
] | 17 | 2016-09-20T09:36:55.000Z | 2020-07-05T05:30:10.000Z | tests/testapp1/views/myview.py | Prev/jikji | 1878791456737e3053bc7e0748a7a890a77a98bc | [
"MIT"
] | 5 | 2016-09-29T06:52:43.000Z | 2017-06-21T00:36:41.000Z | from jikji import render_template, register_view
@register_view(url_rule='/$1.html')
def myview(num) :
return render_template('template1.html', n=num) | 30.4 | 48 | 0.782895 | 23 | 152 | 4.956522 | 0.73913 | 0.245614 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.014388 | 0.085526 | 152 | 5 | 49 | 30.4 | 0.805755 | 0 | 0 | 0 | 0 | 0 | 0.143791 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.25 | false | 0 | 0.25 | 0.25 | 0.75 | 0 | 1 | 0 | 0 | null | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 1 | 1 | 0 | 0 | 6 |
9f59522faa43ac4d0a33526a9d766cf91622f22b | 39 | py | Python | script/hello.py | YannMjl/IoT-basic-chat-app | be63f4e89936bdf462650869d0cdd160c3ff27eb | [
"MIT"
] | null | null | null | script/hello.py | YannMjl/IoT-basic-chat-app | be63f4e89936bdf462650869d0cdd160c3ff27eb | [
"MIT"
] | null | null | null | script/hello.py | YannMjl/IoT-basic-chat-app | be63f4e89936bdf462650869d0cdd160c3ff27eb | [
"MIT"
] | null | null | null | print('Hello World! this is python 3')
| 19.5 | 38 | 0.717949 | 7 | 39 | 4 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.030303 | 0.153846 | 39 | 1 | 39 | 39 | 0.818182 | 0 | 0 | 0 | 0 | 0 | 0.74359 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 0 | 0 | 0 | 1 | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 1 | 0 | 6 |
4cbb5061de58ffc795815ea903f90c6005f2f6b6 | 10,189 | py | Python | tests/test_simulator/test_combat.py | LaraFerCue/dnd_combat_simulator | 7a5139ed0518628acab91fccc52f1b104f1337e6 | [
"BSD-3-Clause"
] | null | null | null | tests/test_simulator/test_combat.py | LaraFerCue/dnd_combat_simulator | 7a5139ed0518628acab91fccc52f1b104f1337e6 | [
"BSD-3-Clause"
] | null | null | null | tests/test_simulator/test_combat.py | LaraFerCue/dnd_combat_simulator | 7a5139ed0518628acab91fccc52f1b104f1337e6 | [
"BSD-3-Clause"
] | null | null | null | from typing import List
from dnd.models.armor import Armor, ArmorType
from dnd.models.character import Character, CharacterCategory, Ability
from dnd.models.damage import Damage, DamageType
from dnd.models.die import D10
from dnd.models.spell import Spell
from dnd.models.weapon import Weapon, WeaponType
from dnd.simulator.combat import Combat
from tests.mocking_models.dummies import DUMMY_PLAYER_WEAPON, DUMMY_ENEMY_WEAPON, DUMMY_CHARACTER
from tests.mocking_models.mocking_die import MockingDie
def dummy_lose_checker(characters: List[Character]) -> bool:
    """Return True as soon as any character has dropped to 0 hit points or below.

    Used as the ``lose_checker`` callback when constructing ``Combat`` in these
    tests.

    :param characters: characters whose hit points are inspected
    :return: True if at least one character has ``hit_points <= 0``
    """
    # any() with a generator expresses the original early-exit loop directly.
    return any(character.hit_points <= 0 for character in characters)
def test_get_player_target_single_player():
    """With exactly one player in the fight, that player is the target."""
    lone_player = Character(**DUMMY_CHARACTER)
    lone_player.armor = Armor(11, ArmorType.LIGHT)

    fight = Combat(players=[lone_player], enemies=[], lose_checker=dummy_lose_checker)

    assert fight.get_player_target() == lone_player
def test_get_player_target_multiple_player():
    """Of two players, the one wearing the AC-14 armor is chosen as target."""
    lightly_armored = Character(**DUMMY_CHARACTER)
    lightly_armored.armor = Armor(11, ArmorType.LIGHT)
    heavily_armored = Character(**DUMMY_CHARACTER)
    heavily_armored.armor = Armor(14, ArmorType.LIGHT)

    fight = Combat(players=[lightly_armored, heavily_armored], enemies=[],
                   lose_checker=dummy_lose_checker)

    assert fight.get_player_target() == heavily_armored
def test_get_enemy_target_single_enemy():
    """With exactly one enemy in the fight, that enemy is the target."""
    lone_enemy = Character(**DUMMY_CHARACTER)
    lone_enemy.armor = Armor(11, ArmorType.LIGHT)

    fight = Combat(enemies=[lone_enemy], players=[], lose_checker=dummy_lose_checker)

    assert fight.get_enemy_target() == lone_enemy
def test_get_enemy_target_multiple_enemies():
    """Of two enemies, the one wearing the AC-11 armor is chosen as target."""
    weak_armor_enemy = Character(**DUMMY_CHARACTER)
    weak_armor_enemy.armor = Armor(11, ArmorType.LIGHT)
    strong_armor_enemy = Character(**DUMMY_CHARACTER)
    strong_armor_enemy.armor = Armor(14, ArmorType.LIGHT)

    fight = Combat(enemies=[weak_armor_enemy, strong_armor_enemy], players=[],
                   lose_checker=dummy_lose_checker)

    assert fight.get_enemy_target() == weak_armor_enemy
def test_won_combat_with_two_characters():
    """A 10-HP player defeats a 2-HP enemy when every roll is forced to 15."""
    base_stats = dict(strength=10, dexterity=10, constitution=10,
                      intelligence=10, wisdom=10, charisma=10)

    hero = Character(**base_stats, hit_points=10,
                     category=CharacterCategory.PLAYABLE)
    hero.active_weapon = DUMMY_PLAYER_WEAPON

    foe = Character(**base_stats, hit_points=2,
                    category=CharacterCategory.NON_PLAYABLE)
    foe.active_weapon = DUMMY_ENEMY_WEAPON

    fight = Combat(players=[hero], enemies=[foe],
                   lose_checker=dummy_lose_checker, die=MockingDie(15))

    assert fight.initiate_combat() == Combat.Result.WIN
def test_lost_combat_with_two_characters():
    """A 1-HP player loses to a 20-HP enemy when every roll is forced to 15."""
    base_stats = dict(strength=10, dexterity=10, constitution=10,
                      intelligence=10, wisdom=10, charisma=10)

    hero = Character(**base_stats, hit_points=1,
                     category=CharacterCategory.PLAYABLE)
    hero.active_weapon = DUMMY_PLAYER_WEAPON

    foe = Character(**base_stats, hit_points=20,
                    category=CharacterCategory.NON_PLAYABLE)
    foe.active_weapon = DUMMY_ENEMY_WEAPON

    fight = Combat(players=[hero], enemies=[foe],
                   lose_checker=dummy_lose_checker, die=MockingDie(15))

    assert fight.initiate_combat() == Combat.Result.LOSE
def test_won_combat_several_players_one_enemy():
    """Three identical players beat one 30-HP enemy with rolls fixed at 10."""
    player_stats = dict(strength=15, dexterity=10, constitution=14,
                        intelligence=10, wisdom=10, charisma=10,
                        hit_points=20, category=CharacterCategory.PLAYABLE)

    party = [Character(**player_stats) for _ in range(3)]
    for member in party:
        member.active_weapon = DUMMY_PLAYER_WEAPON

    boss = Character(strength=18, dexterity=10, constitution=16, intelligence=10,
                     wisdom=10, charisma=10, hit_points=30,
                     category=CharacterCategory.NON_PLAYABLE)
    boss.active_weapon = DUMMY_ENEMY_WEAPON

    fight = Combat(players=party, enemies=[boss],
                   lose_checker=dummy_lose_checker, die=MockingDie(10))

    assert fight.initiate_combat() == Combat.Result.WIN
def test_lost_combat_several_players_one_enemy():
    """Three identical players lose to one 90-HP enemy with rolls fixed at 10."""
    player_stats = dict(strength=15, dexterity=10, constitution=14,
                        intelligence=10, wisdom=10, charisma=10,
                        hit_points=20, category=CharacterCategory.PLAYABLE)

    party = [Character(**player_stats) for _ in range(3)]
    for member in party:
        member.active_weapon = DUMMY_PLAYER_WEAPON

    boss = Character(strength=18, dexterity=10, constitution=16, intelligence=10,
                     wisdom=10, charisma=10, hit_points=90,
                     category=CharacterCategory.NON_PLAYABLE)
    boss.active_weapon = Weapon(damage=Damage([MockingDie(10)], DamageType.PIERCING),
                                weapon_type=WeaponType.MARTIAL_MELEE)

    fight = Combat(players=party, enemies=[boss],
                   lose_checker=dummy_lose_checker, die=MockingDie(10))

    assert fight.initiate_combat() == Combat.Result.LOSE
def test_get_statistics_basic_combat():
    """A one-sided, one-turn fight yields per-character HP statistics."""
    base_stats = dict(strength=10, dexterity=10, constitution=10,
                      intelligence=10, wisdom=10, charisma=10)

    hero = Character(**base_stats, hit_points=10,
                     category=CharacterCategory.PLAYABLE, name='player')
    hero.active_weapon = DUMMY_PLAYER_WEAPON

    foe = Character(**base_stats, hit_points=2,
                    category=CharacterCategory.NON_PLAYABLE, name='enemy')
    foe.active_weapon = DUMMY_ENEMY_WEAPON

    fight = Combat(players=[hero], enemies=[foe],
                   lose_checker=dummy_lose_checker, die=MockingDie(15))
    fight.initiate_combat()

    expected = {'turns': 1, 'players': {'player': 10}, 'enemies': {'enemy': -2}}
    assert fight.get_statistics() == expected
def test_get_statistics_huge_combat():
    """A three-on-one fight reports HP statistics for every character and 4 turns."""
    player_stats = dict(strength=15, dexterity=10, constitution=14,
                        intelligence=10, wisdom=10, charisma=10,
                        hit_points=20, category=CharacterCategory.PLAYABLE)

    party = []
    for label in ('player 1', 'player 2', 'player 3'):
        member = Character(**player_stats, name=label)
        member.active_weapon = DUMMY_PLAYER_WEAPON
        party.append(member)

    beast = Character(strength=18, dexterity=10, constitution=16, intelligence=10,
                      wisdom=10, charisma=10, hit_points=90,
                      category=CharacterCategory.NON_PLAYABLE, name='beast')
    beast.active_weapon = Weapon(damage=Damage([MockingDie(10)], DamageType.PIERCING),
                                 weapon_type=WeaponType.MARTIAL_MELEE)

    fight = Combat(players=party, enemies=[beast],
                   lose_checker=dummy_lose_checker, die=MockingDie(10))
    fight.initiate_combat()

    assert fight.get_statistics() == {'enemies': {'beast': 18},
                                      'players': {'player 1': -8,
                                                  'player 2': 6,
                                                  'player 3': 6},
                                      'turns': 4}
def test_get_action_to_perform_spells():
    """A caster with a free spell slot chooses CAST; with none left, ATTACK."""
    cold_spell = Spell(Damage([D10], DamageType.MAGIC_COLD), spell_lvl=2)
    cold_spell.slots = 1

    caster = Character(strength=10, dexterity=10, constitution=10, intelligence=10,
                       wisdom=10, charisma=16, hit_points=20,
                       category=CharacterCategory.INDIFFERENT, name='character')
    caster.cast_ability = Ability.CHARISMA
    caster.spell_list.append(cold_spell)

    assert Combat.select_spell_or_weapon(caster) == Combat.Action.CAST

    # Spend the only slot; the caster must now fall back to its weapon.
    cold_spell.cast()
    assert Combat.select_spell_or_weapon(caster) == Combat.Action.ATTACK
def test_critical_roll_with_weapon():
    """With a fixed 4-die, weapon damage is 7 normally and 14 on a critical."""
    fighter = Character(strength=16, dexterity=10, constitution=10, intelligence=10,
                        wisdom=10, charisma=16, hit_points=20,
                        category=CharacterCategory.INDIFFERENT, name='character')
    fighter.active_weapon = Weapon(damage=Damage([MockingDie(4)], DamageType.PIERCING),
                                   weapon_type=WeaponType.SIMPLE_MELEE)

    assert Combat.get_weapon_damage(fighter, False) == (7, DamageType.PIERCING)
    assert Combat.get_weapon_damage(fighter, True) == (14, DamageType.PIERCING)
def test_critical_roll_with_spells():
    """With a fixed 10-die, spell damage is 10 normally and 20 on a critical."""
    cold_spell = Spell(Damage([MockingDie(10)], DamageType.MAGIC_COLD), spell_lvl=2)
    cold_spell.slots = 2

    caster = Character(strength=10, dexterity=10, constitution=10, intelligence=10,
                       wisdom=10, charisma=16, hit_points=20,
                       category=CharacterCategory.INDIFFERENT, name='character')
    caster.cast_ability = Ability.CHARISMA
    caster.spell_list.append(cold_spell)

    assert Combat.get_spell_damage(caster, False) == (10, DamageType.MAGIC_COLD)
    assert Combat.get_spell_damage(caster, True) == (20, DamageType.MAGIC_COLD)
def test_get_target_on_critical_miss():
    """On a critical miss the returned target is a character other than the attacker."""
    attacker = Character(**DUMMY_CHARACTER, name="player 1")
    bystander = Character(**DUMMY_CHARACTER, name="player 2")

    chosen = Combat.get_target_on_critical_miss(attacker, [attacker, bystander])
    assert chosen == bystander
| 49.461165 | 120 | 0.716066 | 1,197 | 10,189 | 5.89056 | 0.101921 | 0.028081 | 0.068501 | 0.065523 | 0.824564 | 0.781591 | 0.740462 | 0.72628 | 0.715218 | 0.667565 | 0 | 0.050024 | 0.1799 | 10,189 | 205 | 121 | 49.702439 | 0.793801 | 0 | 0 | 0.519737 | 0 | 0 | 0.015801 | 0 | 0 | 0 | 0 | 0 | 0.111842 | 1 | 0.098684 | false | 0 | 0.065789 | 0 | 0.177632 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
4cea6abf78f15b3c0f19daa4ee1e2f04be801b56 | 2,148 | py | Python | plugins/lenny.py | gabriel-cr/discord.py | 9a33fa89146c96394f62ee6c4165aa31f0d6da94 | [
"MIT"
] | null | null | null | plugins/lenny.py | gabriel-cr/discord.py | 9a33fa89146c96394f62ee6c4165aa31f0d6da94 | [
"MIT"
] | null | null | null | plugins/lenny.py | gabriel-cr/discord.py | 9a33fa89146c96394f62ee6c4165aa31f0d6da94 | [
"MIT"
] | 2 | 2018-10-09T15:42:59.000Z | 2019-02-11T07:16:00.000Z | from cloudbot import hook
import random
# Pool of escaped-unicode "lenny face" variants served by the `lenny` command.
lennyface = [u'( \u0361\u00B0 \u035C\u0296 \u0361\u00B0)', u'( \u0360\u00B0 \u035F\u0296 \u0361\u00B0)', u'\u1566( \u0361\xb0 \u035c\u0296 \u0361\xb0)\u1564', u'( \u0361\u00B0 \u035C\u0296 \u0361\u00B0)', u'( \u0361~ \u035C\u0296 \u0361\u00B0)', u'( \u0361o \u035C\u0296 \u0361o)', u'\u0361\u00B0 \u035C\u0296 \u0361 -', u'( \u0361\u0361 \u00B0 \u035C \u0296 \u0361 \u00B0)\uFEFF', u'( \u0361 \u0361\u00B0 \u0361\u00B0 \u0296 \u0361\u00B0 \u0361\u00B0)', u'(\u0E07 \u0360\u00B0 \u035F\u0644\u035C \u0361\u00B0)\u0E07', u'( \u0361\u00B0 \u035C\u0296 \u0361 \u00B0)', u'( \u0361\u00B0\u256D\u035C\u0296\u256E\u0361\u00B0 )']
# Larger pool of face variants served by the `flenny` command.
flennyface = [ '( \u0361\xb0 \u035c \u0361\xb0 )', '( \u0361\xb0 \u035c \u0361\xb0 )', '(\u0e07 \u0360\xb0 \u035f \u0361\xb0 )\u0e07', '( \u0361\xb0_ \u0361\xb0 )', '(\ufffd \u0361\xb0 \u035c \u0361\xb0 )\ufffd', '( \u25d5 \u035c \u25d5 )', '( \u0361~ \u035c \u0361\xb0 )', '( \u0360\xb0 \u035f \u0361\xb0 )', '( \u0ca0 \u035c \u0ca0 )', '( \u0ca5 \u035c \u0ca5 )', '( \u0361^ \u035c \u0361^ )', '( \u0ca5 _ \u0ca5 )', '( \u0361\xb0 \uff0d \u0361\xb0 )', '\u2570( \u0361\xb0 \u035c \u0361\xb0)\u2283\u2501\u2606\u309c\u30fb\u3002\u3002\u30fb\u309c\u309c\u30fb\u3002\u3002\u30fb\u309c\u2606\u309c\u30fb\u3002\u3002\u30fb\u309c\u309c\u30fb\u3002\u3002\u30fb\u309c', '\u2534\u252c\u2534\u252c\u2534\u2524( \u0361\xb0 \u035c \u251c\u252c\u2534\u252c\u2534\u252c', '( \u2310\u25a0 \u035c \u25a0 )', '( \u0361~ _ \u0361~ )', '@=( \u0361\xb0 \u035c \u0361\xb0 @ )\u2261', '( \u0361\xb0\u06a1 \u0361\xb0 )', '( \u2716_\u2716 )', '(\u3065 \u0361\xb0 \u035c \u0361\xb0 )\u3065', '\u10da( \u0361\xb0 \u035c \u0361\xb0 \u10da)', '( \u25c9 \u035c \u0361\u25d4 )' ]
@hook.command(autohelp=False)
def lenny(message, conn):
    """Reply with a randomly chosen face from ``lennyface``."""
    face = random.choice(lennyface)
    message(face)
@hook.command(autohelp=False)
def flenny(message):
    """Reply with a randomly chosen face from ``flennyface``."""
    face = random.choice(flennyface)
    message(face)
| 126.352941 | 1,228 | 0.608939 | 274 | 2,148 | 4.759124 | 0.229927 | 0.159509 | 0.089724 | 0.096626 | 0.42638 | 0.223926 | 0.180982 | 0.180982 | 0.156442 | 0.099693 | 0 | 0.389869 | 0.18203 | 2,148 | 16 | 1,229 | 134.25 | 0.352305 | 0.021415 | 0 | 0.2 | 0 | 0.3 | 0.799617 | 0.143472 | 0 | 0 | 0 | 0 | 0 | 1 | 0.2 | false | 0 | 0.2 | 0 | 0.4 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
e21c8eacff9cb128a14c29d97150cbf443ff1144 | 39 | py | Python | pandaserd/__init__.py | ba-tno/pandas-erd | 9251fd46dc21d9b8701bd4a6444e691ca1f81131 | [
"MIT"
] | 7 | 2021-02-27T14:34:03.000Z | 2021-12-19T01:50:07.000Z | pandaserd/__init__.py | ba-tno/pandas-erd | 9251fd46dc21d9b8701bd4a6444e691ca1f81131 | [
"MIT"
] | 2 | 2022-02-24T06:24:26.000Z | 2022-03-27T02:09:03.000Z | pandaserd/__init__.py | ba-tno/pandas-erd | 9251fd46dc21d9b8701bd4a6444e691ca1f81131 | [
"MIT"
] | 2 | 2021-12-13T13:31:08.000Z | 2022-02-24T06:39:42.000Z | from pandaserd.module import ERD, Table | 39 | 39 | 0.846154 | 6 | 39 | 5.5 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.102564 | 39 | 1 | 39 | 39 | 0.942857 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
e229e2952858a78eebc90eccd05c21566dbd1bd6 | 80 | py | Python | fastTSNE/pynndescent/__init__.py | toddrme2178/fastTSNE | 6eefe61965c6557b38300f75dccb2ff5d03d7a2c | [
"BSD-3-Clause"
] | 1 | 2018-09-21T16:04:10.000Z | 2018-09-21T16:04:10.000Z | fastTSNE/pynndescent/__init__.py | ender1001/fastTSNE | 176aa63da216fc9c4fe5cf6b59a5a2d21465f019 | [
"BSD-3-Clause"
] | null | null | null | fastTSNE/pynndescent/__init__.py | ender1001/fastTSNE | 176aa63da216fc9c4fe5cf6b59a5a2d21465f019 | [
"BSD-3-Clause"
] | 3 | 2019-12-07T10:22:11.000Z | 2022-02-16T00:14:42.000Z | from fastTSNE.pynndescent.pynndescent_ import NNDescent, PyNNDescentTransformer
| 40 | 79 | 0.9 | 7 | 80 | 10.142857 | 0.857143 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.0625 | 80 | 1 | 80 | 80 | 0.946667 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 1 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
e2363734e3ca052b3fcf8e0b10262e43f8aa802c | 107 | py | Python | upman/schemas/__init__.py | marcsello/upman | 9e6481f73986ba4a162962c623b8ee1c1d4dab9d | [
"MIT"
] | null | null | null | upman/schemas/__init__.py | marcsello/upman | 9e6481f73986ba4a162962c623b8ee1c1d4dab9d | [
"MIT"
] | null | null | null | upman/schemas/__init__.py | marcsello/upman | 9e6481f73986ba4a162962c623b8ee1c1d4dab9d | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
from .report_schema import ReportSchema
from .reporter_schema import ReporterSchema
| 26.75 | 43 | 0.841121 | 14 | 107 | 6.285714 | 0.785714 | 0.272727 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.010309 | 0.093458 | 107 | 3 | 44 | 35.666667 | 0.896907 | 0.196262 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | null | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
e26cfbed87ae68ae2072db297cb3ccfc79dd5a41 | 98 | py | Python | states/highscorestate.py | MetalYos/FlappyBird | 2572f3385e51a04968526375684c4dcae8c6a048 | [
"MIT"
] | null | null | null | states/highscorestate.py | MetalYos/FlappyBird | 2572f3385e51a04968526375684c4dcae8c6a048 | [
"MIT"
] | null | null | null | states/highscorestate.py | MetalYos/FlappyBird | 2572f3385e51a04968526375684c4dcae8c6a048 | [
"MIT"
] | null | null | null | import pygame
from states.basestate import BaseState
class HighScoresState(BaseState):
    """Placeholder game state for the high-scores screen.

    No behavior is implemented yet; everything is inherited from BaseState.
    """
    pass
| 14 | 38 | 0.806122 | 11 | 98 | 7.181818 | 0.727273 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.153061 | 98 | 6 | 39 | 16.333333 | 0.951807 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0.25 | 0.5 | 0 | 0.75 | 0 | 1 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 1 | 1 | 0 | 1 | 0 | 0 | 6 |
e28cbfb846483f3abca076bc8c068d7df2bd5b9f | 9,214 | py | Python | model/resnet.py | Rajneesh-Tiwari/WhaleDet | 35738658c445a35c03796fa69517f492b7870ced | [
"MIT"
] | 1 | 2019-02-16T04:41:32.000Z | 2019-02-16T04:41:32.000Z | model/resnet.py | Rajneesh-Tiwari/WhaleDet | 35738658c445a35c03796fa69517f492b7870ced | [
"MIT"
] | null | null | null | model/resnet.py | Rajneesh-Tiwari/WhaleDet | 35738658c445a35c03796fa69517f492b7870ced | [
"MIT"
] | null | null | null | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from keras.layers import Activation, Add, AveragePooling2D, BatchNormalization, Conv2D, Dense, Flatten, Input, MaxPool2D, ZeroPadding2D
from keras.initializers import glorot_uniform
from keras.models import Model
def identity(x, filters, kernel_size, stage, block, regularizer=None, trainable=True):
    """Build an identity residual block.

    Three conv/batch-norm steps are applied to ``x`` and the result is added
    back onto the unmodified input before the final ReLU.

    :param x: input tensor
    :param filters: three filter counts, one per convolution
    :param kernel_size: kernel of the middle convolution
    :param stage: integer, goes into the layer names
    :param block: integer, goes into the layer names
    :param regularizer: keras regularizer applied to every convolution
    :param trainable: bool, False freezes the block's layers during training
    :return: output tensor
    """
    bottleneck_in, bottleneck_mid, channels_out = filters
    shortcut = x

    layer_specs = (
        ('a', bottleneck_in, (1, 1), 'valid'),
        ('b', bottleneck_mid, kernel_size, 'same'),
        ('c', channels_out, (1, 1), 'valid'),
    )
    for suffix, n_filters, size, pad in layer_specs:
        x = Conv2D(filters=n_filters, kernel_size=size, strides=(1, 1), padding=pad,
                   name='conv%d_%d_%s' % (stage, block, suffix),
                   kernel_initializer=glorot_uniform(), kernel_regularizer=regularizer,
                   trainable=trainable)(x)
        x = BatchNormalization(axis=3, name='bn%d_%d_%s' % (stage, block, suffix))(x)
        # The last conv's ReLU is deferred until after the shortcut addition.
        if suffix != 'c':
            x = Activation('relu')(x)

    x = Add()([x, shortcut])
    return Activation('relu')(x)
def conv(x, filters, kernel_size, stage, block, strides=(2, 2), regularizer=None, trainable=True):
    """Build a convolutional residual block.

    Like :func:`identity`, but the first convolution and the shortcut are both
    strided and the shortcut is projected through its own conv/batch-norm so
    the two paths match in shape before being added.

    :param x: input tensor
    :param filters: three filter counts, one per convolution
    :param kernel_size: kernel of the middle convolution
    :param stage: integer, goes into the layer names
    :param block: integer, goes into the layer names
    :param strides: strides of the first convolution and the shortcut projection
    :param regularizer: keras regularizer applied to every convolution
    :param trainable: bool, False freezes the block's layers during training
    :return: output tensor
    """
    bottleneck_in, bottleneck_mid, channels_out = filters
    shortcut = x

    layer_specs = (
        ('a', bottleneck_in, (1, 1), strides, 'valid'),
        ('b', bottleneck_mid, kernel_size, (1, 1), 'same'),
        ('c', channels_out, (1, 1), (1, 1), 'valid'),
    )
    for suffix, n_filters, size, stride, pad in layer_specs:
        x = Conv2D(filters=n_filters, kernel_size=size, strides=stride, padding=pad,
                   name='conv%d_%d_%s' % (stage, block, suffix),
                   kernel_initializer=glorot_uniform(), kernel_regularizer=regularizer,
                   trainable=trainable)(x)
        x = BatchNormalization(axis=3, name='bn%d_%d_%s' % (stage, block, suffix))(x)
        # The last conv's ReLU is deferred until after the shortcut addition.
        if suffix != 'c':
            x = Activation('relu')(x)

    # Project the shortcut so its spatial size and channel count match x.
    shortcut = Conv2D(filters=channels_out, kernel_size=(1, 1), strides=strides,
                      padding='valid', name='conv%d_%d_s' % (stage, block),
                      kernel_initializer=glorot_uniform(), kernel_regularizer=regularizer,
                      trainable=trainable)(shortcut)
    shortcut = BatchNormalization(axis=3, name='bn%d_%d_s' % (stage, block))(shortcut)

    x = Add()([x, shortcut])
    return Activation('relu')(x)
def ResNet50(input_shape=(224, 224, 3), num_classes=6):
    """Build a ResNet-50 classification network.

    :param input_shape: (height, width, channels) of the input images
    :param num_classes: number of classes for the softmax output layer
    :return: an uncompiled keras ``Model`` named 'ResNet50'
    """
    img = Input(input_shape)
    # Stem: 7x7 stride-2 convolution followed by 3x3 stride-2 max pooling.
    x = ZeroPadding2D((3, 3))(img)
    x = Conv2D(64, (7, 7), strides=(2, 2), padding='valid', name='conv1', kernel_initializer=glorot_uniform())(x)
    x = BatchNormalization(axis=3, name='bn1')(x)
    x = Activation('relu')(x)
    x = MaxPool2D((3, 3), strides=(2, 2), padding='same')(x)
    # Stage 2.
    x = conv(x, filters=[64, 64, 256], kernel_size=(3, 3), strides=(1, 1), stage=2, block=1)
    x = identity(x, filters=[64, 64, 256], kernel_size=(3, 3), stage=2, block=2)
    x = identity(x, filters=[64, 64, 256], kernel_size=(3, 3), stage=2, block=3)
    # Stage 3.
    x = conv(x, filters=[128, 128, 512], kernel_size=(3, 3), stage=3, block=1)
    x = identity(x, filters=[128, 128, 512], kernel_size=(3, 3), stage=3, block=2)
    x = identity(x, filters=[128, 128, 512], kernel_size=(3, 3), stage=3, block=3)
    x = identity(x, filters=[128, 128, 512], kernel_size=(3, 3), stage=3, block=4)
    # Stage 4.
    x = conv(x, filters=[256, 256, 1024], kernel_size=(3, 3), stage=4, block=1)
    x = identity(x, filters=[256, 256, 1024], kernel_size=(3, 3), stage=4, block=2)
    x = identity(x, filters=[256, 256, 1024], kernel_size=(3, 3), stage=4, block=3)
    x = identity(x, filters=[256, 256, 1024], kernel_size=(3, 3), stage=4, block=4)
    x = identity(x, filters=[256, 256, 1024], kernel_size=(3, 3), stage=4, block=5)
    x = identity(x, filters=[256, 256, 1024], kernel_size=(3, 3), stage=4, block=6)
    # Stage 5.
    x = conv(x, filters=[512, 512, 2048], kernel_size=(3, 3), stage=5, block=1)
    x = identity(x, filters=[512, 512, 2048], kernel_size=(3, 3), stage=5, block=2)
    x = identity(x, filters=[512, 512, 2048], kernel_size=(3, 3), stage=5, block=3)
    # The backbone downsamples by a factor of 32; pool over the whole map.
    # Fix: the original used input_shape[0] for BOTH pooling dimensions, which
    # is wrong for non-square inputs (identical result for the 224x224 default).
    x = AveragePooling2D((int(input_shape[0] // 32), int(input_shape[1] // 32)))(x)
    x = Flatten()(x)
    x = Dense(num_classes, activation='softmax', name='fc%d' % num_classes, kernel_initializer=glorot_uniform())(x)
    model = Model(inputs=img, outputs=x, name='ResNet50')
    return model
def resnet_like_33(input_shape=(384, 512, 3), embedding_size=128):
    """Build a ResNet-style embedding network.

    :param input_shape: (height, width, channels) of the input images
    :param embedding_size: length of the output embedding vector
    :return: an uncompiled keras ``Model`` whose final layer is named 'embeddings'
    """
    img = Input(shape=input_shape)
    # Stem: 7x7 stride-2 convolution followed by 3x3 stride-2 max pooling.
    x = Conv2D(64, (7, 7), strides=(2, 2), padding='valid', name='conv1', kernel_initializer=glorot_uniform())(img)
    x = BatchNormalization(axis=3, name='bn1')(x)
    x = Activation('relu')(x)
    x = MaxPool2D((3, 3), strides=(2, 2), padding='same')(x)
    # Residual stages: conv blocks downsample, identity blocks keep shape.
    x = conv(x, filters=[64, 64, 256], kernel_size=(3, 3), strides=(1, 1), stage=2, block=1)
    x = identity(x, filters=[64, 64, 256], kernel_size=(3, 3), stage=2, block=2)
    x = identity(x, filters=[64, 64, 256], kernel_size=(3, 3), stage=2, block=3)
    x = conv(x, filters=[128, 128, 512], kernel_size=(3, 3), stage=3, block=1)
    x = identity(x, filters=[128, 128, 512], kernel_size=(3, 3), stage=3, block=2)
    x = identity(x, filters=[128, 128, 512], kernel_size=(3, 3), stage=3, block=3)
    x = identity(x, filters=[128, 128, 512], kernel_size=(3, 3), stage=3, block=4)
    x = conv(x, filters=[256, 256, 1024], kernel_size=(3, 3), stage=4, block=1)
    x = conv(x, filters=[256, 256, 1024], kernel_size=(3, 3), stage=5, block=1)
    x = conv(x, filters=[256, 256, 1024], kernel_size=(3, 3), stage=6, block=1)
    # NOTE(review): both pooling dimensions are derived from input_shape[0]
    # (the height) even though the default input is non-square -- confirm the
    # width is intentionally pooled with the height-derived window size.
    x = AveragePooling2D((int(input_shape[0]//64), int(input_shape[0]//64)))(x)
    x = Flatten()(x)
    x = Dense(512, activation='relu', kernel_initializer=glorot_uniform())(x)
    x = Dense(embedding_size, kernel_initializer=glorot_uniform(), name='embeddings')(x)
    model = Model(inputs=img, outputs=x, name='resnet_like_33')
    return model
def resnet_like_36(input_shape=(768, 1024, 3), embedding_size=128):
    """Build a deeper ResNet-style embedding network.

    :param input_shape: (height, width, channels) of the input images
    :param embedding_size: length of the output embedding vector
    :return: an uncompiled keras ``Model`` whose final layer is named 'embeddings'
    """
    img = Input(shape=input_shape)
    # Stem: 7x7 stride-2 convolution followed by 3x3 stride-2 max pooling.
    x = Conv2D(64, (7, 7), strides=(2, 2), padding='valid', name='conv1', kernel_initializer=glorot_uniform())(img)
    x = BatchNormalization(axis=3, name='bn1')(x)
    x = Activation('relu')(x)
    x = MaxPool2D((3, 3), strides=(2, 2), padding='same')(x)
    # Residual stages: conv blocks downsample, identity blocks keep shape.
    x = conv(x, filters=[64, 64, 256], kernel_size=(3, 3), strides=(1, 1), stage=2, block=1)
    x = identity(x, filters=[64, 64, 256], kernel_size=(3, 3), stage=2, block=2)
    x = identity(x, filters=[64, 64, 256], kernel_size=(3, 3), stage=2, block=3)
    x = conv(x, filters=[128, 128, 512], kernel_size=(3, 3), stage=3, block=1)
    x = identity(x, filters=[128, 128, 512], kernel_size=(3, 3), stage=3, block=2)
    x = identity(x, filters=[128, 128, 512], kernel_size=(3, 3), stage=3, block=3)
    x = identity(x, filters=[128, 128, 512], kernel_size=(3, 3), stage=3, block=4)
    x = conv(x, filters=[256, 256, 1024], kernel_size=(3, 3), stage=4, block=1)
    x = conv(x, filters=[256, 256, 1024], kernel_size=(3, 3), stage=5, block=1)
    x = conv(x, filters=[256, 256, 1024], kernel_size=(3, 3), stage=6, block=1)
    x = conv(x, filters=[256, 256, 1024], kernel_size=(3, 3), stage=7, block=1)
    # NOTE(review): both pooling dimensions are derived from input_shape[0]
    # (the height) even though the default input is non-square -- confirm the
    # width is intentionally pooled with the height-derived window size.
    x = AveragePooling2D((int(input_shape[0]//128), int(input_shape[0]//128)))(x)
    x = Flatten()(x)
    x = Dense(512, activation='relu', kernel_initializer=glorot_uniform())(x)
    x = Dense(embedding_size, kernel_initializer=glorot_uniform(), name='embeddings')(x)
    model = Model(inputs=img, outputs=x, name='ResNet_siamese')
    # Fix: removed a stray model.summary() call that printed the whole
    # architecture to stdout at construction time -- a debug leftover absent
    # from the sibling builders (ResNet50, resnet_like_33).
    return model
| 49.805405 | 135 | 0.653137 | 1,430 | 9,214 | 4.099301 | 0.086014 | 0.085295 | 0.06943 | 0.075742 | 0.904981 | 0.866087 | 0.847834 | 0.842545 | 0.823098 | 0.822927 | 0 | 0.08637 | 0.171912 | 9,214 | 184 | 136 | 50.076087 | 0.681914 | 0.099957 | 0 | 0.692308 | 0 | 0 | 0.040859 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.042735 | false | 0 | 0.051282 | 0 | 0.136752 | 0.008547 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
e29a8078183da41b952d5e16d99cd5af6ec7754b | 256 | py | Python | src/server/server/apps/stockdata/errors.py | jsarja/Stock-Price-Sandbox | d0838264ebe7a5e837f92b07c850b0f0270e460b | [
"MIT"
] | null | null | null | src/server/server/apps/stockdata/errors.py | jsarja/Stock-Price-Sandbox | d0838264ebe7a5e837f92b07c850b0f0270e460b | [
"MIT"
] | 4 | 2021-03-30T12:36:03.000Z | 2021-09-22T18:25:48.000Z | src/server/server/apps/stockdata/errors.py | jsarja/Stock-Price-Sandbox | d0838264ebe7a5e837f92b07c850b0f0270e460b | [
"MIT"
] | null | null | null | class InvalidStockNameError(Exception):
pass
class NoInternetConnectionError(Exception):
    """Raised when no internet connection is available."""
class InvalidPlotOptionParameter(Exception):
    """Raised when a plot option parameter is invalid."""
class RequestDataMissingError(Exception):
    """Raised when required data is missing from a request."""
class InvalidDateError(Exception):
    """Raised when an invalid date is supplied."""
| 16 | 44 | 0.785156 | 20 | 256 | 10.05 | 0.4 | 0.323383 | 0.358209 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.15625 | 256 | 15 | 45 | 17.066667 | 0.930556 | 0 | 0 | 0.5 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0.5 | 0 | 0 | 0.5 | 0 | 1 | 0 | 1 | null | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 6 |
2c520aead4fedf16b098958ca9a4080df577af0d | 41 | py | Python | cfpland_bot/logger/__init__.py | jonatasbaldin/cfpland-telegram-bot | fdd846240705ff6ce7705413336f6d7169a2e7fc | [
"MIT"
] | 3 | 2019-04-23T14:16:11.000Z | 2019-04-24T06:21:10.000Z | cfpland_bot/logger/__init__.py | jonatasbaldin/cfpland-telegram-bot | fdd846240705ff6ce7705413336f6d7169a2e7fc | [
"MIT"
] | 2 | 2020-07-17T14:53:16.000Z | 2021-05-09T21:42:43.000Z | cfpland_bot/logger/__init__.py | jonatasbaldin/cfpland-telegram-bot | fdd846240705ff6ce7705413336f6d7169a2e7fc | [
"MIT"
] | null | null | null | from .logger import logger # noqa: F401
| 20.5 | 40 | 0.731707 | 6 | 41 | 5 | 0.833333 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.090909 | 0.195122 | 41 | 1 | 41 | 41 | 0.818182 | 0.243902 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
2c9ba8867816a8b467870edd083f820968b76ab4 | 93 | py | Python | IA/Python/7/7.1/5.py | worthl3ss/random-small | ffb60781f57eb865acbd81aaa07056046bad32fe | [
"MIT"
] | 1 | 2022-02-23T12:47:00.000Z | 2022-02-23T12:47:00.000Z | IA/Python/7/7.1/5.py | worthl3ss/random-small | ffb60781f57eb865acbd81aaa07056046bad32fe | [
"MIT"
] | null | null | null | IA/Python/7/7.1/5.py | worthl3ss/random-small | ffb60781f57eb865acbd81aaa07056046bad32fe | [
"MIT"
] | null | null | null | def read_numbers(in_file):
return [int(x) for x in open(in_file, 'r').read().split(' ')]
| 31 | 65 | 0.634409 | 17 | 93 | 3.294118 | 0.705882 | 0.214286 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.150538 | 93 | 2 | 66 | 46.5 | 0.708861 | 0 | 0 | 0 | 0 | 0 | 0.021505 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.5 | false | 0 | 0 | 0.5 | 1 | 0 | 1 | 0 | 0 | null | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 1 | 1 | 0 | 0 | 6 |
2cb2bb1a01eb17baaa65fe3f0dcedd552eb7e173 | 160 | py | Python | auction/lots/admin.py | Gliger13/auction_site | 0358dbdee1a41da1d68cb158bccadf54aad465dd | [
"MIT"
] | 1 | 2020-06-16T20:02:55.000Z | 2020-06-16T20:02:55.000Z | auction/lots/admin.py | Gliger13/auction_site | 0358dbdee1a41da1d68cb158bccadf54aad465dd | [
"MIT"
] | null | null | null | auction/lots/admin.py | Gliger13/auction_site | 0358dbdee1a41da1d68cb158bccadf54aad465dd | [
"MIT"
from django.contrib import admin
from lots.models import Lot, Bet, ImageTags

# Expose the auction models in the Django admin interface.
for model in (Lot, Bet, ImageTags):
    admin.site.register(model)
| 20 | 43 | 0.80625 | 24 | 160 | 5.375 | 0.5 | 0.209302 | 0.395349 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.09375 | 160 | 7 | 44 | 22.857143 | 0.889655 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 0.4 | 0 | 0.4 | 0 | 1 | 0 | 0 | null | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 0 | 0 | 0 | 6 |
e2d9fe0763c8a48628228c6858e7b1ded6535293 | 40 | py | Python | tests/test_annotation.py | mtszkw/pointpicker | 0845bdbeb3d000ec577772f01a02bdb46e422350 | [
"MIT"
] | null | null | null | tests/test_annotation.py | mtszkw/pointpicker | 0845bdbeb3d000ec577772f01a02bdb46e422350 | [
"MIT"
] | null | null | null | tests/test_annotation.py | mtszkw/pointpicker | 0845bdbeb3d000ec577772f01a02bdb46e422350 | [
"MIT"
] | null | null | null | from .annotation import directory_reader | 40 | 40 | 0.9 | 5 | 40 | 7 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.075 | 40 | 1 | 40 | 40 | 0.945946 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
390c5c74200efdcee6ccb2e8f2651b35887a7b69 | 253 | py | Python | pybilt/bilayer_analyzer/__init__.py | blakeaw/ORBILT | ed402dd496534dccd00f3e75b57007d944c58c1d | [
"MIT"
] | 11 | 2019-07-29T16:21:53.000Z | 2022-02-02T11:44:57.000Z | pybilt/bilayer_analyzer/__init__.py | blakeaw/ORBILT | ed402dd496534dccd00f3e75b57007d944c58c1d | [
"MIT"
] | 11 | 2019-05-15T09:30:05.000Z | 2021-07-19T16:49:59.000Z | pybilt/bilayer_analyzer/__init__.py | blakeaw/ORBILT | ed402dd496534dccd00f3e75b57007d944c58c1d | [
"MIT"
] | 9 | 2019-08-12T11:14:45.000Z | 2020-12-22T18:22:55.000Z | from __future__ import absolute_import
from .bilayer_analyzer import BilayerAnalyzer
from . import prefab_analysis_protocols
from .bilayer_analyzer import print_valid_analyses, print_analysis_settings
from .bilayer_analyzer import print_available_plots
| 42.166667 | 75 | 0.897233 | 32 | 253 | 6.59375 | 0.5 | 0.156398 | 0.270142 | 0.35545 | 0.28436 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.083004 | 253 | 5 | 76 | 50.6 | 0.909483 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0.4 | 0 | 0 | 0 | null | 0 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
393f3bae36e301454a55dd3a0b7059dec04558d2 | 46 | py | Python | PYTHON/hello.py | sonukumar34/hello-world-all-programming-language | 88d2bd39cf4b2c2815aa1105236ee5d434628ae2 | [
"MIT"
] | 27 | 2020-10-02T12:53:09.000Z | 2022-03-10T14:15:23.000Z | PYTHON/hello.py | sonukumar34/hello-world-all-programming-language | 88d2bd39cf4b2c2815aa1105236ee5d434628ae2 | [
"MIT"
] | 32 | 2020-10-02T16:20:37.000Z | 2020-10-16T11:37:45.000Z | PYTHON/hello.py | sonukumar34/hello-world-all-programming-language | 88d2bd39cf4b2c2815aa1105236ee5d434628ae2 | [
"MIT"
# Classic "hello world" example in Python.
greeting = "hello world"
print(greeting)
39536e1d74f64d33be9c1025de03792367c203af | 56 | py | Python | data/__init__.py | ishine/FastVocoder | ac716e6df8cd03dbfc4a969d8a5ed42c055c38aa | [
"MIT"
] | 116 | 2021-05-30T13:27:19.000Z | 2022-03-28T12:52:41.000Z | data/__init__.py | ishine/FastVocoder | ac716e6df8cd03dbfc4a969d8a5ed42c055c38aa | [
"MIT"
] | 9 | 2021-06-23T05:33:41.000Z | 2022-02-22T09:27:53.000Z | data/__init__.py | ishine/FastVocoder | ac716e6df8cd03dbfc4a969d8a5ed42c055c38aa | [
"MIT"
] | 17 | 2021-05-30T14:18:31.000Z | 2022-03-25T04:58:22.000Z | import data.audio
import data.dataset
import data.utils
| 14 | 19 | 0.839286 | 9 | 56 | 5.222222 | 0.555556 | 0.638298 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.107143 | 56 | 3 | 20 | 18.666667 | 0.94 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | null | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
1a303a7e7556b807d8dabca004224fcf8f7c42f0 | 33 | py | Python | pyross/tsi/__init__.py | vishalbelsare/pyross | 98dbdd7896661c790f7a9d13fda8595ddccadf04 | [
"MIT"
] | null | null | null | pyross/tsi/__init__.py | vishalbelsare/pyross | 98dbdd7896661c790f7a9d13fda8595ddccadf04 | [
"MIT"
] | null | null | null | pyross/tsi/__init__.py | vishalbelsare/pyross | 98dbdd7896661c790f7a9d13fda8595ddccadf04 | [
"MIT"
] | null | null | null | import pyross.tsi.deterministic
| 11 | 31 | 0.848485 | 4 | 33 | 7 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.090909 | 33 | 2 | 32 | 16.5 | 0.933333 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
1a3d007e83d858aba6a2254a45c4f8bca8f60349 | 380 | py | Python | beebole/interfaces/responses/__init__.py | Dogeek/beebole | f40ae3fad9a5178cc0d27116790eb5a0ef069a5b | [
"MIT"
] | 1 | 2021-12-19T23:45:47.000Z | 2021-12-19T23:45:47.000Z | beebole/interfaces/responses/__init__.py | Dogeek/beebole | f40ae3fad9a5178cc0d27116790eb5a0ef069a5b | [
"MIT"
] | null | null | null | beebole/interfaces/responses/__init__.py | Dogeek/beebole | f40ae3fad9a5178cc0d27116790eb5a0ef069a5b | [
"MIT"
] | null | null | null | from beebole.interfaces.responses.custom_field import CustomFieldListResponse
from beebole.interfaces.responses.simple import IdResponse, SimpleResponse
from beebole.interfaces.responses.jobs import JobInfoResponse
from beebole.interfaces.responses.group import GroupResponse, GroupListResponse
from beebole.interfaces.responses.absence import AbsenceResponse, AbsenceListResponse | 76 | 85 | 0.894737 | 39 | 380 | 8.692308 | 0.487179 | 0.162242 | 0.309735 | 0.442478 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.057895 | 380 | 5 | 85 | 76 | 0.946927 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
1a98cc2f0be31688f6f8f0c1452bdbb57ed198bb | 47 | py | Python | src/__init__.py | alliance-genome/agr_ferret | e2ccef16308b1a8a6f1b2a3dde6e29e0530da721 | [
"MIT"
] | 2 | 2020-07-22T14:25:00.000Z | 2021-09-20T18:29:08.000Z | src/__init__.py | alliance-genome/agr_ferret | e2ccef16308b1a8a6f1b2a3dde6e29e0530da721 | [
"MIT"
] | 6 | 2019-09-24T14:09:42.000Z | 2021-06-07T15:27:55.000Z | src/__init__.py | alliance-genome/agr_ferret | e2ccef16308b1a8a6f1b2a3dde6e29e0530da721 | [
"MIT"
] | 3 | 2020-12-19T08:57:51.000Z | 2020-12-19T08:58:09.000Z | from app import ContextInfo # pragma: no cover
| 23.5 | 46 | 0.787234 | 7 | 47 | 5.285714 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.170213 | 47 | 1 | 47 | 47 | 0.948718 | 0.340426 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
1aab414e06d0473d944cccdf5cc83e474bac623e | 141 | py | Python | sparc/filter/testing.py | davisd50/sparc.filter | 47d516d7af78028e220dcda458c125f09865e639 | [
"MIT"
] | null | null | null | sparc/filter/testing.py | davisd50/sparc.filter | 47d516d7af78028e220dcda458c125f09865e639 | [
"MIT"
] | null | null | null | sparc/filter/testing.py | davisd50/sparc.filter | 47d516d7af78028e220dcda458c125f09865e639 | [
"MIT"
import sparc.filter
from sparc.testing.testlayer import SparcZCMLFileLayer
# Shared ZCML test layer for sparc.filter integration tests; presumably
# loads the package's component registrations -- confirm against
# SparcZCMLFileLayer's behavior.
SPARC_FILTER_INTEGRATION_LAYER = SparcZCMLFileLayer(sparc.filter)
1ac12769968b3b1a2b5cf68de978520f26206d73 | 151 | py | Python | ahvl/__init__.py | gardar/ahvl | 045b5882d94fc2d4ba7b194bf65ebfbf9d2e1d6d | [
"MIT"
] | 4 | 2019-10-12T12:11:23.000Z | 2021-12-20T13:53:28.000Z | ahvl/__init__.py | gardar/ahvl | 045b5882d94fc2d4ba7b194bf65ebfbf9d2e1d6d | [
"MIT"
] | 2 | 2021-02-05T12:52:55.000Z | 2022-02-11T10:58:52.000Z | ahvl/__init__.py | gardar/ahvl | 045b5882d94fc2d4ba7b194bf65ebfbf9d2e1d6d | [
"MIT"
] | 1 | 2020-08-13T07:52:27.000Z | 2020-08-13T07:52:27.000Z | from ahvl.helper import AhvlMsg, AhvlHelper
from ahvl.hashivault import HashiVault
from ahvl.process import Process
from ahvl.lookup import AhvlLookup
| 30.2 | 43 | 0.854305 | 21 | 151 | 6.142857 | 0.47619 | 0.248062 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.112583 | 151 | 4 | 44 | 37.75 | 0.962687 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | null | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
46e3dff591f352b843003d2c18415f5a2c310ea1 | 132 | py | Python | gsfarc/gptool/parameter/templates/long64array.py | geospatial-services-framework/gsfpyarc | 5ef69299fbc0b763ad4c1857ceac3ff087c0dc14 | [
"MIT"
] | 1 | 2021-11-06T18:36:28.000Z | 2021-11-06T18:36:28.000Z | gsfarc/gptool/parameter/templates/long64array.py | geospatial-services-framework/gsfpyarc | 5ef69299fbc0b763ad4c1857ceac3ff087c0dc14 | [
"MIT"
] | null | null | null | gsfarc/gptool/parameter/templates/long64array.py | geospatial-services-framework/gsfpyarc | 5ef69299fbc0b763ad4c1857ceac3ff087c0dc14 | [
"MIT"
] | null | null | null | """
"""
from .basicarray import BASICARRAY
class LONG64ARRAY(BASICARRAY):
    """Array-of-64-bit-integer parameter; behaves exactly like BASICARRAY."""
    pass


def template():
    """Build and return a LONG64ARRAY bound to the 'GPLong' element type."""
    return LONG64ARRAY('GPLong')
46ecd3e887adb53e58623ea042176421bde0c646 | 96 | py | Python | sources/algorithms/queries/srqenum/__init__.py | tipech/OverlapGraph | 0aa132802f2e174608ce33c6bfc24ff14551bf4a | [
"MIT"
] | null | null | null | sources/algorithms/queries/srqenum/__init__.py | tipech/OverlapGraph | 0aa132802f2e174608ce33c6bfc24ff14551bf4a | [
"MIT"
] | 1 | 2018-10-07T08:06:01.000Z | 2018-10-07T08:06:01.000Z | sources/algorithms/queries/srqenum/__init__.py | tipech/OverlapGraph | 0aa132802f2e174608ce33c6bfc24ff14551bf4a | [
"MIT"
] | null | null | null | #!/usr/bin/env python
from .bynxgraph import *
from .byrcsweep import *
from .srqenum import *
| 16 | 24 | 0.729167 | 13 | 96 | 5.384615 | 0.692308 | 0.285714 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.15625 | 96 | 5 | 25 | 19.2 | 0.864198 | 0.208333 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | null | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
46fdf4d1a50140f8a61fae3327d270a3dabbd213 | 136 | py | Python | integration-testing/rnode_testing/random.py | Jake-Gillberg/rchain | 100caa9c2762c5cb90e7222f48ec3f1b4d7da9a7 | [
"Apache-2.0"
] | 1 | 2019-09-19T06:37:39.000Z | 2019-09-19T06:37:39.000Z | integration-testing/rnode_testing/random.py | Jake-Gillberg/rchain | 100caa9c2762c5cb90e7222f48ec3f1b4d7da9a7 | [
"Apache-2.0"
] | null | null | null | integration-testing/rnode_testing/random.py | Jake-Gillberg/rchain | 100caa9c2762c5cb90e7222f48ec3f1b4d7da9a7 | [
"Apache-2.0"
] | 1 | 2018-09-28T23:03:48.000Z | 2018-09-28T23:03:48.000Z | import random
import string
def random_string(length):
    """Return a string of *length* random ASCII letters (upper and lower case).

    Uses ``random.choices`` instead of a manual generator with an unused
    loop variable. NOTE: ``random`` is not cryptographically secure; use
    the ``secrets`` module for tokens or anything security-sensitive.
    """
    return ''.join(random.choices(string.ascii_letters, k=length))
| 19.428571 | 78 | 0.764706 | 20 | 136 | 5.1 | 0.7 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.132353 | 136 | 6 | 79 | 22.666667 | 0.864407 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.25 | false | 0 | 0.5 | 0.25 | 1 | 0 | 1 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 | 1 | 1 | 0 | 0 | 6 |
201b6a740cc0ef63f771666e7b1db997bff14fbc | 86 | py | Python | template/fs/__init__.py | clayne/syringe-1 | 4a431aa65c371a2018fca95145a3952ba802a609 | [
"BSD-2-Clause"
] | 25 | 2015-04-14T21:53:46.000Z | 2022-03-30T19:15:24.000Z | template/fs/__init__.py | clayne/syringe-1 | 4a431aa65c371a2018fca95145a3952ba802a609 | [
"BSD-2-Clause"
] | 5 | 2020-03-23T20:19:59.000Z | 2021-05-24T19:38:31.000Z | template/fs/__init__.py | clayne/syringe-1 | 4a431aa65c371a2018fca95145a3952ba802a609 | [
"BSD-2-Clause"
] | 7 | 2015-07-31T13:26:37.000Z | 2021-03-05T19:35:37.000Z | from . import cramfs
from . import iso9660
from . import ntfs
from . import physical
| 14.333333 | 22 | 0.755814 | 12 | 86 | 5.416667 | 0.5 | 0.615385 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.057971 | 0.197674 | 86 | 5 | 23 | 17.2 | 0.884058 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | null | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
203a85e10447370f982ec34f430853332bb135df | 3,194 | py | Python | python/server/module/mysqlOJ.py | alfie1121/OJ | 1a05633d3e3b73010c9dcc9352f76b9d027e4b5a | [
"MIT"
] | null | null | null | python/server/module/mysqlOJ.py | alfie1121/OJ | 1a05633d3e3b73010c9dcc9352f76b9d027e4b5a | [
"MIT"
] | null | null | null | python/server/module/mysqlOJ.py | alfie1121/OJ | 1a05633d3e3b73010c9dcc9352f76b9d027e4b5a | [
"MIT"
] | null | null | null | # -*- coding: UTF-8 -*-
import pymysql
import sys
class MysqlOJ:
    """Thin pymysql wrapper for the OJ (online judge) database.

    Each query method opens a fresh connection, runs one SELECT and
    returns ``cursor.fetchall()`` (a tuple of row tuples). On any
    failure the exception info is printed and ``None`` is returned,
    matching the original best-effort behavior.
    """

    def __init__(self, localhost, user, userPassword, db):
        # Connection parameters only; no connection is opened here.
        self.host = localhost
        self.user = user
        self.userPW = userPassword
        self.db = db

    def _connect(self):
        """Open and return a new database connection."""
        return pymysql.connect(self.host, self.user, self.userPW, self.db)

    def _query(self, sql, params=None):
        """Run a single SELECT; return all rows, or None on error.

        ``params`` are passed to ``cursor.execute`` so user-supplied
        values are escaped by the driver instead of being concatenated
        into the SQL string (the original was open to SQL injection).
        The connection is always closed, even on error (the original
        leaked it when execute/fetch raised).
        """
        try:
            db = self._connect()
            try:
                cursor = db.cursor()
                cursor.execute(sql, params)
                return cursor.fetchall()
            finally:
                db.close()
        except Exception:
            info = sys.exc_info()
            print(info[0], ":", info[1])

    def mysqloj_connect(self):
        """Return True if a connection can be opened; print the error otherwise."""
        try:
            # Close the probe connection (the original leaked it).
            self._connect().close()
            return True
        except Exception:
            info = sys.exc_info()
            print(info[0], ":", info[1])

    def get_login(self, email):
        """Fetch (id, name, email, password) rows for *email*.

        *email* comes from the client, so it is passed as a bound
        parameter, never spliced into the SQL text.
        """
        return self._query(
            "select id, name, email, password from users where email=%s",
            (email,),
        )

    def get_exam(self):
        """Fetch exam rows.

        FIXME: the table name was already missing in the original SQL
        ("... from "); this query cannot succeed until it is filled in.
        """
        return self._query("select id, num, title, content from ")

    def get_all_QP(self):
        """Fetch every question-pool entry."""
        return self._query(
            "select id, title, content, code_function from questionpools"
        )

    def get_example(self, qpid: str):
        """Fetch the examples attached to question pool *qpid*."""
        return self._query(
            "select example from questionpools_examples where qpid=%s",
            (qpid,),
        )

    def get_inputtype(self, qpid: str):
        """Fetch the input type of question pool *qpid*."""
        return self._query(
            "select inputtype from questionpools where id=%s",
            (qpid,),
        )

    def get_testing(self, qpid: str):
        """Fetch the (input, output) test pairs for question pool *qpid*."""
        return self._query(
            "select input, output from questionpools_testings where qpid=%s",
            (qpid,),
        )
# Nothing runs when executed directly; the module only defines MysqlOJ.
if __name__ == "__main__":
    pass
| 31.313725 | 92 | 0.518785 | 355 | 3,194 | 4.583099 | 0.180282 | 0.059004 | 0.068838 | 0.094653 | 0.731408 | 0.731408 | 0.731408 | 0.731408 | 0.731408 | 0.731408 | 0 | 0.007353 | 0.361302 | 3,194 | 101 | 93 | 31.623762 | 0.790196 | 0.006575 | 0 | 0.719101 | 0 | 0 | 0.103753 | 0.013876 | 0.011236 | 0 | 0 | 0 | 0 | 1 | 0.089888 | false | 0.044944 | 0.022472 | 0 | 0.202247 | 0.078652 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
646bbbe02f21dfe9fc62750355d439b5a86ea3f4 | 284 | py | Python | lessons/02lesson/instruction_set/main.py | tmax818/vero_python | e90d540310dec7de16f33eb0fabd99f81b3291a5 | [
"MIT"
] | null | null | null | lessons/02lesson/instruction_set/main.py | tmax818/vero_python | e90d540310dec7de16f33eb0fabd99f81b3291a5 | [
"MIT"
] | null | null | null | lessons/02lesson/instruction_set/main.py | tmax818/vero_python | e90d540310dec7de16f33eb0fabd99f81b3291a5 | [
"MIT"
] | null | null | null | """ Replace all strings below with the correct data type:"""
def return_int():
return ("replace with an int")
def return_float():
return ("replace with a float")
def return_bool():
return ("replace with a boolean")
def return_none():
return ("replace with None")
| 18.933333 | 60 | 0.672535 | 40 | 284 | 4.675 | 0.45 | 0.192513 | 0.363636 | 0.192513 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.204225 | 284 | 14 | 61 | 20.285714 | 0.827434 | 0.18662 | 0 | 0 | 0 | 0 | 0.348214 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.5 | true | 0 | 0 | 0.5 | 1 | 0 | 0 | 0 | 0 | null | 0 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 1 | 0 | 0 | 1 | 1 | 0 | 0 | 6 |
649a6fbe14c708d233812379f7d1c5cbd2af98ae | 21 | py | Python | models/__init__.py | choumartin1234/Reading-VQA | b677fcd336fb36721b4f6ca8a148cd015530dfd3 | [
"MIT"
] | 1 | 2020-03-02T07:46:00.000Z | 2020-03-02T07:46:00.000Z | models/__init__.py | choumartin1234/Reading-VQA | b677fcd336fb36721b4f6ca8a148cd015530dfd3 | [
"MIT"
] | 2 | 2019-09-19T14:48:53.000Z | 2019-11-18T18:04:35.000Z | models/__init__.py | choumartin1234/Reading-VQA | b677fcd336fb36721b4f6ca8a148cd015530dfd3 | [
"MIT"
] | 2 | 2020-03-02T07:46:27.000Z | 2020-03-28T13:45:54.000Z | from .net import Net
| 10.5 | 20 | 0.761905 | 4 | 21 | 4 | 0.75 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.190476 | 21 | 1 | 21 | 21 | 0.941176 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
64c9d9c9cd86b92ba329da4c3baaf58dfd32064d | 133 | py | Python | repositoryupdater/__main__.py | marciogranzotto/repository-updater | f703142f84659e4d478a6fd6d569788c9fb5dbbf | [
"MIT"
] | 9 | 2018-05-02T10:10:42.000Z | 2022-03-30T06:09:57.000Z | repositoryupdater/__main__.py | marciogranzotto/repository-updater | f703142f84659e4d478a6fd6d569788c9fb5dbbf | [
"MIT"
] | 11 | 2018-06-07T19:47:32.000Z | 2022-02-04T19:10:09.000Z | repositoryupdater/__main__.py | marciogranzotto/repository-updater | f703142f84659e4d478a6fd6d569788c9fb5dbbf | [
"MIT"
] | 6 | 2020-01-27T13:14:40.000Z | 2022-03-30T06:11:28.000Z | """Repository Updater bootstrap."""
from . import cli
if __name__ == "__main__":
cli.repository_updater(None, None, None, None)
| 22.166667 | 50 | 0.706767 | 16 | 133 | 5.3125 | 0.625 | 0.282353 | 0.282353 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.150376 | 133 | 5 | 51 | 26.6 | 0.752212 | 0.218045 | 0 | 0 | 0 | 0 | 0.081633 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 0.333333 | 0 | 0.333333 | 0 | 1 | 0 | 0 | null | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 0 | 0 | 0 | 6 |
b389c14b4cdd3273a77bbb29bfb7967abdc29346 | 155 | py | Python | web/logger/admin.py | Gertdor/time_logger_joker | dd857cda76ed60b87812de66e62d05bc762ea657 | [
"BSD-3-Clause"
] | 3 | 2017-12-13T11:27:36.000Z | 2018-01-29T13:24:21.000Z | web/logger/admin.py | Gertdor/time_logger_joker | dd857cda76ed60b87812de66e62d05bc762ea657 | [
"BSD-3-Clause"
] | 30 | 2017-09-18T14:13:26.000Z | 2018-05-22T11:44:40.000Z | web/logger/admin.py | gtcodes/time_logger_joker | dd857cda76ed60b87812de66e62d05bc762ea657 | [
"BSD-3-Clause"
] | null | null | null | from django.contrib import admin
from .models import Timelog, User, Team
admin.site.register(Timelog)
admin.site.register(User)
admin.site.register(Team) | 22.142857 | 39 | 0.806452 | 23 | 155 | 5.434783 | 0.478261 | 0.216 | 0.408 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.090323 | 155 | 7 | 40 | 22.142857 | 0.886525 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 0.4 | 0 | 0.4 | 0 | 1 | 0 | 0 | null | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 0 | 0 | 0 | 6 |
376213aeee570f956834483a455248d689c82ab2 | 30,406 | py | Python | api/resources_portal/test/views/test_material_request.py | AlexsLemonade/resources-portal | d91c6c8d6135461faccbc78ef2b0be3f9b358f21 | [
"BSD-3-Clause"
] | null | null | null | api/resources_portal/test/views/test_material_request.py | AlexsLemonade/resources-portal | d91c6c8d6135461faccbc78ef2b0be3f9b358f21 | [
"BSD-3-Clause"
] | 536 | 2019-11-13T15:49:03.000Z | 2022-03-28T20:17:24.000Z | api/resources_portal/test/views/test_material_request.py | AlexsLemonade/resources-portal | d91c6c8d6135461faccbc78ef2b0be3f9b358f21 | [
"BSD-3-Clause"
] | 1 | 2020-04-03T02:07:29.000Z | 2020-04-03T02:07:29.000Z | import datetime
from django.forms.models import model_to_dict
from django.urls import reverse
from django.utils import timezone
from rest_framework import status
from rest_framework.test import APITestCase
from faker import Faker
from resources_portal.models import (
MaterialRequest,
MaterialShareEvent,
Notification,
OrganizationUserSetting,
)
from resources_portal.test.factories import (
AddressFactory,
AttachmentFactory,
MaterialFactory,
MaterialRequestFactory,
MaterialRequestIssueFactory,
OrganizationFactory,
ShippingRequirementFactory,
UserFactory,
)
fake = Faker()
class TestMaterialRequestListTestCase(APITestCase):
    """
    Tests /materials-requests list operations.
    """
    def setUp(self):
        # One pending material request; self.sharer is the material's
        # contact user and receives member + owner permissions on the
        # material's organization.
        self.url = reverse("material-request-list")
        self.request = MaterialRequestFactory()
        self.sharer = self.request.material.contact_user
        self.organization = self.request.material.organization
        self.request.material.save()
        self.organization.members.add(self.sharer)
        self.organization.assign_member_perms(self.sharer)
        self.organization.assign_owner_perms(self.sharer)
        # Serialized form of the request, reused as the POST payload below.
        self.material_request_data = model_to_dict(self.request)
        self.user_without_perms = UserFactory()
    def test_post_request_with_no_data_fails(self):
        # An empty payload is rejected with 400.
        self.client.force_authenticate(user=self.request.requester)
        response = self.client.post(self.url, {})
        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
    def test_post_request_with_valid_data_succeeds(self):
        self.client.force_authenticate(user=self.request.requester)
        OrganizationUserSetting.objects.get_or_create(
            user=self.sharer, organization=self.request.material.organization
        )
        response = self.client.post(self.url, self.material_request_data, format="json")
        self.assertEqual(response.status_code, status.HTTP_201_CREATED)
        # Exactly one "assigned" notification is created for the assignee.
        self.assertEqual(
            len(
                Notification.objects.filter(
                    notification_type="MATERIAL_REQUEST_SHARER_ASSIGNED_NEW"
                )
            ),
            1,
        )
        # Does not notify the assignee, because they are notified specially.
        self.assertEqual(
            len(Notification.objects.filter(notification_type="MATERIAL_REQUEST_SHARER_RECEIVED")),
            self.organization.members.count() - 1,
        )
        # Opening a request records a share event.
        self.assertEqual(
            len(MaterialShareEvent.objects.filter(event_type="REQUEST_OPENED")), 1,
        )
    def test_post_request_with_address_succeeds(self):
        self.client.force_authenticate(user=self.request.requester)
        OrganizationUserSetting.objects.get_or_create(
            user=self.sharer, organization=self.request.material.organization
        )
        # Same as the valid-data case, but with a shipping address attached.
        address = AddressFactory()
        self.material_request_data["address"] = address.id
        response = self.client.post(self.url, self.material_request_data, format="json")
        self.assertEqual(response.status_code, status.HTTP_201_CREATED)
        self.assertEqual(
            len(
                Notification.objects.filter(
                    notification_type="MATERIAL_REQUEST_SHARER_ASSIGNED_NEW"
                )
            ),
            1,
        )
        # Does not notify the assignee, because they are notified specially.
        self.assertEqual(
            len(Notification.objects.filter(notification_type="MATERIAL_REQUEST_SHARER_RECEIVED")),
            self.organization.members.count() - 1,
        )
        self.assertEqual(
            len(MaterialShareEvent.objects.filter(event_type="REQUEST_OPENED")), 1,
        )
        # The supplied address must end up on the created request.
        created_request = MaterialRequest.objects.get(id=response.json()["id"])
        self.assertEqual(created_request.address.id, address.id)
    def test_post_request_with_invalid_data_fails(self):
        self.client.force_authenticate(user=self.request.requester)
        OrganizationUserSetting.objects.get_or_create(
            user=self.sharer, organization=self.request.material.organization
        )
        # An unknown payment_method choice should fail validation.
        self.material_request_data["payment_method"] = "I won't pay!"
        response = self.client.post(self.url, self.material_request_data, format="json")
        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
    def test_post_request_with_valid_data_fails_if_archived(self):
        # Archived materials cannot be requested, even with valid data.
        self.request.material.is_archived = True
        self.request.material.save()
        self.client.force_authenticate(user=self.request.requester)
        OrganizationUserSetting.objects.get_or_create(
            user=self.sharer, organization=self.request.material.organization
        )
        response = self.client.post(self.url, self.material_request_data, format="json")
        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
    def test_post_request_from_unauthenticated_forbidden(self):
        self.client.force_authenticate(user=None)
        response = self.client.post(self.url, self.material_request_data, format="json")
        self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
    def test_get_request_from_sharer_succeeds(self):
        self.client.force_authenticate(user=self.sharer)
        # Backdate the request to check the human-readable timestamp field.
        self.request.created_at = timezone.now() - datetime.timedelta(days=8)
        self.request.save()
        response = self.client.get(self.url)
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(response.json()["count"], 1)
        # The serialized material embeds its organization.
        self.assertIn("organization", response.json()["results"][0]["material"])
        self.assertEqual("a week ago", response.json()["results"][0]["human_readable_created_at"])
    def test_get_request_from_requester_succeeds(self):
        self.client.force_authenticate(user=self.request.requester)
        self.request.created_at = timezone.now() - datetime.timedelta(days=15)
        self.request.save()
        response = self.client.get(self.url)
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(response.json()["count"], 1)
        self.assertEqual("2 weeks ago", response.json()["results"][0]["human_readable_created_at"])
    def test_get_request_filters(self):
        # A user with no permissions on the request sees an empty list,
        # not a 403 -- the queryset is filtered per user.
        self.client.force_authenticate(user=self.user_without_perms)
        response = self.client.get(self.url)
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(response.json()["count"], 0)
    def test_get_request_from_unauthenticated_fails(self):
        self.client.force_authenticate(user=None)
        response = self.client.get(self.url)
        self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
class TestSingleMaterialRequestTestCase(APITestCase):
def setUp(self):
self.organization = OrganizationFactory()
self.sharer = self.organization.owner
self.material = MaterialFactory(organization=self.organization, contact_user=self.sharer)
self.request = MaterialRequestFactory(material=self.material, assigned_to=self.sharer)
self.url = reverse("material-request-detail", args=[self.request.id])
self.material_request_data = model_to_dict(self.request)
self.request2 = MaterialRequestFactory()
self.organization.assign_member_perms(self.sharer)
self.organization.assign_owner_perms(self.sharer)
self.other_member = UserFactory()
self.organization.members.add(self.other_member)
self.organization.assign_member_perms(self.other_member)
self.organization.assign_owner_perms(self.other_member)
self.user_without_perms = UserFactory()
self.admin = UserFactory()
self.admin.is_staff = True
def test_get_request_from_sharer_succeeds(self):
self.client.force_authenticate(user=self.sharer)
response = self.client.get(self.url)
self.assertEqual(response.status_code, status.HTTP_200_OK)
def test_get_request_from_requester_succeeds(self):
self.client.force_authenticate(user=self.request.requester)
response = self.client.get(self.url)
self.assertEqual(response.status_code, status.HTTP_200_OK)
def test_get_request_without_perms_fails(self):
self.client.force_authenticate(user=self.user_without_perms)
url = reverse("material-request-detail", args=[self.request.id])
response = self.client.get(url)
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
def test_get_request_from_unauthenticated_fails(self):
self.client.force_authenticate(user=None)
response = self.client.get(self.url)
self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
def test_put_request_fulfilled_closes_issues(self):
# Make the request IN_FULFILLMENT, add an open issue, and
# verify that has_issues reports it correctly
self.request.status = "IN_FULFILLMENT"
self.request.save()
issue = MaterialRequestIssueFactory(material_request=self.request)
self.request.refresh_from_db()
self.assertTrue(self.request.has_issues)
self.client.force_authenticate(user=self.sharer)
self.material_request_data["status"] = "FULFILLED"
response = self.client.put(self.url, self.material_request_data)
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.request.refresh_from_db()
self.assertFalse(self.request.has_issues)
issue.refresh_from_db()
self.assertEqual(issue.status, "CLOSED")
self.assertEqual(
len(MaterialShareEvent.objects.filter(event_type="REQUEST_FULFILLED")), 1,
)
self.assertEqual(
len(MaterialShareEvent.objects.filter(event_type="REQUEST_ISSUE_CLOSED")), 1,
)
def test_put_request_from_sharer_updates_a_material_request(self):
self.client.force_authenticate(user=self.sharer)
sharer_org = OrganizationFactory()
sharer_org.members.add(self.sharer)
sharer_org.save()
self.material_request_data["status"] = "APPROVED"
self.material_request_data["executed_mta_attachment"] = AttachmentFactory(
owned_by_org=sharer_org
).id
response = self.client.put(self.url, self.material_request_data)
self.assertEqual(response.status_code, status.HTTP_200_OK)
material_request = MaterialRequest.objects.get(pk=self.request.id)
self.assertEqual(material_request.status, "APPROVED")
self.assertEqual(
len(MaterialShareEvent.objects.filter(event_type="REQUEST_APPROVED")), 1,
)
self.assertEqual(
len(MaterialShareEvent.objects.filter(event_type="SHARER_MTA_ADDED")), 1,
)
def test_patch_request_from_requester_adds_attachments(self):
    """A requester can PATCH in both the IRB and the signed MTA at once."""
    # Require both an MTA and an IRB on the underlying material.
    self.request.material.mta_attachment = AttachmentFactory()
    self.request.material.needs_irb = True
    self.request.material.save()
    self.client.force_authenticate(user=self.request.requester)
    requester = self.request.requester
    irb = AttachmentFactory(
        owned_by_user=requester, owned_by_org=None, attachment_type="IRB"
    )
    signed_mta = AttachmentFactory(
        owned_by_user=requester, owned_by_org=None, attachment_type="SIGNED_MTA"
    )
    payload = {
        "irb_attachment": irb.id,
        "requester_signed_mta_attachment": signed_mta.id,
    }
    response = self.client.patch(self.url, payload)
    self.assertEqual(response.status_code, status.HTTP_200_OK)
    updated = MaterialRequest.objects.get(pk=self.request.id)
    self.assertEqual(updated.irb_attachment, irb)
    # Ownership of the IRB moves to the material's organization.
    self.assertEqual(
        updated.irb_attachment.owned_by_org, self.request.material.organization
    )
    self.assertEqual(
        MaterialShareEvent.objects.filter(event_type="REQUESTER_IRB_ADDED").count(),
        1,
    )
def test_patch_request_from_requester_fails_without_MTA(self):
    """Supplying only the IRB when an MTA is also required yields 400."""
    # Require both an MTA and an IRB on the underlying material.
    self.request.material.mta_attachment = AttachmentFactory()
    self.request.material.needs_irb = True
    self.request.material.save()
    self.client.force_authenticate(user=self.request.requester)
    irb = AttachmentFactory(
        owned_by_user=self.request.requester,
        owned_by_org=None,
        attachment_type="IRB",
    )
    response = self.client.patch(self.url, {"irb_attachment": irb.id})
    self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
def test_patch_request_from_requester_fails_without_IRB(self):
    """Supplying only the signed MTA when an IRB is also required yields 400."""
    # Require both an MTA and an IRB on the underlying material.
    self.request.material.mta_attachment = AttachmentFactory()
    self.request.material.needs_irb = True
    self.request.material.save()
    self.client.force_authenticate(user=self.request.requester)
    signed_mta = AttachmentFactory(
        owned_by_user=self.request.requester,
        owned_by_org=None,
        attachment_type="SIGNED_MTA",
    )
    response = self.client.patch(
        self.url, {"requester_signed_mta_attachment": signed_mta.id}
    )
    self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
def test_patch_address_triggers_info_notif(self):
    """Setting the shipping address notifies every sharer org member."""
    self.client.force_authenticate(user=self.request.requester)
    address = AddressFactory(user=self.request.requester)
    response = self.client.patch(self.url, {"address": address.id})
    self.assertEqual(response.status_code, status.HTTP_200_OK)
    self.assertEqual(
        MaterialRequest.objects.get(pk=self.request.id).address, address
    )
    # One "received info" notification per member of the organization.
    self.assertEqual(
        Notification.objects.filter(
            notification_type="MATERIAL_REQUEST_SHARER_RECEIVED_INFO"
        ).count(),
        self.organization.members.count(),
    )
def test_patch_request_from_requester_updates_all_docs(self):
    """One PATCH can supply payment info plus both required documents."""
    # Require an MTA, an IRB, and shipping payment on the material.
    self.request.material.mta_attachment = AttachmentFactory()
    self.request.material.needs_irb = True
    self.request.material.shipping_requirement = ShippingRequirementFactory(
        organization=self.organization
    )
    self.request.material.save()
    self.request.payment_method = None
    self.request.payment_method_notes = None
    self.request.save()
    self.client.force_authenticate(user=self.request.requester)
    requester = self.request.requester
    signed_mta = AttachmentFactory(
        owned_by_user=requester, owned_by_org=None, attachment_type="SIGNED_MTA"
    )
    irb = AttachmentFactory(
        owned_by_user=requester, owned_by_org=None, attachment_type="IRB"
    )
    payload = {
        "payment_method": "REIMBURSEMENT",
        "payment_method_notes": "You know I'm good for it!",
        "irb_attachment": irb.id,
        "requester_signed_mta_attachment": signed_mta.id,
    }
    response = self.client.patch(self.url, payload)
    self.assertEqual(response.status_code, status.HTTP_200_OK)
    # One event each for the payment method and its notes.
    self.assertEqual(
        MaterialShareEvent.objects.filter(
            event_type="REQUESTER_PAYMENT_METHOD_ADDED"
        ).count(),
        1,
    )
    self.assertEqual(
        MaterialShareEvent.objects.filter(
            event_type="REQUESTER_PAYMENT_METHOD_NOTES_ADDED"
        ).count(),
        1,
    )
    # Repeating the same PATCH must be idempotent, not an error.
    second_response = self.client.patch(self.url, payload)
    self.assertEqual(second_response.status_code, status.HTTP_200_OK)
def test_patch_request_from_requester_missing_payment_fails(self):
    """Documents alone are rejected when payment info is also required."""
    # Require an MTA, an IRB, and shipping payment on the material.
    self.request.material.mta_attachment = AttachmentFactory()
    self.request.material.needs_irb = True
    self.request.material.shipping_requirement = ShippingRequirementFactory(
        organization=self.organization
    )
    self.request.material.save()
    # Clear everything the requester is expected to provide.
    self.request.payment_method = None
    self.request.payment_method_notes = None
    self.request.irb_attachment = None
    self.request.requester_signed_mta_attachment = None
    self.request.save()
    self.client.force_authenticate(user=self.request.requester)
    requester = self.request.requester
    signed_mta = AttachmentFactory(
        owned_by_user=requester, owned_by_org=None, attachment_type="SIGNED_MTA"
    )
    irb = AttachmentFactory(
        owned_by_user=requester, owned_by_org=None, attachment_type="IRB"
    )
    payload = {
        "irb_attachment": irb.id,
        "requester_signed_mta_attachment": signed_mta.id,
    }
    response = self.client.patch(self.url, payload)
    self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
    # Regression check: a rejected PATCH must not partially update the
    # request (it previously saved docs even when payment was missing).
    self.request.refresh_from_db()
    self.assertIsNone(self.request.irb_attachment)
def test_patch_request_from_requester_generates_payment_events(self):
    """Adding payment method and notes records one event for each."""
    self.client.force_authenticate(user=self.request.requester)
    payload = {
        "payment_method": "REIMBURSEMENT",
        "payment_method_notes": "You know I'm good for it!",
    }
    response = self.client.patch(self.url, payload)
    self.assertEqual(response.status_code, status.HTTP_200_OK)
    self.assertEqual(
        MaterialShareEvent.objects.filter(
            event_type="REQUESTER_PAYMENT_METHOD_ADDED"
        ).count(),
        1,
    )
    self.assertEqual(
        MaterialShareEvent.objects.filter(
            event_type="REQUESTER_PAYMENT_METHOD_NOTES_ADDED"
        ).count(),
        1,
    )
def test_put_request_from_requester_verifies_request(self):
    """The requester may verify a fulfilled request."""
    # Only a FULFILLED request can be verified.
    self.request.status = "FULFILLED"
    self.request.save()
    self.client.force_authenticate(user=self.request.requester)
    self.material_request_data["status"] = "VERIFIED_FULFILLED"
    response = self.client.put(self.url, self.material_request_data)
    self.assertEqual(response.status_code, status.HTTP_200_OK)
    self.assertEqual(
        MaterialRequest.objects.get(pk=self.request.id).status,
        "VERIFIED_FULFILLED",
    )
    self.assertEqual(
        MaterialShareEvent.objects.filter(
            event_type="REQUEST_VERIFIED_FULFILLED"
        ).count(),
        1,
    )
def test_put_request_from_sharer_does_not_verify_request(self):
    """Only the requester — not the sharer — may verify fulfillment."""
    # Make the request fulfilled so verification would otherwise be allowed.
    self.request.status = "FULFILLED"
    self.request.save()
    self.client.force_authenticate(user=self.sharer)
    self.material_request_data["status"] = "VERIFIED_FULFILLED"
    self.assertEqual(
        self.client.put(self.url, self.material_request_data).status_code,
        status.HTTP_403_FORBIDDEN,
    )
def test_put_request_from_user_who_does_not_own_attachment_fails(self):
    """Attaching an IRB the requester does not own is forbidden."""
    self.client.force_authenticate(user=self.request.requester)
    foreign_irb = AttachmentFactory()
    self.material_request_data["irb_attachment"] = foreign_irb.id
    self.assertEqual(
        self.client.put(self.url, self.material_request_data).status_code,
        status.HTTP_403_FORBIDDEN,
    )
def test_put_request_updates_assigned_to(self):
    """Reassignment notifies the assignee and the rest of the org, and logs an event."""
    self.client.force_authenticate(user=self.other_member)
    self.material_request_data["assigned_to"] = self.other_member.id
    response = self.client.put(self.url, self.material_request_data)
    self.assertEqual(response.status_code, status.HTTP_200_OK)
    self.assertEqual(
        MaterialRequest.objects.get(id=self.request.id).assigned_to,
        self.other_member,
    )
    # The new assignee gets a single personal notification...
    personal = Notification.objects.filter(
        notification_type="MATERIAL_REQUEST_SHARER_ASSIGNED"
    )
    self.assertEqual(personal[0].associated_user, self.other_member)
    self.assertEqual(personal.count(), 1)
    # ...and every other org member is told about the reassignment.
    self.assertEqual(
        Notification.objects.filter(
            notification_type="MATERIAL_REQUEST_SHARER_ASSIGNMENT"
        ).count(),
        self.organization.members.count() - 1,
    )
    self.assertEqual(
        MaterialShareEvent.objects.filter(event_type="REQUEST_REASSIGNED").count(),
        1,
    )
def test_put_request_from_requester_does_not_update_assigned_to(self):
    """The requester has no say in who handles the request."""
    self.client.force_authenticate(user=self.request.requester)
    self.material_request_data["assigned_to"] = self.other_member.id
    self.assertEqual(
        self.client.put(self.url, self.material_request_data).status_code,
        status.HTTP_403_FORBIDDEN,
    )
def test_patch_can_reject(self):
    """Rejection notifies the whole sharer org plus the requester."""
    self.client.force_authenticate(user=self.sharer)
    response = self.client.patch(self.url, {"status": "REJECTED"})
    self.assertEqual(response.status_code, status.HTTP_200_OK)
    self.assertEqual(
        Notification.objects.filter(
            notification_type="MATERIAL_REQUEST_SHARER_REJECTED"
        ).count(),
        self.organization.members.count(),
    )
    self.assertEqual(
        Notification.objects.filter(
            notification_type="MATERIAL_REQUEST_REQUESTER_REJECTED"
        ).count(),
        1,
    )
def test_patch_can_cancel(self):
    """Cancellation notifies both sides and records a share event."""
    self.client.force_authenticate(user=self.request.requester)
    response = self.client.patch(self.url, {"status": "CANCELLED"})
    self.assertEqual(response.status_code, status.HTTP_200_OK)
    self.assertEqual(
        Notification.objects.filter(
            notification_type="MATERIAL_REQUEST_SHARER_CANCELLED"
        ).count(),
        self.organization.members.count(),
    )
    self.assertEqual(
        Notification.objects.filter(
            notification_type="MATERIAL_REQUEST_REQUESTER_CANCELLED"
        ).count(),
        1,
    )
    self.assertEqual(
        MaterialShareEvent.objects.filter(event_type="REQUEST_CANCELLED").count(),
        1,
    )
def test_patch_can_move_to_in_fulfillment(self):
    """Sharer moving a request to IN_FULFILLMENT notifies both sides."""
    # Drop the executed MTA so the IN_FULFILLMENT notifications fire.
    self.request.executed_mta_attachment = None
    self.request.save()
    self.client.force_authenticate(user=self.sharer)
    response = self.client.patch(self.url, {"status": "IN_FULFILLMENT"})
    self.assertEqual(response.status_code, status.HTTP_200_OK)
    self.assertEqual(
        Notification.objects.filter(
            notification_type="MATERIAL_REQUEST_SHARER_IN_FULFILLMENT"
        ).count(),
        self.organization.members.count(),
    )
    self.assertEqual(
        Notification.objects.filter(
            notification_type="MATERIAL_REQUEST_REQUESTER_IN_FULFILLMENT"
        ).count(),
        1,
    )
    self.assertEqual(
        MaterialShareEvent.objects.filter(
            event_type="REQUEST_IN_FULFILLMENT"
        ).count(),
        1,
    )
def test_patch_requester_can_move_to_in_fulfillment(self):
    """If an MTA is not required, the requester can move it to IN_FULFILLMENT"""
    # NOTE(review): this uses self.material where sibling tests use
    # self.request.material — presumably setUp makes them the same object;
    # confirm against setUp.
    self.material.mta_attachment = None
    self.material.save()
    # Drop the executed MTA so the IN_FULFILLMENT notifications fire.
    self.request.executed_mta_attachment = None
    self.request.status = "APPROVED"
    self.request.save()
    self.client.force_authenticate(user=self.request.requester)
    irb = AttachmentFactory(owned_by_user=self.request.requester)
    payload = {"status": "IN_FULFILLMENT", "irb_attachment": irb.id}
    response = self.client.patch(self.url, payload)
    self.assertEqual(response.status_code, status.HTTP_200_OK)
    self.assertEqual(
        Notification.objects.filter(
            notification_type="MATERIAL_REQUEST_SHARER_IN_FULFILLMENT"
        ).count(),
        self.organization.members.count(),
    )
    self.assertEqual(
        Notification.objects.filter(
            notification_type="MATERIAL_REQUEST_REQUESTER_IN_FULFILLMENT"
        ).count(),
        1,
    )
    self.assertEqual(
        MaterialShareEvent.objects.filter(
            event_type="REQUEST_IN_FULFILLMENT"
        ).count(),
        1,
    )
def test_patch_requester_cannot_move_to_in_fulfillment(self):
    """If an MTA is required, the requester cannot move it to IN_FULFILLMENT"""
    self.client.force_authenticate(user=self.request.requester)
    irb = AttachmentFactory()
    payload = {"status": "IN_FULFILLMENT", "irb_attachment": irb.id}
    self.assertEqual(
        self.client.patch(self.url, payload).status_code,
        status.HTTP_403_FORBIDDEN,
    )
def test_put_request_without_permission_forbidden(self):
    """A user without org permissions cannot approve the request."""
    self.client.force_authenticate(user=self.user_without_perms)
    self.material_request_data["status"] = "APPROVED"
    self.assertEqual(
        self.client.put(self.url, self.material_request_data).status_code,
        status.HTTP_403_FORBIDDEN,
    )
def test_put_request_from_unauthenticated_forbidden(self):
    """An anonymous PUT must be rejected with 401."""
    self.client.force_authenticate(user=None)
    self.material_request_data["status"] = "APPROVED"
    self.assertEqual(
        self.client.put(self.url, self.material_request_data).status_code,
        status.HTTP_401_UNAUTHORIZED,
    )
def test_delete_request_deletes_a_material(self):
    """An admin can delete the material request outright.

    (Despite the name, what is deleted is the MaterialRequest, not a
    Material.)
    """
    self.client.force_authenticate(user=self.admin)
    doomed_id = self.request.id
    response = self.client.delete(self.url)
    self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)
    self.assertEqual(MaterialRequest.objects.filter(id=doomed_id).count(), 0)
def test_delete_request_without_permission_forbidden(self):
    """A user without org permissions cannot delete the request."""
    self.client.force_authenticate(user=self.user_without_perms)
    self.assertEqual(
        self.client.delete(self.url).status_code, status.HTTP_403_FORBIDDEN
    )
def test_delete_request_from_unauthenticated_forbidden(self):
    """An anonymous DELETE must be rejected with 401."""
    self.client.force_authenticate(user=None)
    self.assertEqual(
        self.client.delete(self.url).status_code, status.HTTP_401_UNAUTHORIZED
    )
class TestNestedMaterialRequestListTestCase(APITestCase):
    """
    Tests /materials/id/requests list operations.
    """

    def setUp(self):
        self.request = MaterialRequestFactory()
        self.url = reverse(
            "material-material-requests-list", args=[self.request.material.id]
        )
        self.sharer = self.request.material.contact_user
        self.organization = self.request.material.organization
        self.request.material.save()
        # A second request against another material of the same org ensures
        # the nested route filters down to the parent material only.
        other_material = MaterialFactory(organization=self.organization)
        MaterialRequestFactory(material=other_material)
        self.organization.members.add(self.sharer)
        self.organization.assign_member_perms(self.sharer)
        self.organization.assign_owner_perms(self.sharer)
        self.material_request_data = model_to_dict(self.request)
        self.user_without_perms = UserFactory()

    def _assert_list_count(self, user, expected_count, query=""):
        # Authenticate as *user*, GET the nested list with an optional query
        # string, and check for a 200 with the expected result count.
        self.client.force_authenticate(user=user)
        response = self.client.get(self.url + query)
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(response.json()["count"], expected_count)

    def test_get_request_from_sharer_succeeds(self):
        """The sharer sees exactly the one request for this material."""
        self._assert_list_count(self.sharer, 1)

    def test_get_request_from_sharer_filters_fields(self):
        """Filtering on the request's actual status still returns it."""
        self._assert_list_count(self.sharer, 1, "?status=OPEN")

    def test_get_request_from_sharer_filters_fields_out(self):
        """Filtering on a status the request doesn't have excludes it."""
        self._assert_list_count(self.sharer, 0, "?status=APPROVED")

    def test_get_request_from_requester_succeeds(self):
        """The requester can list their own request."""
        self._assert_list_count(self.request.requester, 1)

    def test_get_request_filters(self):
        """An unrelated user gets an empty (filtered) list, not an error."""
        self._assert_list_count(self.user_without_perms, 0)

    def test_get_request_from_unauthenticated_fails(self):
        """Anonymous access to the nested list is rejected with 401."""
        self.client.force_authenticate(user=None)
        self.assertEqual(
            self.client.get(self.url).status_code, status.HTTP_401_UNAUTHORIZED
        )
| 39.642764 | 100 | 0.695422 | 3,427 | 30,406 | 5.902247 | 0.07616 | 0.057646 | 0.050724 | 0.064518 | 0.854106 | 0.825777 | 0.801849 | 0.788995 | 0.776339 | 0.73481 | 0 | 0.007485 | 0.21351 | 30,406 | 766 | 101 | 39.694517 | 0.838337 | 0.042031 | 0 | 0.633157 | 0 | 0 | 0.067251 | 0.035432 | 0 | 0 | 0 | 0 | 0.176367 | 1 | 0.082892 | false | 0 | 0.015873 | 0 | 0.104056 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
377c6f838c0eb6a1fc30cad5ff87b389120d7369 | 29,713 | py | Python | chunair/kicad-footprint-generator-master/scripts/Crystals_Resonators_THT/make_crystal.py | speedypotato/chuni-lite | c8dda8428723f8c4f99075e7cbaa22a44cbc187d | [
"CC-BY-4.0"
] | 2 | 2022-03-18T23:42:51.000Z | 2022-03-19T15:31:34.000Z | chunair/kicad-footprint-generator-master/scripts/Crystals_Resonators_THT/make_crystal.py | speedypotato/chuni-lite | c8dda8428723f8c4f99075e7cbaa22a44cbc187d | [
"CC-BY-4.0"
] | null | null | null | chunair/kicad-footprint-generator-master/scripts/Crystals_Resonators_THT/make_crystal.py | speedypotato/chuni-lite | c8dda8428723f8c4f99075e7cbaa22a44cbc187d | [
"CC-BY-4.0"
] | null | null | null | #!/usr/bin/env python
import sys
import os
import math
# ensure that the kicad-footprint-generator directory is available
#sys.path.append(os.environ.get('KIFOOTPRINTGENERATOR')) # enable package import from parent directory
#sys.path.append("D:\hardware\KiCAD\kicad-footprint-generator") # enable package import from parent directory
sys.path.append(os.path.join(sys.path[0],"..","..","kicad_mod")) # load kicad_mod path
sys.path.append(os.path.join(sys.path[0],"..","..")) # load kicad_mod path
sys.path.append(os.path.join(sys.path[0],"..","tools")) # load kicad_mod path
from KicadModTree import * # NOQA
from drawing_tools import *
from footprint_scripts_crystals import *
if __name__ == '__main__':
standardtags="THT crystal"
standardtagsres="THT ceramic resonator filter"
script3dhc49="crystal_hc49_2pin.py"
with open(script3dhc49, "w") as myfile:
myfile.write("#\n# SCRIPT to generate 3D models\n#\n\n")
script3dhc493p="crystal_hc49_3pin.py"
with open(script3dhc493p, "w") as myfile:
myfile.write("#\n# SCRIPT to generate 3D models\n#\n\n")
script3dres3="resonator_3pin.py"
with open(script3dres3, "w") as myfile:
myfile.write("#\n# SCRIPT to generate 3D models\n#\n\n")
script3dres2="resonator_2pin.py"
with open(script3dres2, "w") as myfile:
myfile.write("#\n# SCRIPT to generate 3D models\n#\n\n")
script3dhc49h="crystal_hc49_2pin_hor.py"
with open(script3dhc49h, "w") as myfile:
myfile.write("#\n# SCRIPT to generate 3D models\n#\n\n")
# common settings
makeCrystalAll(footprint_name="Crystal_AT310_d3.0mm_l10.0mm_Horizontal",
rm=2.54, pad_size=1, ddrill=0.5, pack_width=10.5, pack_height=3, pack_rm=1.2, pack_offset=3,
package_pad=True, package_pad_offset=3.5, package_pad_size=[10.5,3.2],
package_pad_add_holes=True, package_pad_drill_size=[1.2, 1.2], package_pad_ddrill=0.8,
style="flat", description="Crystal THT AT310 10.0mm-10.5mm length 3.0mm diameter", lib_name="Crystals", tags=["AT310"],
offset3d=[1.27/25.4, 0, 0], scale3d=[1, 1, 1], rotate3d=[0, 0, 0])
makeCrystalRoundVert(footprint_name="Crystal_AT310_d3.0mm_l10.0mm_Vertical",
rm=2.54, pad_size=1, ddrill=0.5, pack_diameter=3,
description="Crystal THT AT310 10.0mm-10.5mm length 3.0mm diameter", lib_name="Crystals", tags=["AT310"],
offset3d=[0, 0, 0], scale3d=[1/2.54,1/2.54,1/2.54], rotate3d=[0, 0, 0])
makeCrystalAll(footprint_name="Crystal_C26-LF_d2.1mm_l6.5mm_Horizontal",
rm=1.9, pad_size=1, ddrill=0.5, pack_width=6.5, pack_height=2.06, pack_rm=0.7, pack_offset=2,
package_pad=True, package_pad_offset=2.5, package_pad_size=[6.5,2.2],
package_pad_add_holes=True, package_pad_drill_size=[1.2, 1.2], package_pad_ddrill=0.8,
style="flat", description="Crystal THT C26-LF 6.5mm length 2.06mm diameter", tags=["C26-LF"], lib_name="Crystals",
offset3d=[0, 0, 0], scale3d=[1/2.54,1/2.54,1/2.54], rotate3d=[0, 0, 0])
makeCrystalRoundVert(footprint_name="Crystal_C26-LF_d2.1mm_l6.5mm_Vertical",
rm=1.9, pad_size=1, ddrill=0.5, pack_diameter=2.06,
description="Crystal THT C26-LF 6.5mm length 2.06mm diameter", tags=["C26-LF"], lib_name="Crystals",
offset3d=[0, 0, 0], scale3d=[1/2.54,1/2.54,1/2.54], rotate3d=[0, 0, 0])
makeCrystalAll(footprint_name="Crystal_C38-LF_d3.0mm_l8.0mm_Horizontal",
rm=1.9, pad_size=1, ddrill=0.5, pack_width=8, pack_height=3, pack_rm=1.09, pack_offset=2.5,
package_pad=True, package_pad_offset=3, package_pad_size=[8,3],
package_pad_add_holes=True, package_pad_drill_size=[1.2, 1.2], package_pad_ddrill=0.8,
style="flat", description="Crystal THT C38-LF 8.0mm length 3.0mm diameter", tags=["C38-LF"],
lib_name="Crystals",
offset3d=[0, 0, 0], scale3d=[1/2.54,1/2.54,1/2.54], rotate3d=[0, 0, 0])
makeCrystalRoundVert(footprint_name="Crystal_C38-LF_d3.0mm_l8.0mm_Vertical",
rm=1.9, pad_size=1, ddrill=0.5, pack_diameter=3,
description="Crystal THT C38-LF 8.0mm length 3.0mm diameter", tags=["C38-LF"],
lib_name="Crystals",
offset3d=[0, 0, 0], scale3d=[1/2.54,1/2.54,1/2.54], rotate3d=[0, 0, 0])
makeCrystalRoundVert(footprint_name="Crystal_Round_d3.0mm_Vertical",
rm=1.9, pad_size=1, ddrill=0.5, pack_diameter=3,
description="Crystal THT C38-LF 8.0mm length 3.0mm diameter", tags=["C38-LF"],
lib_name="Crystals",
offset3d=[0, 0, 0], scale3d=[1/2.54,1/2.54,1/2.54], rotate3d=[0, 0, 0])
makeCrystalAll(footprint_name="Crystal_DS26_d2.0mm_l6.0mm_Horizontal",
rm=1.9, pad_size=1, ddrill=0.5, pack_width=6, pack_height=2, pack_rm=0.7, pack_offset=2,
package_pad=True, package_pad_offset=2.5, package_pad_size=[6,2.5],
package_pad_add_holes=True, package_pad_drill_size=[1, 1], package_pad_ddrill=0.5,
style="flat", description="Crystal THT DS26 6.0mm length 2.0mm diameter http://www.microcrystal.com/images/_Product-Documentation/03_TF_metal_Packages/01_Datasheet/DS-Series.pdf",
tags=["DS26"],lib_name="Crystals",
offset3d=[0, 0, 0], scale3d=[1/2.54,1/2.54,1/2.54], rotate3d=[0, 0, 0])
makeCrystalRoundVert(footprint_name="Crystal_DS26_d2.0mm_l6.0mm_Vertical",
rm=1.9, pad_size=1, ddrill=0.5, pack_diameter=2,
description="Crystal THT DS26 6.0mm length 2.0mm diameter http://www.microcrystal.com/images/_Product-Documentation/03_TF_metal_Packages/01_Datasheet/DS-Series.pdf",
tags=["DS26"],lib_name="Crystals",
offset3d=[0, 0, 0], scale3d=[1/2.54,1/2.54,1/2.54], rotate3d=[0, 0, 0])
makeCrystalRoundVert(footprint_name="Crystal_Round_d2.0mm_Vertical",
rm=1.9, pad_size=1, ddrill=0.5, pack_diameter=2,
description="Crystal THT DS26 6.0mm length 2.0mm diameter http://www.microcrystal.com/images/_Product-Documentation/03_TF_metal_Packages/01_Datasheet/DS-Series.pdf",
tags=["DS26"],lib_name="Crystals",
offset3d=[0, 0, 0], scale3d=[1/2.54,1/2.54,1/2.54], rotate3d=[0, 0, 0])
makeCrystalAll(footprint_name="Crystal_DS15_d1.5mm_l5.0mm_Horizontal",
rm=1.7, pad_size=1, ddrill=0.5, pack_width=5, pack_height=1.5, pack_rm=0.5, pack_offset=1.5,
package_pad=True, package_pad_offset=2, package_pad_size=[5,2],
package_pad_add_holes=True, package_pad_drill_size=[1, 1], package_pad_ddrill=0.5,
style="flat",
description="Crystal THT DS15 5.0mm length 1.5mm diameter http://www.microcrystal.com/images/_Product-Documentation/03_TF_metal_Packages/01_Datasheet/DS-Series.pdf",
tags=["DS15"], lib_name="Crystals",
offset3d=[0, 0, 0], scale3d=[1/2.54,1/2.54,1/2.54], rotate3d=[0, 0, 0])
makeCrystalRoundVert(footprint_name="Crystal_DS15_d1.5mm_l5.0mm_Vertical",
rm=1.7, pad_size=1, ddrill=0.5, pack_diameter=1.5,
description="Crystal THT DS15 5.0mm length 1.5mm diameter http://www.microcrystal.com/images/_Product-Documentation/03_TF_metal_Packages/01_Datasheet/DS-Series.pdf",
tags=["DS15"], lib_name="Crystals",
offset3d=[0, 0, 0], scale3d=[1/2.54,1/2.54,1/2.54], rotate3d=[0, 0, 0])
makeCrystalRoundVert(footprint_name="Crystal_Round_d1.5mm_Vertical",
rm=1.7, pad_size=1, ddrill=0.5, pack_diameter=1.5,
description="Crystal THT DS15 5.0mm length 1.5mm diameter http://www.microcrystal.com/images/_Product-Documentation/03_TF_metal_Packages/01_Datasheet/DS-Series.pdf",
tags=["DS15"], lib_name="Crystals",
offset3d=[0, 0, 0], scale3d=[1/2.54,1/2.54,1/2.54], rotate3d=[0, 0, 0])
makeCrystalAll(footprint_name="Crystal_DS10_d1.0mm_l4.3mm_Horizontal",
rm=1.5, pad_size=1, ddrill=0.5, pack_width=4.3, pack_height=1, pack_rm=0.3, pack_offset=1.5,
package_pad=True, package_pad_offset=2, package_pad_size=[4.3, 1.5],
package_pad_add_holes=True, package_pad_drill_size=[1, 1], package_pad_ddrill=0.5,
style="flat",
description="Crystal THT DS10 4.3mm length 1.0mm diameter http://www.microcrystal.com/images/_Product-Documentation/03_TF_metal_Packages/01_Datasheet/DS-Series.pdf",
tags=["DS10"], lib_name="Crystals",
offset3d=[0, 0, 0], scale3d=[1/2.54,1/2.54,1/2.54], rotate3d=[0, 0, 0])
makeCrystalRoundVert(footprint_name="Crystal_DS10_d1.0mm_l4.3mm_Vertical",
rm=1.5, pad_size=1, ddrill=0.5, pack_diameter=1,
description="Crystal THT DS10 4.3mm length 1.0mm diameter http://www.microcrystal.com/images/_Product-Documentation/03_TF_metal_Packages/01_Datasheet/DS-Series.pdf",
tags=["DS10"], lib_name="Crystals",
offset3d=[0, 0, 0], scale3d=[1/2.54,1/2.54,1/2.54], rotate3d=[0, 0, 0])
makeCrystalRoundVert(footprint_name="Crystal_Round_d1.0mm_Vertical",
rm=1.5, pad_size=1, ddrill=0.5, pack_diameter=1,
description="Crystal THT DS10 1.0mm diameter http://www.microcrystal.com/images/_Product-Documentation/03_TF_metal_Packages/01_Datasheet/DS-Series.pdf",
tags=["DS10"], lib_name="Crystals",
offset3d=[0, 0, 0], scale3d=[1/2.54,1/2.54,1/2.54], rotate3d=[0, 0, 0])
makeCrystalAll(footprint_name="Crystal_HC49-U_Horizontal",
rm=4.9, pad_size=1.5, ddrill=0.8, pack_width=13.0, pack_height=10.9, pack_rm=4.9, pack_offset=2,
package_pad=True, package_pad_offset=2.5, package_pad_size=[13.5, 11],
package_pad_add_holes=True, package_pad_drill_size=[1.2, 1.2], package_pad_ddrill=0.8,
style="hc49",
description="Crystal THT HC-49/U http://5hertz.com/pdfs/04404_D.pdf",
lib_name="Crystals",
offset3d=[0, 0, 0], scale3d=[1/2.54, 1/2.54, 1/2.54], rotate3d=[0, 0, 0],
script3d=script3dhc49h, height3d=4.65, iheight3d=4)
makeCrystalHC49Vert(footprint_name = "Crystal_HC49-U_Vertical", pins=2,
rm=4.88, pad_size=1.5, ddrill=0.8, pack_width=10.9, pack_height=4.65,
innerpack_width=10, innerpack_height=4,
tags=standardtags+"HC-49/U", description="Crystal THT HC-49/U http://5hertz.com/pdfs/04404_D.pdf",
lib_name="Crystals",
offset3d=[0, 0, 0], scale3d=[1/2.54, 1/2.54, 1/2.54], rotate3d=[0, 0, 0],
script3d=script3dhc49, height3d=13)
makeCrystalHC49Vert(footprint_name="Crystal_HC49-U-3pin_Vertical", pins=3,
rm=4.88, pad_size=1.5, ddrill=0.8, pack_width=10.9, pack_height=4.65,
innerpack_width=10, innerpack_height=4,
tags=standardtags+"HC-49/U", description="Crystal THT HC-49/U, 3pin-version, http://www.raltron.com/products/pdfspecs/crystal_hc_49_45_51.pdf",
lib_name="Crystals",
offset3d=[0, 0, 0], scale3d=[1 / 2.54, 1 / 2.54, 1 / 2.54], rotate3d=[0, 0, 0],
script3d=script3dhc493p, height3d=13)
makeCrystalHC49Vert(footprint_name = "Crystal_HC49-4H_Vertical", pins=2,
rm=4.88, pad_size=1.5, ddrill=0.8, pack_width=11.05, pack_height=4.65,
innerpack_width=10, innerpack_height=4,
tags=standardtags+"HC-49-4H", description="Crystal THT HC-49-4H http://5hertz.com/pdfs/04404_D.pdf",
lib_name="Crystals",
offset3d=[0, 0, 0], scale3d=[1/2.54, 1/2.54, 1/2.54], rotate3d=[0, 0, 0],
script3d=script3dhc49, height3d=4)
makeCrystalAll(footprint_name="Crystal_HC18-U_Horizontal",
rm=4.9, pad_size=1.5, ddrill=0.8, pack_width=13.0, pack_height=10.9, pack_rm=4.9, pack_offset=2,
package_pad=True, package_pad_offset=2.5, package_pad_size=[13.5, 11],
package_pad_add_holes=True, package_pad_drill_size=[1.2, 1.2], package_pad_ddrill=0.8,
style="hc49",
description="Crystal THT HC-18/U http://5hertz.com/pdfs/04404_D.pdf",
lib_name="Crystals",
offset3d=[0, 0, 0], scale3d=[1 / 2.54, 1 / 2.54, 1 / 2.54], rotate3d=[0, 0, 0],
script3d=script3dhc49h, height3d=4.65, iheight3d=4)
makeCrystalHC49Vert(footprint_name="Crystal_HC18-U_Vertical", pins=2,
rm=4.9, pad_size=1.5, ddrill=0.8, pack_width=10.9, pack_height=4.65,
innerpack_width=10, innerpack_height=4,
tags=standardtags+"HC-18/U",
description="Crystal THT HC-18/U, http://5hertz.com/pdfs/04404_D.pdf",
lib_name="Crystals",
offset3d=[0, 0, 0], scale3d=[1 / 2.54, 1 / 2.54, 1 / 2.54], rotate3d=[0, 0, 0],
script3d=script3dhc49, height3d=13)
makeCrystalAll(footprint_name="Crystal_HC33-U_Horizontal",
rm=12.34, pad_size=2.7, ddrill=1.7, pack_width=19.7, pack_height=19.23, pack_rm=12.34, pack_offset=2.5,
package_pad=True, package_pad_offset=2.5, package_pad_size=[20.5, 20],
package_pad_add_holes=True, package_pad_drill_size=[1.2, 1.2], package_pad_ddrill=0.8,
style="hc49",
description="Crystal THT HC-33/U http://pdi.bentech-taiwan.com/PDI/GEN20SPEV20HC3320U.pdf",
lib_name="Crystals",
offset3d=[0, 0, 0], scale3d=[1 / 2.54, 1 / 2.54, 1 / 2.54], rotate3d=[0, 0, 0],
script3d=script3dhc49h, height3d=8.94, iheight3d=8.05)
makeCrystalHC49Vert(footprint_name="Crystal_HC33-U_Vertical", pins=2,
rm=12.34, pad_size=2.7, ddrill=1.7, pack_width=19.23, pack_height=8.94,
innerpack_width=18.42, innerpack_height=8.05,
tags=standardtags+"HC-33/U",
description="Crystal THT HC-33/U, http://pdi.bentech-taiwan.com/PDI/GEN20SPEV20HC3320U.pdf",
lib_name="Crystals",
offset3d=[0, 0, 0], scale3d=[1 / 2.54, 1 / 2.54, 1 / 2.54], rotate3d=[0, 0, 0],
script3d=script3dhc49, height3d=19.7)
makeCrystalAll(footprint_name="Crystal_HC50_Horizontal",
rm=4.9, pad_size=2.3, ddrill=1.5, pack_width=13.36, pack_height=11.05, pack_rm=4.9, pack_offset=2.5,
package_pad=True, package_pad_offset=2.5, package_pad_size=[14, 11.5],
package_pad_add_holes=True, package_pad_drill_size=[1.2, 1.2], package_pad_ddrill=0.8,
style="hc49",
description="Crystal THT HC-50 http://www.crovencrystals.com/croven_pdf/HC-50_Crystal_Holder_Rev_00.pdf",
lib_name="Crystals",
offset3d=[0, 0, 0], scale3d=[1 / 2.54, 1 / 2.54, 1 / 2.54], rotate3d=[0, 0, 0],
script3d=script3dhc49h, height3d=4.65, iheight3d=3.8)
makeCrystalHC49Vert(footprint_name="Crystal_HC50_Vertical", pins=2,
rm=4.9, pad_size=2.3, ddrill=1.5, pack_width=11.05, pack_height=4.65,
innerpack_width=10.2, innerpack_height=3.8,
tags=standardtags+"HC-50",
description="Crystal THT HC-50, http://www.crovencrystals.com/croven_pdf/HC-50_Crystal_Holder_Rev_00.pdf",
lib_name="Crystals",
offset3d=[0, 0, 0], scale3d=[1 / 2.54, 1 / 2.54, 1 / 2.54], rotate3d=[0, 0, 0],
script3d=script3dhc49, height3d=13.36)
makeCrystalAll(footprint_name="Crystal_HC51_Horizontal",
rm=12.35, pad_size=2.3, ddrill=1.2, pack_width=19.7, pack_height=19.3, pack_rm=12.35, pack_offset=2.5,
package_pad=True, package_pad_offset=2.5, package_pad_size=[20.5, 20],
package_pad_add_holes=True, package_pad_drill_size=[1.2, 1.2], package_pad_ddrill=0.8,
style="hc49",
description="Crystal THT HC-51 http://www.crovencrystals.com/croven_pdf/HC-51_Crystal_Holder_Rev_00.pdf",
lib_name="Crystals",
offset3d=[0, 0, 0], scale3d=[1 / 2.54, 1 / 2.54, 1 / 2.54], rotate3d=[0, 0, 0],
script3d=script3dhc49h, height3d=8.9, iheight3d=7.6)
makeCrystalHC49Vert(footprint_name="Crystal_HC51-U_Vertical", pins=2,
rm=12.35, pad_size=2.3, ddrill=1.2, pack_width=19.3, pack_height=8.9,
innerpack_width=18, innerpack_height=7.6,
tags=standardtags+"HC-51/U",
description="Crystal THT HC-51/U, http://www.crovencrystals.com/croven_pdf/HC-51_Crystal_Holder_Rev_00.pdf",
lib_name="Crystals",
offset3d=[0, 0, 0], scale3d=[1 / 2.54, 1 / 2.54, 1 / 2.54], rotate3d=[0, 0, 0],
script3d=script3dhc49, height3d=19.7)
# HC-52/U crystal can, lying horizontally, with grounded mounting pad and
# fixing holes.
# Fix: the description previously said "HC-51/U" — a copy/paste error; the
# footprint name, dimensions and the linked XS71xx datasheet are all HC-52/U.
makeCrystalAll(footprint_name="Crystal_HC52-U_Horizontal",
rm=3.8, pad_size=1.5, ddrill=0.8, pack_width=8.8, pack_height=8, pack_rm=3.8, pack_offset=1.5,
package_pad=True, package_pad_offset=1.5, package_pad_size=[9.5, 8.5],
package_pad_add_holes=True, package_pad_drill_size=[1.2, 1.2], package_pad_ddrill=0.8,
style="hc49",
description="Crystal THT HC-52/U http://www.kvg-gmbh.de/assets/uploads/files/product_pdfs/XS71xx.pdf",
lib_name="Crystals",
offset3d=[0, 0, 0], scale3d=[1 / 2.54, 1 / 2.54, 1 / 2.54], rotate3d=[0, 0, 0],
script3d=script3dhc49h, height3d=3.3, iheight3d=2.3)
# HC-52/U crystal can, standing vertically (2-pin variant).
makeCrystalHC49Vert(footprint_name="Crystal_HC52-U_Vertical", pins=2,
rm=3.8, pad_size=1.5, ddrill=0.8, pack_width=8, pack_height=3.3,
innerpack_width=7, innerpack_height=2.3,
tags=standardtags+"HC-52/U",
description="Crystal THT HC-52/U, http://www.kvg-gmbh.de/assets/uploads/files/product_pdfs/XS71xx.pdf",
lib_name="Crystals",
offset3d=[0, 0, 0], scale3d=[1 / 2.54, 1 / 2.54, 1 / 2.54], rotate3d=[0, 0, 0],
script3d=script3dhc49, height3d=8.8)
# Same can with a third (case/ground) pin; note the 3-pin 3D script.
makeCrystalHC49Vert(footprint_name="Crystal_HC52-U-3pin_Vertical", pins=3,
rm=3.8, pad_size=1.5, ddrill=0.8, pack_width=8, pack_height=3.3,
innerpack_width=7, innerpack_height=2.3,
tags=standardtags+"HC-52/U",
description="Crystal THT HC-52/U, http://www.kvg-gmbh.de/assets/uploads/files/product_pdfs/XS71xx.pdf",
lib_name="Crystals",
offset3d=[0, 0, 0], scale3d=[1 / 2.54, 1 / 2.54, 1 / 2.54], rotate3d=[0, 0, 0],
script3d=script3dhc493p, height3d=8.8)
# HC-52 8 mm crystal can, lying horizontally, with grounded mounting pad.
# Fix: the description previously said "HC-51/8mm" — copy/paste error; the
# footprint name and dimensions are the 8 mm HC-52 variant.
makeCrystalAll(footprint_name="Crystal_HC52-8mm_Horizontal",
rm=3.8, pad_size=1.5, ddrill=0.8, pack_width=8, pack_height=8, pack_rm=3.8, pack_offset=1.5,
package_pad=True, package_pad_offset=1.5, package_pad_size=[8.5, 8.5],
package_pad_add_holes=True, package_pad_drill_size=[1.2, 1.2], package_pad_ddrill=0.8,
style="hc49",
description="Crystal THT HC-52/8mm http://www.kvg-gmbh.de/assets/uploads/files/product_pdfs/XS71xx.pdf",
lib_name="Crystals",
offset3d=[0, 0, 0], scale3d=[1 / 2.54, 1 / 2.54, 1 / 2.54], rotate3d=[0, 0, 0],
script3d=script3dhc49h, height3d=3.3, iheight3d=2.3)
# HC-52 8 mm crystal can, standing vertically.
# Fix: tags previously said "HC-49/U" — copy/paste error; this is the HC-52
# 8 mm variant (matches the footprint name and description).
makeCrystalHC49Vert(footprint_name="Crystal_HC52-8mm_Vertical", pins=2,
rm=3.8, pad_size=1.5, ddrill=0.8, pack_width=8, pack_height=3.3,
innerpack_width=7, innerpack_height=2.3,
tags=standardtags+"HC-52/8mm",
description="Crystal THT HC-52/8mm, http://www.kvg-gmbh.de/assets/uploads/files/product_pdfs/XS71xx.pdf",
lib_name="Crystals",
offset3d=[0, 0, 0], scale3d=[1 / 2.54, 1 / 2.54, 1 / 2.54], rotate3d=[0, 0, 0],
script3d=script3dhc49, height3d=8)
# HC-52 6 mm crystal can, lying horizontally, with grounded mounting pad.
# Fix: the description previously said "HC-51/6mm" — copy/paste error; the
# footprint name and dimensions are the 6 mm HC-52 variant.
makeCrystalAll(footprint_name="Crystal_HC52-6mm_Horizontal",
rm=3.8, pad_size=1.5, ddrill=0.8, pack_width=6, pack_height=8, pack_rm=3.8, pack_offset=1.5,
package_pad=True, package_pad_offset=1.5, package_pad_size=[6.5, 8.5],
package_pad_add_holes=True, package_pad_drill_size=[1.2, 1.2], package_pad_ddrill=0.8,
style="hc49",
description="Crystal THT HC-52/6mm http://www.kvg-gmbh.de/assets/uploads/files/product_pdfs/XS71xx.pdf",
lib_name="Crystals",
offset3d=[0, 0, 0], scale3d=[1 / 2.54, 1 / 2.54, 1 / 2.54], rotate3d=[0, 0, 0],
script3d=script3dhc49h, height3d=3.3, iheight3d=2.3)
# HC-52 6 mm crystal can, standing vertically.
# Fix: tags previously said "HC-49/U" — copy/paste error; this is the HC-52
# 6 mm variant (matches the footprint name and description).
makeCrystalHC49Vert(footprint_name="Crystal_HC52-6mm_Vertical", pins=2,
rm=3.8, pad_size=1.5, ddrill=0.8, pack_width=8, pack_height=3.3,
innerpack_width=7, innerpack_height=2.3,
tags=standardtags+"HC-52/6mm",
description="Crystal THT HC-52/6mm, http://www.kvg-gmbh.de/assets/uploads/files/product_pdfs/XS71xx.pdf",
lib_name="Crystals",
offset3d=[0, 0, 0], scale3d=[1 / 2.54, 1 / 2.54, 1 / 2.54], rotate3d=[0, 0, 0],
script3d=script3dhc49, height3d=6)
# Ceramic resonator / filter footprints (THT, vertical).
# Fix: every description previously spelled "Resomator" — typo for
# "Resonator"; the descriptions are user-visible footprint metadata.
makeCrystalHC49Vert(footprint_name="Resonator_Murata_DSN6", pins=3, addSizeFootprintName=True,
rm=5, pad_size=1.7, ddrill=1, pack_width=7, pack_height=2.54,
innerpack_width=7, innerpack_height=2.54,
tags=standardtagsres+" DSN6",
description="Ceramic Resonator/Filter Murata DSN6, http://cdn-reichelt.de/documents/datenblatt/B400/DSN6NC51H.pdf",
lib_name="Crystals",
offset3d=[0, 0, 0], scale3d=[1 / 2.54, 1 / 2.54, 1 / 2.54], rotate3d=[0, 0, 0],
script3d=script3dres3, height3d=8)
makeCrystalHC49Vert(footprint_name="Resonator_Murata_DSS6", pins=3, addSizeFootprintName=True,
rm=5, pad_size=1.7, ddrill=1, pack_width=7, pack_height=2.54,
innerpack_width=7, innerpack_height=2.54,
tags=standardtagsres + " DSS6",
description="Ceramic Resonator/Filter Murata DSS6, http://cdn-reichelt.de/documents/datenblatt/B400/DSN6NC51H.pdf",
lib_name="Crystals",
offset3d=[0, 0, 0], scale3d=[1 / 2.54, 1 / 2.54, 1 / 2.54], rotate3d=[0, 0, 0],
script3d=script3dres3, height3d=7)
makeCrystalHC49Vert(footprint_name="Resonator_Murata_CSTLSxxxG", pins=3, addSizeFootprintName=True,
rm=5, pad_size=1.7, ddrill=1, pack_width=8, pack_height=3,
innerpack_width=8, innerpack_height=3,
tags=standardtagsres + " CSTLSxxxG",
description="Ceramic Resonator/Filter Murata CSTLSxxxG, http://www.murata.com/~/media/webrenewal/support/library/catalog/products/timingdevice/ceralock/p17e.ashx",
lib_name="Crystals",
offset3d=[0, 0, 0], scale3d=[1 / 2.54, 1 / 2.54, 1 / 2.54], rotate3d=[0, 0, 0],
script3d=script3dres3, height3d=5.5)
makeCrystalHC49Vert(footprint_name="Resonator_Murata_CSTLSxxxX", pins=3, addSizeFootprintName=True,
rm=5, pad_size=1.7, ddrill=1, pack_width=5.5, pack_height=3,
innerpack_width=5.5, innerpack_height=3,
tags=standardtagsres + " CSTLSxxxX",
description="Ceramic Resonator/Filter Murata CSTLSxxxX, http://www.murata.com/~/media/webrenewal/support/library/catalog/products/timingdevice/ceralock/p17e.ashx",
lib_name="Crystals",
offset3d=[0, 0, 0], scale3d=[1 / 2.54, 1 / 2.54, 1 / 2.54], rotate3d=[0, 0, 0],
script3d=script3dres3, height3d=5.5)
# Generic resonator footprints; addSizeFootprintName appends WxH to the name.
makeCrystalHC49Vert(footprint_name="Resonator", pins=2, addSizeFootprintName=True,
rm=5, pad_size=1.5, ddrill=0.8, pack_width=10, pack_height=5,
innerpack_width=10, innerpack_height=5,
tags=standardtagsres + "",
description="Ceramic Resonator/Filter 10.0x5.0 RedFrequency MG/MT/MX series, http://www.red-frequency.com/download/datenblatt/redfrequency-datenblatt-ir-zta.pdf",
lib_name="Crystals",
offset3d=[0, 0, 0], scale3d=[1 / 2.54, 1 / 2.54, 1 / 2.54], rotate3d=[0, 0, 0],
script3d=script3dres2, height3d=10)
makeCrystalHC49Vert(footprint_name="Resonator", pins=3, addSizeFootprintName=True,
rm=5, pad_size=1.5, ddrill=0.8, pack_width=10, pack_height=5,
innerpack_width=10, innerpack_height=5,
tags=standardtagsres + "",
description="Ceramic Resonator/Filter 10.0x5.0mm^2 RedFrequency MG/MT/MX series, http://www.red-frequency.com/download/datenblatt/redfrequency-datenblatt-ir-zta.pdf",
lib_name="Crystals",
offset3d=[0, 0, 0], scale3d=[1/2.54, 1/2.54, 1/2.54], rotate3d=[0, 0, 0],
script3d=script3dres3, height3d=10)
makeCrystalHC49Vert(footprint_name="Resonator", pins=3, addSizeFootprintName=True,
rm=5, pad_size=1.7, ddrill=1, pack_width=7, pack_height=2.5,
innerpack_width=7, innerpack_height=2.5,
tags=standardtagsres + "",
description="Ceramic Resonator/Filter 7.0x2.5mm^2",
lib_name="Crystals",
offset3d=[0, 0, 0], scale3d=[1/2.54, 1/2.54, 1/2.54], rotate3d=[0, 0, 0],
script3d=script3dres3, height3d=5.5)
makeCrystalHC49Vert(footprint_name="Resonator", pins=2, addSizeFootprintName=True,
rm=5, pad_size=1.7, ddrill=1, pack_width=7, pack_height=2.5,
innerpack_width=7, innerpack_height=2.5,
tags=standardtagsres + "",
description="Ceramic Resonator/Filter 7.0x2.5mm^2",
lib_name="Crystals",
offset3d=[0, 0, 0], scale3d=[1/2.54, 1/2.54, 1/2.54], rotate3d=[0, 0, 0],
script3d=script3dres2, height3d=5.5)
makeCrystalHC49Vert(footprint_name="Resonator", pins=3, addSizeFootprintName=True,
rm=5, pad_size=1.7, ddrill=1, pack_width=8, pack_height=3.5,
innerpack_width=8, innerpack_height=3.5,
tags=standardtagsres + "",
description="Ceramic Resonator/Filter 8.0x3.5mm^2",
lib_name="Crystals",
offset3d=[0, 0, 0], scale3d=[1/2.54, 1/2.54, 1/2.54], rotate3d=[0, 0, 0],
script3d=script3dres3, height3d=6.5)
makeCrystalHC49Vert(footprint_name="Resonator", pins=2, addSizeFootprintName=True,
rm=5, pad_size=1.7, ddrill=1, pack_width=8, pack_height=3.5,
innerpack_width=8, innerpack_height=3.5,
tags=standardtagsres + "",
description="Ceramic Resonator/Filter 8.0x3.5mm^2",
lib_name="Crystals",
offset3d=[0, 0, 0], scale3d=[1/2.54, 1/2.54, 1/2.54], rotate3d=[0, 0, 0],
script3d=script3dres2, height3d=6.5)
makeCrystalHC49Vert(footprint_name="Resonator", pins=3, addSizeFootprintName=True,
rm=5, pad_size=1.7, ddrill=1, pack_width=6, pack_height=3.0,
innerpack_width=6, innerpack_height=3.0,
tags=standardtagsres + "",
description="Ceramic Resonator/Filter 6.0x3.0mm^2",
lib_name="Crystals",
offset3d=[0, 0, 0], scale3d=[1/2.54, 1/2.54, 1/2.54], rotate3d=[0, 0, 0],
script3d=script3dres3, height3d=6.5)
makeCrystalHC49Vert(footprint_name="Resonator", pins=2, addSizeFootprintName=True,
rm=5, pad_size=1.7, ddrill=1, pack_width=6, pack_height=3.0,
innerpack_width=6, innerpack_height=3.0,
tags=standardtagsres + "",
description="Ceramic Resonator/Filter 6.0x3.0mm^2",
lib_name="Crystals",
offset3d=[0, 0, 0], scale3d=[1/2.54, 1/2.54, 1/2.54], rotate3d=[0, 0, 0],
script3d=script3dres2, height3d=6.5)
| 77.377604 | 195 | 0.589977 | 4,161 | 29,713 | 4.046623 | 0.05888 | 0.022212 | 0.032783 | 0.027319 | 0.917449 | 0.884191 | 0.868096 | 0.85895 | 0.844162 | 0.833828 | 0 | 0.119006 | 0.270084 | 29,713 | 383 | 196 | 77.579634 | 0.657368 | 0.012621 | 0 | 0.591892 | 0 | 0.072973 | 0.217342 | 0.039008 | 0 | 0 | 0.000818 | 0 | 0 | 1 | 0 | false | 0 | 0.016216 | 0 | 0.016216 | 0.12973 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
378e1b5e6c4f46e86884972a9b4237cc6a84cbb7 | 280 | py | Python | src/wai/annotations/format/void/specifier/__init__.py | waikato-ufdl/wai-annotations-core | bac3429e9488efb456972c74f9d462f951c4af3d | [
"Apache-2.0"
] | null | null | null | src/wai/annotations/format/void/specifier/__init__.py | waikato-ufdl/wai-annotations-core | bac3429e9488efb456972c74f9d462f951c4af3d | [
"Apache-2.0"
] | 3 | 2021-06-30T23:42:47.000Z | 2022-03-01T03:45:07.000Z | src/wai/annotations/format/void/specifier/__init__.py | waikato-ufdl/wai-annotations-core | bac3429e9488efb456972c74f9d462f951c4af3d | [
"Apache-2.0"
] | null | null | null | from ._VoidICOutputFormatSpecifier import VoidICOutputFormatSpecifier
from ._VoidISOutputFormatSpecifier import VoidISOutputFormatSpecifier
from ._VoidODOutputFormatSpecifier import VoidODOutputFormatSpecifier
from ._VoidSPOutputFormatSpecifier import VoidSPOutputFormatSpecifier
| 56 | 69 | 0.928571 | 16 | 280 | 16 | 0.375 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.057143 | 280 | 4 | 70 | 70 | 0.969697 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 1 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
37c8ef130b1ae28b98b2d89f0e18076b1a6b2be4 | 34 | py | Python | fill_discontiguous/__init__.py | kwrooijen/krita-fill-contiguous | f65a5f4002fced4f2894775d87ffa8d4035f41d3 | [
"MIT"
] | 1 | 2020-05-28T11:38:49.000Z | 2020-05-28T11:38:49.000Z | fill_discontiguous/__init__.py | kwrooijen/krita-fill-contiguous | f65a5f4002fced4f2894775d87ffa8d4035f41d3 | [
"MIT"
] | null | null | null | fill_discontiguous/__init__.py | kwrooijen/krita-fill-contiguous | f65a5f4002fced4f2894775d87ffa8d4035f41d3 | [
"MIT"
] | null | null | null | from .fill_discontiguous import *
| 17 | 33 | 0.823529 | 4 | 34 | 6.75 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.117647 | 34 | 1 | 34 | 34 | 0.9 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
03eaf88127009e798c779cda706d0fd4686a68d0 | 41 | py | Python | adform/__init__.py | dutkiewicz/adform-api | 5b670ea971c261565d1fe4cf7c18b2e109f8449d | [
"MIT"
] | null | null | null | adform/__init__.py | dutkiewicz/adform-api | 5b670ea971c261565d1fe4cf7c18b2e109f8449d | [
"MIT"
] | 6 | 2019-11-29T04:53:15.000Z | 2020-06-29T04:41:24.000Z | adform/__init__.py | dutkiewicz/adform-api | 5b670ea971c261565d1fe4cf7c18b2e109f8449d | [
"MIT"
] | null | null | null | from . import auth, lineitems, reporting
| 20.5 | 40 | 0.780488 | 5 | 41 | 6.4 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.146341 | 41 | 1 | 41 | 41 | 0.914286 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
206452f3aab9dd4cba48cc83a1692d173ca092a1 | 142 | py | Python | pywi/processing/filtering/__init__.py | jeremiedecock/mrif | 094b0dd81ff2be0e24bf3871caab48da1b5d138b | [
"MIT"
] | 1 | 2021-07-06T06:02:45.000Z | 2021-07-06T06:02:45.000Z | pywi/processing/filtering/__init__.py | jeremiedecock/mrif | 094b0dd81ff2be0e24bf3871caab48da1b5d138b | [
"MIT"
] | null | null | null | pywi/processing/filtering/__init__.py | jeremiedecock/mrif | 094b0dd81ff2be0e24bf3871caab48da1b5d138b | [
"MIT"
] | 1 | 2019-01-07T10:50:38.000Z | 2019-01-07T10:50:38.000Z | """Filtering algorithms
This module contains various image filtering algorithms.
"""
from . import hard_filter
from . import pixel_clusters
| 17.75 | 56 | 0.795775 | 17 | 142 | 6.529412 | 0.764706 | 0.342342 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.140845 | 142 | 7 | 57 | 20.285714 | 0.909836 | 0.549296 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | null | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
20882971ace3da54f2dc76302475ee3d3b627dfd | 914 | py | Python | tests/test_configuration.py | natinwave/paymentech | 01694844c6600e396aab3b4afe692d3220073319 | [
"Unlicense"
] | 1 | 2021-03-09T00:55:46.000Z | 2021-03-09T00:55:46.000Z | tests/test_configuration.py | natinwave/paymentech | 01694844c6600e396aab3b4afe692d3220073319 | [
"Unlicense"
] | null | null | null | tests/test_configuration.py | natinwave/paymentech | 01694844c6600e396aab3b4afe692d3220073319 | [
"Unlicense"
] | 1 | 2020-09-15T21:34:23.000Z | 2020-09-15T21:34:23.000Z | import paymentech
def test_configuration():
    """configure() on the PNS platform stores all credentials; PNS maps to bin 000002."""
    paymentech.configure("user", "pass", "1", "pns")
    cfg = paymentech.configuration
    expected = {
        "merchant_id": "1",
        "bin": "000002",
        "username": "user",
        "password": "pass",
    }
    for key, value in expected.items():
        assert cfg.get(key) == value
def test_platforms():
    """Each known platform maps to its bin; unknown names keep the PNS bin."""
    cases = [
        ("stratus", "000001"),
        ("pns", "000002"),
        ("garbage", "000002"),  # unrecognised platform falls back to 000002
    ]
    for platform, expected_bin in cases:
        paymentech.configure("user", "pass", "1", platform)
        assert paymentech.configuration.get("bin") == expected_bin
def test_configuration_with_additional_options():
    """Extra keyword options given to configure() are stored in the configuration."""
    paymentech.configure("user", "pass", "1", "stratus", something="extra")
    stored = paymentech.configuration.get("something", None)
    assert stored == "extra"
| 32.642857 | 75 | 0.684902 | 96 | 914 | 6.447917 | 0.291667 | 0.206785 | 0.374798 | 0.41357 | 0.565428 | 0.463651 | 0.203554 | 0.203554 | 0.203554 | 0.203554 | 0 | 0.037975 | 0.135667 | 914 | 27 | 76 | 33.851852 | 0.74557 | 0 | 0 | 0.294118 | 0 | 0 | 0.178337 | 0 | 0 | 0 | 0 | 0 | 0.470588 | 1 | 0.176471 | true | 0.352941 | 0.058824 | 0 | 0.235294 | 0 | 0 | 0 | 0 | null | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 0 | 0 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 6 |
20c7389074eb8916297b8dbb53eed8c7ac340505 | 128 | py | Python | src/util/lossComponents/__init__.py | JasonYuJjyu/unsupFlownet | 3f652d7aaa0ce99efb767114ec119a3c23e3051b | [
"MIT"
] | 24 | 2018-09-25T14:39:45.000Z | 2022-03-29T10:44:50.000Z | src/util/lossComponents/__init__.py | y788zhan/joint_seg_flow | 4c0082e9c1f625d6f6f1e1195a2c3d9dbf82bec0 | [
"MIT"
] | 6 | 2018-09-28T18:03:26.000Z | 2019-03-25T01:43:29.000Z | src/util/lossComponents/__init__.py | y788zhan/joint_seg_flow | 4c0082e9c1f625d6f6f1e1195a2c3d9dbf82bec0 | [
"MIT"
] | 7 | 2018-10-19T03:19:55.000Z | 2022-02-22T02:42:28.000Z | from photoLoss import *
from gradLoss import *
from smoothLoss import *
from smoothLoss2nd import *
from unsupFlowLoss import *
| 21.333333 | 27 | 0.804688 | 15 | 128 | 6.866667 | 0.466667 | 0.38835 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.009259 | 0.15625 | 128 | 5 | 28 | 25.6 | 0.944444 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | null | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
45add80aef6e5dca5a557adcdaf44b996ec2a4e7 | 37 | py | Python | app/api/__init__.py | rickywang432/flask | c956dee6c7dfbb57a5fcd247d23af37e20b96da7 | [
"MIT"
] | null | null | null | app/api/__init__.py | rickywang432/flask | c956dee6c7dfbb57a5fcd247d23af37e20b96da7 | [
"MIT"
] | 1 | 2021-06-02T02:01:38.000Z | 2021-06-02T02:01:38.000Z | app/api/__init__.py | rickywang432/flask | c956dee6c7dfbb57a5fcd247d23af37e20b96da7 | [
"MIT"
] | null | null | null | from app.api.views import api # noqa | 37 | 37 | 0.756757 | 7 | 37 | 4 | 0.857143 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.162162 | 37 | 1 | 37 | 37 | 0.903226 | 0.108108 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
45ca19f392e4106d4710b60bbe98712aa1919a13 | 56 | py | Python | heppy/modules/price10.py | hiqdev/epypy | 364fdcc0bef96e079e10b8bae7173e14da28da37 | [
"BSD-3-Clause"
] | 20 | 2016-06-02T20:29:29.000Z | 2022-01-31T07:47:02.000Z | heppy/modules/price10.py | hiqdev/epypy | 364fdcc0bef96e079e10b8bae7173e14da28da37 | [
"BSD-3-Clause"
] | 1 | 2018-10-09T16:09:24.000Z | 2018-10-10T08:17:42.000Z | heppy/modules/price10.py | hiqdev/epypy | 364fdcc0bef96e079e10b8bae7173e14da28da37 | [
"BSD-3-Clause"
] | 7 | 2018-04-11T16:05:06.000Z | 2020-01-28T16:30:40.000Z | from price import price
class price10(price):
    """EPP 'price' extension, version 1.0.

    Adds no behavior of its own — it inherits everything from ``price``.
    NOTE(review): presumably the subclass exists only so the versioned
    module name resolves to a class; confirm against heppy's module lookup.
    """
    pass
| 11.2 | 23 | 0.732143 | 8 | 56 | 5.125 | 0.75 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.045455 | 0.214286 | 56 | 4 | 24 | 14 | 0.886364 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0.333333 | 0.333333 | 0 | 0.666667 | 0 | 1 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 1 | 1 | 0 | 1 | 0 | 0 | 6 |
afddb2069a961d1e9d769507976393b3c786a8cb | 72 | py | Python | pytests/__init__.py | rosalogia/bumblebee-status | 19c3975301d8700743df745ecd5ca2c05ecf5cf0 | [
"MIT"
] | null | null | null | pytests/__init__.py | rosalogia/bumblebee-status | 19c3975301d8700743df745ecd5ca2c05ecf5cf0 | [
"MIT"
] | null | null | null | pytests/__init__.py | rosalogia/bumblebee-status | 19c3975301d8700743df745ecd5ca2c05ecf5cf0 | [
"MIT"
] | null | null | null | import bumblebee_status.discover
bumblebee_status.discover.discover()
| 14.4 | 36 | 0.861111 | 8 | 72 | 7.5 | 0.5 | 0.5 | 0.766667 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.069444 | 72 | 4 | 37 | 18 | 0.895522 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 0.5 | 0 | 0.5 | 0 | 1 | 0 | 0 | null | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 0 | 0 | 0 | 6 |
b322b0f855ad15f6de0a2df4e3b385807bf240e9 | 3,914 | py | Python | tests/unit_tests/surface/test_surface_pointset.py | poc11/resqpy | 5dfbfb924f8ee9b2712fb8e38bff96ee8ee9d8e2 | [
"MIT"
] | null | null | null | tests/unit_tests/surface/test_surface_pointset.py | poc11/resqpy | 5dfbfb924f8ee9b2712fb8e38bff96ee8ee9d8e2 | [
"MIT"
] | null | null | null | tests/unit_tests/surface/test_surface_pointset.py | poc11/resqpy | 5dfbfb924f8ee9b2712fb8e38bff96ee8ee9d8e2 | [
"MIT"
] | null | null | null | import numpy as np
from numpy.testing import assert_array_almost_equal
import resqpy.grid
import resqpy.organize
import resqpy.surface
import pytest
# Unit tests for surface/Pointset methods
def test_from_charisma_method(mocker, example_model_and_crs):
    """from_charisma() should parse xyz columns from the file and hand them to add_patch as one array."""
    # Arrange
    model, crs = example_model_and_crs
    charisma_file = "INLINE : 25701 XLINE : 23693 420691.19624 6292314.22044 2799.05591\nINLINE : 25701 XLINE : 23694 420680.15765 6292308.35532 2798.08496"
    open_mock = mocker.mock_open(read_data = charisma_file)
    mocker.patch("builtins.open", open_mock)
    test_path = "path/to/file"
    # Expected xyz points: the last three columns of each charisma line.
    array = np.array([[420691.19624, 6292314.22044, 2799.05591], [420680.15765, 6292308.35532, 2798.08496]])
    patch_mock = mocker.MagicMock(name = 'add_patch')
    # mocker.patch(resqpy.surface.PointSet.add_patch, patch_mock)
    # Act
    pointset = resqpy.surface.PointSet(model, crs_uuid = crs.uuid)
    pointset.add_patch = patch_mock  # intercept the patch instead of storing real geometry
    pointset.from_charisma("path/to/file")
    # Assert
    open_mock.assert_called_once_with(test_path, 'r')
    patch_mock.assert_called_once()
    assert_array_almost_equal(patch_mock.call_args[0][0], array)
def test_from_charisma_method_failure(mocker, example_model_and_crs):
    """Constructing a PointSet from a charisma file without a crs_uuid must assert."""
    # Arrange
    model, crs = example_model_and_crs
    charisma_file = "INLINE : 25701 XLINE : 23693 420691.19624 6292314.22044 2799.05591\nINLINE : 25701 XLINE : 23694 420680.15765 6292308.35532 2798.08496"
    open_mock = mocker.mock_open(read_data = charisma_file)
    mocker.patch("builtins.open", open_mock)
    # Act
    # No crs_uuid is supplied, so the constructor should raise before reading.
    with pytest.raises(AssertionError) as e_info:
        pointset = resqpy.surface.PointSet(model, charisma_file = "path/to/file")
    # Assert
    assert str(e_info.value) == 'crs uuid missing when establishing point set from charisma file'
def test_from_irap_method(mocker, example_model_and_crs):
    """from_irap() should parse xyz rows from the file and hand them to add_patch as one array."""
    # Arrange
    model, crs = example_model_and_crs
    irap_file = "429450.658333 6296954.224574 2403.837646\n429444.793211 6296965.263155 2403.449707"
    open_mock = mocker.mock_open(read_data = irap_file)
    mocker.patch("builtins.open", open_mock)
    test_path = "path/to/file"
    # Expected xyz points: each whitespace-separated line becomes one row.
    array = np.array([[429450.658333, 6296954.224574, 2403.837646], [429444.793211, 6296965.263155, 2403.449707]])
    patch_mock = mocker.MagicMock(name = 'add_patch')
    # Act
    pointset = resqpy.surface.PointSet(model, crs_uuid = crs.uuid)
    pointset.add_patch = patch_mock  # intercept the patch instead of storing real geometry
    pointset.from_irap("path/to/file")
    # Assert
    open_mock.assert_called_once_with(test_path, 'r')
    patch_mock.assert_called_once()
    assert_array_almost_equal(patch_mock.call_args[0][0], array)
def test_from_irap_method_failure(mocker, example_model_and_crs):
    """Constructing a PointSet from an irap file without a crs_uuid must assert."""
    # Arrange
    model, crs = example_model_and_crs
    irap_file = "429450.658333 6296954.224574 2403.837646\n429444.793211 6296965.263155 2403.449707"
    open_mock = mocker.mock_open(read_data = irap_file)
    mocker.patch("builtins.open", open_mock)
    # Act
    # No crs_uuid is supplied, so the constructor should raise before reading.
    with pytest.raises(AssertionError) as e_info:
        pointset = resqpy.surface.PointSet(model, irap_file = "path/to/file")
    # Assert
    assert str(e_info.value) == 'crs uuid missing when establishing point set from irap file'
# Cases: closed polyline drops its repeated closing point before concatenation;
# open polyline keeps all of its points.
@pytest.mark.parametrize('closed,coords1,coords2,expected',
                         [(True, np.array([[2, 2, 2], [3, 3, 3], [2, 2, 2]]), np.array(
                             [[1, 1, 1]]), np.array([[1, 1, 1], [2, 2, 2], [3, 3, 3]])),
                          (False, np.array([[2, 2, 2], [3, 3, 3], [2, 2, 2]]), np.array(
                              [[1, 1, 1]]), np.array([[1, 1, 1], [2, 2, 2], [3, 3, 3], [2, 2, 2]]))])
def test_concat_polyset_points(closed, coords1, coords2, expected):
    """concat_polyset_points() should append coords1 after coords2, honouring the closed flag."""
    # Act
    result = resqpy.surface._pointset.concat_polyset_points(closed, coords1, coords2)
    # Assert
    assert_array_almost_equal(result, expected)
| 39.535354 | 180 | 0.689576 | 549 | 3,914 | 4.693989 | 0.198543 | 0.010865 | 0.046566 | 0.055879 | 0.85293 | 0.814125 | 0.74544 | 0.717501 | 0.717501 | 0.716337 | 0 | 0.15703 | 0.191364 | 3,914 | 98 | 181 | 39.938776 | 0.657188 | 0.047522 | 0 | 0.551724 | 0 | 0.034483 | 0.209378 | 0.022366 | 0 | 0 | 0 | 0 | 0.206897 | 1 | 0.086207 | false | 0 | 0.103448 | 0 | 0.189655 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
b362f333352e1d0e0de1bb79abebba53d0a6f461 | 329 | py | Python | project-metrics/metrics_service/metrics/__init__.py | wassgha/amp-github-apps | 7ab254da8fd6a8247b8229ede48af77693a68651 | [
"Apache-2.0"
] | null | null | null | project-metrics/metrics_service/metrics/__init__.py | wassgha/amp-github-apps | 7ab254da8fd6a8247b8229ede48af77693a68651 | [
"Apache-2.0"
] | 1 | 2019-10-08T01:37:47.000Z | 2019-10-08T01:37:47.000Z | project-metrics/metrics_service/metrics/__init__.py | wassgha/amp-github-apps | 7ab254da8fd6a8247b8229ede48af77693a68651 | [
"Apache-2.0"
] | 1 | 2019-10-08T01:34:21.000Z | 2019-10-08T01:34:21.000Z | """Import metric implementations so they can register themselves."""
from metrics import base
from metrics import absolute_coverage
from metrics import presubmit_latency
from metrics import release_cherrypick_count
from metrics import release_granularity
from metrics import travis_greenness
from metrics import travis_flakiness
| 36.555556 | 68 | 0.869301 | 43 | 329 | 6.488372 | 0.511628 | 0.275986 | 0.426523 | 0.172043 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.109422 | 329 | 8 | 69 | 41.125 | 0.952218 | 0.18845 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 0 | 0 | 0 | null | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 0 | 0 | 0 | 6 |
b37510509c22afd34a13a532c28a844616d8d933 | 60 | py | Python | spotpuppy/core/__init__.py | JoshPattman/Spot-Puppy-Lib | 90172c269ccaf7feefe55257606e0c519871a66d | [
"MIT"
] | 1 | 2021-11-16T13:24:16.000Z | 2021-11-16T13:24:16.000Z | spotpuppy/core/__init__.py | JoshPattman/spotpuppy | 90172c269ccaf7feefe55257606e0c519871a66d | [
"MIT"
] | null | null | null | spotpuppy/core/__init__.py | JoshPattman/spotpuppy | 90172c269ccaf7feefe55257606e0c519871a66d | [
"MIT"
] | null | null | null | from . import leg_control
from . import quadruped_controller | 30 | 34 | 0.85 | 8 | 60 | 6.125 | 0.75 | 0.408163 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.116667 | 60 | 2 | 34 | 30 | 0.924528 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | null | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
2fb40464fb309e23c19faafc72df1530c99810ad | 216 | py | Python | django_cached_field/constants.py | blackrobot/django-cached-field | 1533d1045dc5d6ceac29eeb60b8a8ee6ce356d05 | [
"BSD-3-Clause"
] | 8 | 2016-04-01T17:28:56.000Z | 2021-03-04T13:34:24.000Z | django_cached_field/constants.py | blackrobot/django-cached-field | 1533d1045dc5d6ceac29eeb60b8a8ee6ce356d05 | [
"BSD-3-Clause"
] | 7 | 2015-12-17T18:36:31.000Z | 2020-05-06T23:03:29.000Z | django_cached_field/constants.py | tuxtitlan/django-cached-field | 6ee880d07d6bc6f0a58a06e286150c272ae89b2a | [
"BSD-3-Clause"
] | 4 | 2015-12-05T17:52:51.000Z | 2020-05-06T15:03:34.000Z | CACHED_FIELD_USE_TIMEZONE_SETTING = 'CACHED_FIELD_USE_TIMEZONE'
CACHED_FIELD_EAGER_RECALCULATION_SETTING = 'CACHED_FIELD_EAGER_RECALCULATION'
CACHED_FIELD_TRANSACTION_AWARE_SETTING = 'CACHED_FIELD_TRANSACTION_AWARE'
| 54 | 77 | 0.916667 | 27 | 216 | 6.555556 | 0.333333 | 0.372881 | 0.305085 | 0.248588 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.041667 | 216 | 3 | 78 | 72 | 0.855072 | 0 | 0 | 0 | 0 | 0 | 0.402778 | 0.402778 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | null | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
2ff1028b1416f7618a55c7bbce86dc0700a9f91e | 3,982 | py | Python | z2/part3/updated_part2_batch/jm/parser_errors_2/205227290.py | kozakusek/ipp-2020-testy | 09aa008fa53d159672cc7cbf969a6b237e15a7b8 | [
"MIT"
] | 1 | 2020-04-16T12:13:47.000Z | 2020-04-16T12:13:47.000Z | z2/part3/updated_part2_batch/jm/parser_errors_2/205227290.py | kozakusek/ipp-2020-testy | 09aa008fa53d159672cc7cbf969a6b237e15a7b8 | [
"MIT"
] | 18 | 2020-03-06T17:50:15.000Z | 2020-05-19T14:58:30.000Z | z2/part3/updated_part2_batch/jm/parser_errors_2/205227290.py | kozakusek/ipp-2020-testy | 09aa008fa53d159672cc7cbf969a6b237e15a7b8 | [
"MIT"
] | 18 | 2020-03-06T17:45:13.000Z | 2020-06-09T19:18:31.000Z | from part1 import (
gamma_board,
gamma_busy_fields,
gamma_delete,
gamma_free_fields,
gamma_golden_move,
gamma_golden_possible,
gamma_move,
gamma_new,
)
"""
scenario: test_random_actions
uuid: 205227290
"""
"""
random actions, total chaos
"""
# Auto-generated random-action scenario (see uuid in the module docstring):
# a fixed sequence of gamma_* calls with their expected return values.
# Do not edit the sequence by hand — regenerate instead.
# 5x5 board, 4 players, at most 7 areas per player.
board = gamma_new(5, 5, 4, 7)
assert board is not None
assert gamma_move(board, 1, 2, 4) == 1
assert gamma_move(board, 1, 1, 4) == 1
assert gamma_golden_possible(board, 1) == 0
assert gamma_move(board, 2, 2, 1) == 1
assert gamma_move(board, 2, 2, 4) == 0
assert gamma_free_fields(board, 2) == 22
assert gamma_move(board, 3, 0, 3) == 1
assert gamma_move(board, 4, 4, 2) == 1
assert gamma_golden_possible(board, 4) == 1
assert gamma_move(board, 1, 0, 3) == 0
assert gamma_free_fields(board, 1) == 20
assert gamma_move(board, 2, 1, 4) == 0
assert gamma_free_fields(board, 2) == 20
# First board snapshot check.
board142046198 = gamma_board(board)
assert board142046198 is not None
assert board142046198 == (".11..\n" "3....\n" "....4\n" "..2..\n" ".....\n")
del board142046198
board142046198 = None
assert gamma_move(board, 3, 3, 1) == 1
assert gamma_move(board, 4, 1, 0) == 1
assert gamma_move(board, 2, 4, 4) == 1
assert gamma_move(board, 3, 4, 3) == 1
assert gamma_move(board, 3, 0, 1) == 1
assert gamma_move(board, 4, 3, 2) == 1
assert gamma_free_fields(board, 4) == 14
assert gamma_move(board, 1, 3, 3) == 1
assert gamma_move(board, 2, 0, 4) == 1
assert gamma_golden_possible(board, 2) == 1
assert gamma_move(board, 3, 2, 1) == 0
assert gamma_move(board, 3, 0, 3) == 0
assert gamma_free_fields(board, 3) == 12
assert gamma_golden_possible(board, 3) == 1
assert gamma_move(board, 4, 2, 1) == 0
assert gamma_move(board, 1, 2, 1) == 0
assert gamma_move(board, 2, 2, 2) == 1
assert gamma_move(board, 3, 4, 3) == 0
assert gamma_move(board, 3, 2, 4) == 0
assert gamma_free_fields(board, 3) == 11
assert gamma_move(board, 4, 1, 1) == 1
assert gamma_move(board, 1, 1, 0) == 0
assert gamma_move(board, 2, 2, 2) == 0
assert gamma_move(board, 3, 3, 1) == 0
assert gamma_move(board, 4, 0, 4) == 0
assert gamma_move(board, 4, 3, 0) == 1
assert gamma_move(board, 1, 3, 1) == 0
assert gamma_move(board, 1, 3, 4) == 1
assert gamma_move(board, 2, 1, 4) == 0
assert gamma_move(board, 2, 0, 4) == 0
assert gamma_move(board, 3, 0, 4) == 0
assert gamma_move(board, 3, 3, 3) == 0
assert gamma_move(board, 4, 3, 1) == 0
assert gamma_move(board, 4, 2, 3) == 1
assert gamma_move(board, 1, 3, 1) == 0
assert gamma_busy_fields(board, 1) == 4
assert gamma_golden_possible(board, 1) == 1
assert gamma_move(board, 2, 1, 3) == 1
assert gamma_move(board, 3, 1, 4) == 0
assert gamma_move(board, 3, 1, 2) == 1
assert gamma_move(board, 1, 0, 2) == 1
assert gamma_move(board, 2, 4, 1) == 1
assert gamma_move(board, 3, 0, 4) == 0
assert gamma_move(board, 3, 1, 4) == 0
assert gamma_move(board, 4, 4, 3) == 0
assert gamma_move(board, 1, 4, 3) == 0
assert gamma_move(board, 1, 2, 3) == 0
assert gamma_move(board, 2, 3, 4) == 0
assert gamma_move(board, 3, 4, 0) == 1
assert gamma_golden_possible(board, 3) == 1
assert gamma_move(board, 4, 1, 2) == 0
assert gamma_move(board, 1, 4, 1) == 0
assert gamma_move(board, 1, 1, 4) == 0
assert gamma_move(board, 2, 0, 2) == 0
assert gamma_move(board, 2, 1, 2) == 0
assert gamma_move(board, 3, 0, 2) == 0
assert gamma_move(board, 4, 0, 2) == 0
# Second board snapshot check (board is now full except one field).
board390817808 = gamma_board(board)
assert board390817808 is not None
assert board390817808 == ("21112\n" "32413\n" "13244\n" "34232\n" ".4.43\n")
del board390817808
board390817808 = None
assert gamma_move(board, 1, 4, 4) == 0
assert gamma_move(board, 1, 2, 2) == 0
assert gamma_golden_possible(board, 1) == 1
assert gamma_move(board, 2, 0, 2) == 0
assert gamma_move(board, 2, 4, 1) == 0
assert gamma_free_fields(board, 2) == 2
assert gamma_move(board, 3, 0, 2) == 0
assert gamma_move(board, 3, 3, 1) == 0
assert gamma_move(board, 4, 0, 0) == 1
assert gamma_move(board, 1, 0, 2) == 0
assert gamma_free_fields(board, 1) == 1
assert gamma_move(board, 2, 0, 2) == 0
# Release the C-side board structure.
gamma_delete(board)
| 32.639344 | 76 | 0.672275 | 738 | 3,982 | 3.47019 | 0.062331 | 0.352206 | 0.386568 | 0.515424 | 0.796173 | 0.783678 | 0.71886 | 0.45451 | 0.307302 | 0.301835 | 0 | 0.130723 | 0.166248 | 3,982 | 121 | 77 | 32.909091 | 0.640663 | 0 | 0 | 0.180952 | 0 | 0 | 0.017976 | 0 | 0 | 0 | 0 | 0 | 0.828571 | 1 | 0 | false | 0 | 0.009524 | 0 | 0.009524 | 0 | 0 | 0 | 0 | null | 1 | 1 | 1 | 0 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
ff2f7393c8213b0aa63f846f81bc29c8d882e11d | 6,894 | py | Python | tests/components/mill/test_config_flow.py | MrDelik/core | 93a66cc357b226389967668441000498a10453bb | [
"Apache-2.0"
] | 30,023 | 2016-04-13T10:17:53.000Z | 2020-03-02T12:56:31.000Z | tests/components/mill/test_config_flow.py | MrDelik/core | 93a66cc357b226389967668441000498a10453bb | [
"Apache-2.0"
] | 31,101 | 2020-03-02T13:00:16.000Z | 2022-03-31T23:57:36.000Z | tests/components/mill/test_config_flow.py | MrDelik/core | 93a66cc357b226389967668441000498a10453bb | [
"Apache-2.0"
] | 11,956 | 2016-04-13T18:42:31.000Z | 2020-03-02T09:32:12.000Z | """Tests for Mill config flow."""
from unittest.mock import patch
from homeassistant import config_entries
from homeassistant.components.mill.const import CLOUD, CONNECTION_TYPE, DOMAIN, LOCAL
from homeassistant.const import CONF_IP_ADDRESS, CONF_PASSWORD, CONF_USERNAME
from homeassistant.data_entry_flow import RESULT_TYPE_FORM
from tests.common import MockConfigEntry
async def test_show_config_form(hass):
    """Test that starting a user flow renders the initial form."""
    flow_result = await hass.config_entries.flow.async_init(
        DOMAIN, context={"source": config_entries.SOURCE_USER}
    )

    # The flow must stop at a form on the "user" step.
    assert flow_result["type"] == RESULT_TYPE_FORM
    assert flow_result["step_id"] == "user"
async def test_create_entry(hass):
    """Test that valid cloud credentials produce a config entry."""
    init_result = await hass.config_entries.flow.async_init(
        DOMAIN, context={"source": config_entries.SOURCE_USER}
    )
    assert init_result["type"] == RESULT_TYPE_FORM
    assert init_result["errors"] is None

    # Pick the cloud connection type; the flow asks for credentials next.
    type_result = await hass.config_entries.flow.async_configure(
        init_result["flow_id"], {CONNECTION_TYPE: CLOUD}
    )
    assert type_result["type"] == RESULT_TYPE_FORM

    # With a successful cloud login, the flow finishes with an entry.
    with patch("mill.Mill.connect", return_value=True):
        final_result = await hass.config_entries.flow.async_configure(
            type_result["flow_id"],
            {CONF_USERNAME: "user", CONF_PASSWORD: "pswd"},
        )
        await hass.async_block_till_done()

    assert final_result["type"] == "create_entry"
    assert final_result["title"] == "user"
    assert final_result["data"] == {
        CONF_USERNAME: "user",
        CONF_PASSWORD: "pswd",
        CONNECTION_TYPE: CLOUD,
    }
async def test_flow_entry_already_exists(hass):
    """Test that re-entering existing cloud credentials aborts the flow."""
    credentials = {
        CONF_USERNAME: "user",
        CONF_PASSWORD: "pswd",
    }

    # Pre-register an entry keyed on the same username.
    MockConfigEntry(
        domain="mill",
        data=credentials,
        unique_id=credentials[CONF_USERNAME],
    ).add_to_hass(hass)

    init_result = await hass.config_entries.flow.async_init(
        DOMAIN, context={"source": config_entries.SOURCE_USER}
    )
    assert init_result["type"] == RESULT_TYPE_FORM
    assert init_result["errors"] is None

    type_result = await hass.config_entries.flow.async_configure(
        init_result["flow_id"], {CONNECTION_TYPE: CLOUD}
    )
    assert type_result["type"] == RESULT_TYPE_FORM

    # Submitting the duplicate credentials must abort, not create a second entry.
    with patch("mill.Mill.connect", return_value=True):
        final_result = await hass.config_entries.flow.async_configure(
            type_result["flow_id"],
            credentials,
        )
        await hass.async_block_till_done()

    assert final_result["type"] == "abort"
    assert final_result["reason"] == "already_configured"
async def test_connection_error(hass):
    """Test that a failed cloud login re-shows the form with an error."""
    init_result = await hass.config_entries.flow.async_init(
        DOMAIN, context={"source": config_entries.SOURCE_USER}
    )
    assert init_result["type"] == RESULT_TYPE_FORM
    assert init_result["errors"] is None

    type_result = await hass.config_entries.flow.async_configure(
        init_result["flow_id"], {CONNECTION_TYPE: CLOUD}
    )
    assert type_result["type"] == RESULT_TYPE_FORM

    # connect() returning False means the credentials could not be verified.
    with patch("mill.Mill.connect", return_value=False):
        final_result = await hass.config_entries.flow.async_configure(
            type_result["flow_id"],
            {CONF_USERNAME: "user", CONF_PASSWORD: "pswd"},
        )

    assert final_result["type"] == RESULT_TYPE_FORM
    assert final_result["errors"] == {"base": "cannot_connect"}
async def test_local_create_entry(hass):
    """Test that a reachable local device produces a config entry."""
    init_result = await hass.config_entries.flow.async_init(
        DOMAIN, context={"source": config_entries.SOURCE_USER}
    )
    assert init_result["type"] == RESULT_TYPE_FORM
    assert init_result["errors"] is None

    # Pick the local connection type; the flow asks for an IP address next.
    type_result = await hass.config_entries.flow.async_configure(
        init_result["flow_id"], {CONNECTION_TYPE: LOCAL}
    )
    assert type_result["type"] == RESULT_TYPE_FORM

    user_input = {
        CONF_IP_ADDRESS: "192.168.1.59",
    }
    # A dict status payload from mill_local.Mill.connect means the heater responded.
    device_status = {
        "name": "panel heater gen. 3",
        "version": "0x210927",
        "operation_key": "",
        "status": "ok",
    }
    with patch("mill_local.Mill.connect", return_value=device_status):
        final_result = await hass.config_entries.flow.async_configure(
            type_result["flow_id"],
            user_input,
        )

    # The stored data is the user input plus the chosen connection type.
    user_input[CONNECTION_TYPE] = LOCAL
    assert final_result["type"] == "create_entry"
    assert final_result["title"] == user_input[CONF_IP_ADDRESS]
    assert final_result["data"] == user_input
async def test_local_flow_entry_already_exists(hass):
    """Test that re-adding an already-configured local device aborts."""
    existing_data = {
        CONF_IP_ADDRESS: "192.168.1.59",
    }
    # Pre-register an entry keyed on the same IP address.
    MockConfigEntry(
        domain="mill",
        data=existing_data,
        unique_id=existing_data[CONF_IP_ADDRESS],
    ).add_to_hass(hass)

    init_result = await hass.config_entries.flow.async_init(
        DOMAIN, context={"source": config_entries.SOURCE_USER}
    )
    assert init_result["type"] == RESULT_TYPE_FORM
    assert init_result["errors"] is None

    type_result = await hass.config_entries.flow.async_configure(
        init_result["flow_id"], {CONNECTION_TYPE: LOCAL}
    )
    assert type_result["type"] == RESULT_TYPE_FORM

    # Submitting the same IP again must abort even though the device responds.
    device_status = {
        "name": "panel heater gen. 3",
        "version": "0x210927",
        "operation_key": "",
        "status": "ok",
    }
    with patch("mill_local.Mill.connect", return_value=device_status):
        final_result = await hass.config_entries.flow.async_configure(
            type_result["flow_id"],
            {
                CONF_IP_ADDRESS: "192.168.1.59",
            },
        )

    assert final_result["type"] == "abort"
    assert final_result["reason"] == "already_configured"
async def test_local_connection_error(hass):
    """Test that an unreachable local device re-shows the form with an error."""
    init_result = await hass.config_entries.flow.async_init(
        DOMAIN, context={"source": config_entries.SOURCE_USER}
    )
    assert init_result["type"] == RESULT_TYPE_FORM
    assert init_result["errors"] is None

    type_result = await hass.config_entries.flow.async_configure(
        init_result["flow_id"], {CONNECTION_TYPE: LOCAL}
    )
    assert type_result["type"] == RESULT_TYPE_FORM

    # connect() returning None means the heater did not answer at that address.
    with patch("mill_local.Mill.connect", return_value=None):
        final_result = await hass.config_entries.flow.async_configure(
            type_result["flow_id"],
            {
                CONF_IP_ADDRESS: "192.168.1.59",
            },
        )

    assert final_result["type"] == RESULT_TYPE_FORM
    assert final_result["errors"] == {"base": "cannot_connect"}
| 27.798387 | 85 | 0.617058 | 783 | 6,894 | 5.168582 | 0.114943 | 0.071658 | 0.070423 | 0.103286 | 0.866815 | 0.859402 | 0.859402 | 0.843588 | 0.825797 | 0.804052 | 0 | 0.013812 | 0.264868 | 6,894 | 247 | 86 | 27.910931 | 0.784728 | 0.003916 | 0 | 0.649485 | 0 | 0 | 0.110621 | 0.010499 | 0 | 0 | 0.002435 | 0 | 0.175258 | 1 | 0 | false | 0.025773 | 0.030928 | 0 | 0.030928 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.